blob: 2e2e4f9aeb63470a183c2e5a1aac0964a8b6ac2a [file] [log] [blame]
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open @full_path using the SMB unix-extensions POSIX create call.
 *
 * On success the server reply fills in *@poplock and *@pnetfid.  When
 * @pinode is non-NULL, the FILE_UNIX_BASIC_INFO returned by the server is
 * used to instantiate a new inode (*@pinode == NULL) or refresh an
 * existing one.  Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference only needed for the duration of the create call */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no usable file info */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open @full_path via the protocol-specific ->open server operation
 * (NT-style open).  Translates the POSIX @f_flags into a desired-access
 * mask and create disposition, then refreshes the inode metadata from the
 * server on success.  Returns -ENOSYS when the server ops provide no open
 * handler, -ENOMEM on allocation failure, otherwise the rc of the open /
 * inode-info calls.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* mount option may request backup-intent semantics on every open */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	/* refresh inode attributes from the server after a successful open */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
239
/*
 * Allocate and initialize the cifsFileInfo for a just-opened file:
 * attach a per-fid lock list to the inode (under lock_sem), take a tlink
 * reference, register the file on the tcon and inode open-file lists, and
 * store the result in file->private_data.  Returns NULL on allocation
 * failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	/* link the per-fid lock list into the inode's list of lock lists */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	/* let the protocol-specific layer record fid and oplock state */
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
290
/*
 * Take an additional reference on the file private data under
 * cifs_file_list_lock.  Returns the same pointer for caller convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
299
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops: the file is unlinked from the tcon and
 * inode open-file lists, oplock state is cleared on last close of the
 * inode, the handle is closed on the server (unless already invalid or a
 * reconnect is pending), outstanding byte-range lock records are freed,
 * and the tlink/dentry references taken in cifs_new_fileinfo are dropped.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other references remain; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* wait for any in-flight oplock break work before tearing down */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
368
/*
 * VFS ->open for cifs regular files.  Tries a POSIX open first when the
 * server advertises the unix-extensions path-ops capability (and it is
 * not marked broken); otherwise falls back to an NT-style open via
 * cifs_nt_open().  On success a cifsFileInfo is attached to the struct
 * file, and for a freshly created file on a unix-extensions mount the
 * mode is pushed to the server after the open.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	/* only request an oplock if the server grants them */
	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server mishandled posix open: disable it from now on */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open since we cannot track it */
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
475
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.  Currently a stub that always reports success.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
488
/*
 * Re-open a file whose handle was invalidated (e.g. after reconnect).
 * Serialized against other users of the handle via cfile->fh_mutex; a
 * handle that is already valid returns 0 immediately.  Tries the POSIX
 * reopen path first on unix-extensions mounts, otherwise the NT-style
 * ->open server op.  When @can_flush is set, dirty pages are written
 * back and inode info is refreshed from the server after the reopen.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* another task already reopened the handle */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
618
619int cifs_close(struct inode *inode, struct file *file)
620{
Jeff Layton77970692011-04-05 16:23:47 -0700621 if (file->private_data != NULL) {
622 cifsFileInfo_put(file->private_data);
623 file->private_data = NULL;
624 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625
Steve Frenchcdff08e2010-10-21 22:46:14 +0000626 /* return code from the ->release op is always ignored */
627 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628}
629
/*
 * VFS ->release for directories.  Invalidates an in-progress search
 * handle (closing it on the server via ->close_dir when available),
 * releases any network buffer still held by the search state, drops the
 * tlink reference and frees the private data.  Server-side close errors
 * are logged and then ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/* mark invalid before dropping the lock, then close on server */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
680
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400681static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300682cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000683{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400684 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000685 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400686 if (!lock)
687 return lock;
688 lock->offset = offset;
689 lock->length = length;
690 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400691 lock->pid = current->tgid;
692 INIT_LIST_HEAD(&lock->blist);
693 init_waitqueue_head(&lock->block_q);
694 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400695}
696
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700697void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400698cifs_del_lock_waiters(struct cifsLockInfo *lock)
699{
700 struct cifsLockInfo *li, *tmp;
701 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
702 list_del_init(&li->blist);
703 wake_up(&li->block_q);
704 }
705}
706
/*
 * Scan one fid's lock list for a lock that conflicts with the range
 * [offset, offset + length) of the given @type requested through @cfile.
 * On conflict, store the conflicting lock in *@conf_lock (if non-NULL)
 * and return true; return false when nothing in this list conflicts.
 *
 * @rw_check selects the looser read/write-check semantics: any lock held
 * by the same fid and the same thread group is not a conflict.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* ranges do not overlap at all - no possible conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/* rw_check mode: our own fid+tgid locks never conflict */
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		/*
		 * A shared (read) request is compatible with our own locks
		 * (same fid and tgid) and with other shared locks of the
		 * same type.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
733
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700734bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300735cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700736 __u8 type, struct cifsLockInfo **conf_lock,
737 bool rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400738{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300739 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700740 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300741 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300742
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700743 list_for_each_entry(cur, &cinode->llist, llist) {
744 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700745 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300746 if (rc)
747 break;
748 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300749
750 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400751}
752
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access is enough - we only walk the lock lists here */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		/* report the conflicting lock's properties back via flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no local conflict but locks are on the server - must ask it */
		rc = 1;
	else
		/* all locks are cached locally - no conflict means unlocked */
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
790
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400791static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300792cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400793{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300794 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700795 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700796 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700797 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000798}
799
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	/* write access - we may modify the lock list below */
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's block list and
		 * sleep (interruptibly) until cifs_del_lock_waiters unlinks
		 * us - the wait condition checks that blist is empty again.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unlink from the block list before leaving */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
846
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300847/*
848 * Check if there is another lock that prevents us to set the lock (posix
849 * style). If such a lock exists, update the flock structure with its
850 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
851 * or leave it the same if we can't. Returns 0 if we don't need to request to
852 * the server or 1 otherwise.
853 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400854static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400855cifs_posix_lock_test(struct file *file, struct file_lock *flock)
856{
857 int rc = 0;
858 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
859 unsigned char saved_type = flock->fl_type;
860
Pavel Shilovsky50792762011-10-29 17:17:57 +0400861 if ((flock->fl_flags & FL_POSIX) == 0)
862 return 1;
863
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700864 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400865 posix_test_lock(file, flock);
866
867 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
868 flock->fl_type = saved_type;
869 rc = 1;
870 }
871
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700872 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400873 return rc;
874}
875
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A conflicting lock is held; wait until it is released
		 * (fl_next cleared), then retry, dropping our block entry
		 * if the wait was interrupted.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
908
/*
 * Push all locally cached mandatory byte-range locks of this open file to
 * the server, batching them into LOCKING_ANDX_RANGE arrays sized to the
 * negotiated buffer.  Clears can_cache_brlcks so subsequent lock requests
 * go to the server.  Returns 0 on success or the last non-zero rc from a
 * cifs_lockv call (partial failures are reported but pushing continues).
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* two passes: exclusive locks first, then shared locks */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - someone already pushed the locks */
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	/* how many lock ranges fit in one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* flush the final partial batch for this lock type */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}
993
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of one POSIX lock taken while holding lock_flocks(), so the
 * actual network requests can be sent afterwards without that spinlock.
 */
struct lock_to_push {
	struct list_head llist;		/* entry in the locks_to_send list */
	__u64 offset;			/* start of the locked range */
	__u64 length;			/* length of the locked range */
	__u32 pid;			/* lock owner's pid */
	__u16 netfid;			/* server file handle to lock on */
	__u8 type;			/* CIFS_RDLCK or CIFS_WRLCK */
};
1007
/*
 * Push all locally cached POSIX byte-range locks of this inode to the
 * server.  Because the inode's flock list can only be walked under
 * lock_flocks() (a spinlock) while the network calls sleep, the locks are
 * first counted, then copied into preallocated lock_to_push entries, and
 * only afterwards sent with CIFSSMBPosixLock.  Clears can_cache_brlcks on
 * all paths.  Returns 0, -ENOMEM if preallocation fails, or the last
 * non-zero rc from a lock request.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - locks were already pushed */
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/* pass 1: count the POSIX locks we will have to push */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* pass 2: copy each lock's data into a preallocated entry */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* now we can sleep: send each snapshotted lock to the server */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way - free what we got, then exit via out */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1107
1108static int
1109cifs_push_locks(struct cifsFileInfo *cfile)
1110{
1111 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1112 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1113
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001114 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001115 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1116 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1117 return cifs_push_posix_locks(cfile);
1118
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001119 return tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001120}
1121
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001122static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001123cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001124 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001126 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001127 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001128 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001129 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001130 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001131 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001132 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001134 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001135 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001136 "not implemented yet");
1137 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001138 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001139 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001141 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001143 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001144 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001145 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001146 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001147 *lock = 1;
1148 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001149 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001150 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001151 *unlock = 1;
1152 /* Check if unlock includes more than one lock range */
1153 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001154 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001155 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001156 *lock = 1;
1157 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001158 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001159 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001160 *lock = 1;
1161 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001162 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001163 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001166 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001167}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
/*
 * Handle F_GETLK: find out whether the requested range could be locked.
 * For POSIX-capable shares, test locally then query the server.  For
 * mandatory semantics, test the cached lists; if inconclusive, probe the
 * server by trying to take (and immediately release) the lock - first at
 * the requested type, then downgraded to shared - and report the result
 * through flock->fl_type.  Always returns 0 for the mandatory path after
 * probing; the lock state is conveyed in *flock.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local test answered conclusively - done */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory style: check locally cached lock lists first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* lock succeeded - range is free; undo the probe lock */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	/* a shared request that failed can only be blocked by a writer */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive failed - retry the probe as a shared lock */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		/* shared probe worked - a reader holds the range; undo it */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1236
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001237void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001238cifs_move_llist(struct list_head *source, struct list_head *dest)
1239{
1240 struct list_head *li, *tmp;
1241 list_for_each_safe(li, tmp, source)
1242 list_move(li, dest);
1243}
1244
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001245void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001246cifs_free_llist(struct list_head *llist)
1247{
1248 struct cifsLockInfo *li, *tmp;
1249 list_for_each_entry_safe(li, tmp, llist, llist) {
1250 cifs_del_lock_waiters(li);
1251 list_del(&li->llist);
1252 kfree(li);
1253 }
1254}
1255
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001256int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001257cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1258 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001259{
1260 int rc = 0, stored_rc;
1261 int types[] = {LOCKING_ANDX_LARGE_FILES,
1262 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1263 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001264 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001265 LOCKING_ANDX_RANGE *buf, *cur;
1266 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1267 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1268 struct cifsLockInfo *li, *tmp;
1269 __u64 length = 1 + flock->fl_end - flock->fl_start;
1270 struct list_head tmp_llist;
1271
1272 INIT_LIST_HEAD(&tmp_llist);
1273
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001274 /*
1275 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1276 * and check it for zero before using.
1277 */
1278 max_buf = tcon->ses->server->maxBuf;
1279 if (!max_buf)
1280 return -EINVAL;
1281
1282 max_num = (max_buf - sizeof(struct smb_hdr)) /
1283 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001284 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1285 if (!buf)
1286 return -ENOMEM;
1287
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001288 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001289 for (i = 0; i < 2; i++) {
1290 cur = buf;
1291 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001292 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001293 if (flock->fl_start > li->offset ||
1294 (flock->fl_start + length) <
1295 (li->offset + li->length))
1296 continue;
1297 if (current->tgid != li->pid)
1298 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001299 if (types[i] != li->type)
1300 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001301 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001302 /*
1303 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001304 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001305 */
1306 list_del(&li->llist);
1307 cifs_del_lock_waiters(li);
1308 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001309 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001310 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001311 cur->Pid = cpu_to_le16(li->pid);
1312 cur->LengthLow = cpu_to_le32((u32)li->length);
1313 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1314 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1315 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1316 /*
1317 * We need to save a lock here to let us add it again to
1318 * the file's list if the unlock range request fails on
1319 * the server.
1320 */
1321 list_move(&li->llist, &tmp_llist);
1322 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001323 stored_rc = cifs_lockv(xid, tcon,
1324 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001325 li->type, num, 0, buf);
1326 if (stored_rc) {
1327 /*
1328 * We failed on the unlock range
1329 * request - add all locks from the tmp
1330 * list to the head of the file's list.
1331 */
1332 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001333 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001334 rc = stored_rc;
1335 } else
1336 /*
1337 * The unlock range request succeed -
1338 * free the tmp list.
1339 */
1340 cifs_free_llist(&tmp_llist);
1341 cur = buf;
1342 num = 0;
1343 } else
1344 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001345 }
1346 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001347 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001348 types[i], num, 0, buf);
1349 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001350 cifs_move_llist(&tmp_llist,
1351 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001352 rc = stored_rc;
1353 } else
1354 cifs_free_llist(&tmp_llist);
1355 }
1356 }
1357
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001358 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001359 kfree(buf);
1360 return rc;
1361}
1362
/*
 * Apply a lock or unlock request to an open file.
 *
 * If POSIX (unix-extensions) locking is in effect, the request is
 * first applied locally via cifs_posix_lock_set() and then sent to the
 * server with CIFSSMBPosixLock().  Otherwise the mandatory byte-range
 * path is used: the lock is staged with cifs_lock_add_if(), sent via
 * the server's mand_lock op, and cached with cifs_lock_add() only if
 * the server accepted it; unlocks go through mand_unlock_range.
 *
 * @type/@lock/@unlock have already been parsed from @flock by the
 * caller (cifs_lock()).  Returns 0 on success or a negative errno.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		/*
		 * rc <= 0 is final (success or hard error); a positive
		 * return means we must still send the lock to the server.
		 */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		/* an unlock request overrides the read/write lock type */
		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * NOTE(review): rc < 0 is an error, rc == 0 appears to mean
		 * the lock was handled locally (no server round-trip needed),
		 * and rc > 0 means we must ask the server — confirm against
		 * cifs_lock_add_if().
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused the lock - do not cache it */
			kfree(lock);
			goto out;
		}

		/* server granted the lock - add it to the file's list */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	/* keep the local (VFS) posix lock state in sync */
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
1424
1425int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1426{
1427 int rc, xid;
1428 int lock = 0, unlock = 0;
1429 bool wait_flag = false;
1430 bool posix_lck = false;
1431 struct cifs_sb_info *cifs_sb;
1432 struct cifs_tcon *tcon;
1433 struct cifsInodeInfo *cinode;
1434 struct cifsFileInfo *cfile;
1435 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001436 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001437
1438 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001439 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001440
1441 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1442 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1443 flock->fl_start, flock->fl_end);
1444
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001445 cfile = (struct cifsFileInfo *)file->private_data;
1446 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001447
1448 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1449 tcon->ses->server);
1450
1451 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001452 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001453 cinode = CIFS_I(file->f_path.dentry->d_inode);
1454
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001455 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001456 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1457 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1458 posix_lck = true;
1459 /*
1460 * BB add code here to normalize offset and length to account for
1461 * negative length which we can not accept over the wire.
1462 */
1463 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001464 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001465 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001466 return rc;
1467 }
1468
1469 if (!lock && !unlock) {
1470 /*
1471 * if no lock or unlock then nothing to do since we do not
1472 * know what it is
1473 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001474 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001475 return -EOPNOTSUPP;
1476 }
1477
1478 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1479 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001480 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 return rc;
1482}
1483
Jeff Layton597b0272012-03-23 14:40:56 -04001484/*
1485 * update the file size (if needed) after a write. Should be called with
1486 * the inode->i_lock held
1487 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001488void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001489cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1490 unsigned int bytes_written)
1491{
1492 loff_t end_of_write = offset + bytes_written;
1493
1494 if (end_of_write > cifsi->server_eof)
1495 cifsi->server_eof = end_of_write;
1496}
1497
/*
 * Write @write_size bytes from @write_data to the server at *@offset
 * through the open handle @open_file, on behalf of process @pid.
 *
 * Loops until all bytes are written, retrying each chunk on -EAGAIN
 * (reopening an invalidated handle as needed).  On success, advances
 * *@offset, updates the cached server EOF and local i_size, and
 * returns the number of bytes written; on total failure returns a
 * negative errno.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* the protocol dialect in use must provide a sync write op */
	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* partial success: report bytes written so far */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects server_eof updates (see
			   cifs_update_eof) */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the locally cached file size if we wrote past it */
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1582
/*
 * Find an open handle on @cifs_inode that is usable for reading and
 * not marked invalid.  Returns the handle with its reference count
 * raised (caller must cifsFileInfo_put() it), or NULL if none found.
 *
 * If @fsuid_only is set (only honored on multiuser mounts), handles
 * opened by other fsuids are skipped.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001616
/*
 * Find an open handle on @cifs_inode usable for writing.  Preference
 * order: a valid handle owned by the current thread group, then any
 * valid handle, then an invalidated handle which we attempt to reopen
 * (up to MAX_REOPEN_ATT times).  Returns the handle with a reference
 * held (caller must cifsFileInfo_put()), or NULL.
 *
 * If @fsuid_only is set (honored only on multiuser mounts), handles
 * opened by other fsuids are skipped.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	/* give up after too many failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: restrict to the current thread group */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember the first invalid handle as a
				   fallback candidate for reopening */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* try to revive the invalid handle (dropped the list lock,
		   since reopen goes to the server) */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed: push this handle to the list tail so
			   the next scan tries a different one, then retry */
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1696
/*
 * Write the byte range [@from, @to) of @page back to the server using
 * any available writable handle on the inode.  Used by the writepage
 * path.  Returns 0 on success, a negative errno on failure.
 *
 * Never extends the file: the range is clipped to the current i_size,
 * and a page wholly past EOF (racing with truncate) is ignored.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* page stays kmapped until the common exit below */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1750
/*
 * ->writepages handler: write back dirty pages of @mapping in batches
 * of consecutive pages (up to wsize worth) using the async write path.
 *
 * Each iteration gathers a run of consecutive dirty pages, locks them,
 * marks them writeback, and hands them to the server's async_writev op
 * via a cifs_writedata descriptor.  Falls back to generic_writepages()
 * (one page at a time) when wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;
	loff_t isize = i_size_read(mapping->host);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most one wsize worth of pages per request */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* keep only a locked run of consecutive dirty pages */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			/* block for the first page, never for later ones */
			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			/* page fully past EOF - nothing to write */
			if (page_offset(page) >= isize) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		/* the last page may be only partially inside EOF */
		wdata->tailsz =
			min(isize - page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		/* retry the send on -EAGAIN when doing a full sync */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
/*
 * Write a single (already locked) page back to the server via
 * cifs_partialpagewrite().  Retries forever on -EAGAIN during a full
 * sync; otherwise redirties the page so writeback tries again later.
 * The caller keeps responsibility for unlocking the page.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
1999
/* ->writepage() entry point: write the locked page, then unlock it */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval = cifs_writepage_locked(page, wbc);

	unlock_page(page);
	return retval;
}
2006
/*
 * ->write_end() for cifs: finish a buffered write started by write_begin.
 * @copied bytes were copied into @page at file position @pos.
 *
 * Returns the number of bytes accepted (normally @copied), or a negative
 * error when a non-uptodate partial page had to be written through to the
 * server and that write failed.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid to the server if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		/*
		 * NOTE(review): PageChecked here presumably marks a page that
		 * write_begin handed over without reading it in -- a full-length
		 * copy then makes it uptodate.  TODO: confirm against
		 * cifs_write_begin.
		 */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		/*
		 * The page is not fully valid, so it cannot simply be marked
		 * dirty and written back later -- push just the copied range
		 * to the server synchronously through the open handle.
		 */
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend the cached file size if this write went past EOF */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	/* release the lock and reference taken by write_begin */
	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2067
/*
 * Strict-cache variant of fsync: write back dirty pages in the range,
 * invalidate the page cache when we cannot trust cached reads (no read
 * oplock), then ask the server to flush the file unless the mount
 * disabled strict sync.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* push local dirty pages in [start, end] before involving the server */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		/* cached data may be stale -- drop it so future reads refetch */
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2110
/*
 * Regular fsync: write back dirty pages in the range and send a server
 * flush (unless disabled by mount flag).  Unlike cifs_strict_fsync()
 * this does not invalidate the page cache.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2144
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145/*
2146 * As file closes, flush all cached write data for this inode checking
2147 * for write behind errors.
2148 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002149int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002151 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 int rc = 0;
2153
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002154 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002155 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002156
Joe Perchesb6b38f72010-04-21 03:50:45 +00002157 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
2159 return rc;
2160}
2161
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002162static int
2163cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2164{
2165 int rc = 0;
2166 unsigned long i;
2167
2168 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002169 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002170 if (!pages[i]) {
2171 /*
2172 * save number of pages we have already allocated and
2173 * return with ENOMEM error
2174 */
2175 num_pages = i;
2176 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002177 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002178 }
2179 }
2180
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002181 if (rc) {
2182 for (i = 0; i < num_pages; i++)
2183 put_page(pages[i]);
2184 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002185 return rc;
2186}
2187
2188static inline
2189size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2190{
2191 size_t num_pages;
2192 size_t clen;
2193
2194 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002195 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002196
2197 if (cur_len)
2198 *cur_len = clen;
2199
2200 return num_pages;
2201}
2202
/*
 * Work-queue completion handler for an uncached async write.  Updates
 * the server EOF / i_size bookkeeping, wakes the submitter waiting in
 * cifs_iovec_write(), releases the data pages (unless the write must be
 * resent after -EAGAIN, in which case the resend reuses them), and
 * drops the work item's reference on the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* keep the pages only if the submitter will retry this write */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2227
/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		/* reopen the handle first if it has been marked invalid */
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				/*
				 * -EAGAIN from the reopen loops back and
				 * retries; any other failure falls out of the
				 * while condition and is returned.
				 */
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}
2248
/*
 * Uncached vectored write: split the user data into wsize-sized chunks,
 * copy each chunk into freshly allocated pages, submit them all as
 * async writes, then wait for the replies in order of increasing
 * offset.
 *
 * Returns the number of bytes written, or a negative error code if
 * nothing was written.  On success *poffset is advanced.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	/* this path depends on the protocol's async write support */
	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			/* no pages attached yet, so a plain kfree suffices */
			kfree(wdata);
			break;
		}

		/*
		 * Copy the user data page by page; a short copy (faulted
		 * user memory) shrinks cur_len to what was actually copied.
		 */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2378
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002379ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002380 unsigned long nr_segs, loff_t pos)
2381{
2382 ssize_t written;
2383 struct inode *inode;
2384
2385 inode = iocb->ki_filp->f_path.dentry->d_inode;
2386
2387 /*
2388 * BB - optimize the way when signing is disabled. We can drop this
2389 * extra memory-to-memory copying and use iovec buffers for constructing
2390 * write request.
2391 */
2392
2393 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2394 if (written > 0) {
2395 CIFS_I(inode)->invalid_mapping = true;
2396 iocb->ki_pos = pos;
2397 }
2398
2399 return written;
2400}
2401
/*
 * Cached write path used by cifs_strict_writev() when writes may go
 * through the page cache.  Fails with -EACCES when a conflicting
 * mandatory byte-range lock covers the region being written.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     true)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		/* honor O_SYNC/O_DSYNC for the range that was just written */
		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}
2443
/*
 * aio write entry point for strict-cache mounts: pick the safest write
 * path based on oplock state and lock capabilities.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	/*
	 * In strict cache mode we need to write the data to the server exactly
	 * from the pos to pos+len-1 rather than flush all affected pages
	 * because it may cause a error with mandatory locks on these pages but
	 * not on the region from pos to ppos+len-1.
	 */

	/* without an exclusive oplock, write through to the server */
	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

	/*
	 * POSIX-capable unix extensions (and no mandatory brlocks) make the
	 * generic cached write path safe without a brlock conflict check.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/* otherwise check for mandatory brlock conflicts first */
	return cifs_writev(iocb, iov, nr_segs, pos);
}
2472
Jeff Layton0471ca32012-05-16 07:13:16 -04002473static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002474cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002475{
2476 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002477
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002478 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2479 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002480 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002481 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002482 INIT_LIST_HEAD(&rdata->list);
2483 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002484 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002485 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002486
Jeff Layton0471ca32012-05-16 07:13:16 -04002487 return rdata;
2488}
2489
/*
 * Final kref release for a cifs_readdata: drop the reference held on
 * the open file (if one was attached) and free the structure.  Pages,
 * if any, must already have been released by the caller-specific
 * release wrapper.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
2501
Jeff Layton2a1bb132012-05-16 07:13:17 -04002502static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002503cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002504{
2505 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002506 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002507 unsigned int i;
2508
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002509 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002510 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2511 if (!page) {
2512 rc = -ENOMEM;
2513 break;
2514 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002515 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002516 }
2517
2518 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002519 for (i = 0; i < nr_pages; i++) {
2520 put_page(rdata->pages[i]);
2521 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002522 }
2523 }
2524 return rc;
2525}
2526
/*
 * kref release for readdata used by uncached reads: drop every attached
 * page reference, then hand off to the generic release to free the
 * structure itself.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}
2540
/*
 * Send an async read, retrying as long as the result is -EAGAIN, and
 * reopening the file handle first whenever it has been marked invalid.
 * Mirrors cifs_uncached_retry_writev() on the write side.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				/*
				 * -EAGAIN loops back to retry; any other
				 * reopen failure exits the loop and is
				 * returned.
				 */
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}
2560
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 on success or the error from memcpy_toiovecend(); *copied
 * is valid either way and reports the bytes successfully copied.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* distance of this response's data from the start of the request */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
2615
/*
 * Work-queue completion for an uncached async read: wake the waiter in
 * cifs_iovec_read() and drop the work item's reference (pages are
 * released with the final reference).
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2625
/*
 * Receive up to @len bytes of read response data from the server socket
 * into the readdata's page array.  Full pages are filled first; the
 * final partial page is zero-padded and recorded in rdata->tailsz.
 * Pages beyond the received length are released and dropped from the
 * array.
 *
 * Returns the number of bytes read, or the socket error if nothing was
 * read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2673
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002674static ssize_t
2675cifs_iovec_read(struct file *file, const struct iovec *iov,
2676 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
Jeff Layton1c892542012-05-16 07:13:17 -04002678 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002679 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002680 ssize_t total_read = 0;
2681 loff_t offset = *poffset;
2682 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002684 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002686 struct cifs_readdata *rdata, *tmp;
2687 struct list_head rdata_list;
2688 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002689
2690 if (!nr_segs)
2691 return 0;
2692
2693 len = iov_length(iov, nr_segs);
2694 if (!len)
2695 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696
Jeff Layton1c892542012-05-16 07:13:17 -04002697 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002698 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002699 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002700 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002702 if (!tcon->ses->server->ops->async_readv)
2703 return -ENOSYS;
2704
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002705 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2706 pid = open_file->pid;
2707 else
2708 pid = current->tgid;
2709
Steve Frenchad7a2922008-02-07 23:25:02 +00002710 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002711 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002712
Jeff Layton1c892542012-05-16 07:13:17 -04002713 do {
2714 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2715 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002716
Jeff Layton1c892542012-05-16 07:13:17 -04002717 /* allocate a readdata struct */
2718 rdata = cifs_readdata_alloc(npages,
2719 cifs_uncached_readv_complete);
2720 if (!rdata) {
2721 rc = -ENOMEM;
2722 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002724
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002725 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002726 if (rc)
2727 goto error;
2728
2729 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002730 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002731 rdata->offset = offset;
2732 rdata->bytes = cur_len;
2733 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002734 rdata->pagesz = PAGE_SIZE;
2735 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002736
2737 rc = cifs_retry_async_readv(rdata);
2738error:
2739 if (rc) {
2740 kref_put(&rdata->refcount,
2741 cifs_uncached_readdata_release);
2742 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 }
Jeff Layton1c892542012-05-16 07:13:17 -04002744
2745 list_add_tail(&rdata->list, &rdata_list);
2746 offset += cur_len;
2747 len -= cur_len;
2748 } while (len > 0);
2749
2750 /* if at least one read request send succeeded, then reset rc */
2751 if (!list_empty(&rdata_list))
2752 rc = 0;
2753
2754 /* the loop below should proceed in the order of increasing offsets */
2755restart_loop:
2756 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2757 if (!rc) {
2758 ssize_t copied;
2759
2760 /* FIXME: freezable sleep too? */
2761 rc = wait_for_completion_killable(&rdata->done);
2762 if (rc)
2763 rc = -EINTR;
2764 else if (rdata->result)
2765 rc = rdata->result;
2766 else {
2767 rc = cifs_readdata_to_iov(rdata, iov,
2768 nr_segs, *poffset,
2769 &copied);
2770 total_read += copied;
2771 }
2772
2773 /* resend call if it's a retryable error */
2774 if (rc == -EAGAIN) {
2775 rc = cifs_retry_async_readv(rdata);
2776 goto restart_loop;
2777 }
2778 }
2779 list_del_init(&rdata->list);
2780 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002782
Jeff Layton1c892542012-05-16 07:13:17 -04002783 cifs_stats_bytes_read(tcon, total_read);
2784 *poffset += total_read;
2785
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002786 /* mask nodata case */
2787 if (rc == -ENODATA)
2788 rc = 0;
2789
Jeff Layton1c892542012-05-16 07:13:17 -04002790 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791}
2792
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002793ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002794 unsigned long nr_segs, loff_t pos)
2795{
2796 ssize_t read;
2797
2798 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2799 if (read > 0)
2800 iocb->ki_pos = pos;
2801
2802 return read;
2803}
2804
/*
 * Read entry point for "strict cache" mounts.
 *
 * Falls back to an uncached read when we lack a read oplock; uses the
 * generic (cached) path when POSIX byte-range lock semantics apply; and
 * otherwise only reads through the cache after verifying no mandatory
 * brlock conflicts with the requested range.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;	/* returned when a conflicting brlock blocks the read */

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	/* POSIX (advisory) lock semantics: no mandatory-lock check needed */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, true))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
/*
 * Synchronous read: pull up to @read_size bytes starting at *@offset into
 * @read_data via the server's ->sync_read op, in rsize-bounded chunks,
 * reopening an invalidated handle and retrying on -EAGAIN.
 *
 * On success returns the number of bytes read and advances *@offset by the
 * bytes actually read; returns a negative errno only if nothing was read.
 * Used by cifs_readpage_worker() below for single-page reads.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* forward the originating pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		/* retry the chunk until it succeeds or fails permanently */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				/* partial success: report what we got */
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
2934
/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	/* take the page lock; VM_FAULT_LOCKED tells the VM we hold it */
	lock_page(page);
	return VM_FAULT_LOCKED;
}
2947
2948static struct vm_operations_struct cifs_file_vm_ops = {
2949 .fault = filemap_fault,
2950 .page_mkwrite = cifs_page_mkwrite,
2951};
2952
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002953int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2954{
2955 int rc, xid;
2956 struct inode *inode = file->f_path.dentry->d_inode;
2957
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002958 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002959
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002960 if (!CIFS_I(inode)->clientCanCacheRead) {
2961 rc = cifs_invalidate_mapping(inode);
2962 if (rc)
2963 return rc;
2964 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002965
2966 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002967 if (rc == 0)
2968 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002969 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002970 return rc;
2971}
2972
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2974{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 int rc, xid;
2976
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002977 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002978 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002980 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002981 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 return rc;
2983 }
2984 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002985 if (rc == 0)
2986 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002987 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 return rc;
2989}
2990
/*
 * Work-queue completion for async page-cache reads issued by
 * cifs_readpages(): mark each page uptodate on success, unlock it,
 * hand it to fscache, drop the page references, and finally drop the
 * readdata reference taken for the read.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		/* note: fscache store is deliberately done after unlock */
		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3018
/*
 * Receive @len bytes from the server socket into rdata->pages, one page
 * per recv. Pages wholly past the received data are either zero-filled
 * (when beyond the server's known EOF, to stop the VFS re-requesting
 * them) or released, and are removed from the rdata page array.
 *
 * Returns the total bytes received, or the last negative recv result if
 * nothing was received.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* pull this page's worth of data off the socket */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
3094
/*
 * ->readpages: batch the VFS-supplied page list into contiguous,
 * rsize-bounded runs, add each run to the page cache, and issue one
 * async read per run (completed by cifs_readv_complete()).
 *
 * Returns 0 on success or once all requests are issued; a nonzero rc
 * only reflects the run that failed to be set up or sent.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the originating pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* send failed: release the pages of this run */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our local ref; completion holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3241
/*
 * Fill one page: try fscache first, otherwise do a synchronous read from
 * the server via cifs_read(), zero the tail, and mark the page uptodate.
 *
 * Note the label structure: the success path falls through io_error so
 * the kmap and page reference taken here are released on every server-read
 * path; the fscache hit skips both (no map/ref was taken).
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the tail beyond what was actually read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3285
3286static int cifs_readpage(struct file *file, struct page *page)
3287{
3288 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3289 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003290 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003292 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293
3294 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303295 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003296 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303297 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298 }
3299
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003300 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003301 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302
3303 rc = cifs_readpage_worker(file, page, &offset);
3304
3305 unlock_page(page);
3306
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003307 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 return rc;
3309}
3310
Steve Frencha403a0a2007-07-26 15:54:16 +00003311static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3312{
3313 struct cifsFileInfo *open_file;
3314
Jeff Layton44772882010-10-15 15:34:03 -04003315 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003316 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003317 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003318 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003319 return 1;
3320 }
3321 }
Jeff Layton44772882010-10-15 15:34:03 -04003322 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003323 return 0;
3324}
3325
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326/* We do not want to update the file size from server for inodes
3327 open for write - to avoid races with writepage extending
3328 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003329 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 but this is tricky to do without racing with writebehind
3331 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003332bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333{
Steve Frencha403a0a2007-07-26 15:54:16 +00003334 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003335 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003336
Steve Frencha403a0a2007-07-26 15:54:16 +00003337 if (is_inode_writable(cifsInode)) {
3338 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003339 struct cifs_sb_info *cifs_sb;
3340
Steve Frenchc32a0b62006-01-12 14:41:28 -08003341 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003342 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003343 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003344 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003345 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003346 }
3347
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003348 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003349 return true;
Steve French7ba52632007-02-08 18:14:13 +00003350
Steve French4b18f2a2008-04-29 00:06:05 +00003351 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003352 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003353 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354}
3355
/*
 * ->write_begin: grab (and possibly pre-fill) the page cache page that a
 * buffered write will copy into. Skips the read-before-write whenever the
 * page is already uptodate, the write covers a whole page, or an oplock
 * lets us prove the existing data won't be needed.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3427
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303428static int cifs_release_page(struct page *page, gfp_t gfp)
3429{
3430 if (PagePrivate(page))
3431 return 0;
3432
3433 return cifs_fscache_release_page(page, gfp);
3434}
3435
3436static void cifs_invalidate_page(struct page *page, unsigned long offset)
3437{
3438 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3439
3440 if (offset == 0)
3441 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3442}
3443
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003444static int cifs_launder_page(struct page *page)
3445{
3446 int rc = 0;
3447 loff_t range_start = page_offset(page);
3448 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3449 struct writeback_control wbc = {
3450 .sync_mode = WB_SYNC_ALL,
3451 .nr_to_write = 0,
3452 .range_start = range_start,
3453 .range_end = range_end,
3454 };
3455
3456 cFYI(1, "Launder page: %p", page);
3457
3458 if (clear_page_dirty_for_io(page))
3459 rc = cifs_writepage_locked(page, &wbc);
3460
3461 cifs_fscache_invalidate_page(page, page->mapping->host);
3462 return rc;
3463}
3464
/*
 * Work handler run when the server breaks this file's oplock: flush any
 * cached dirty data, drop the pagecache if read caching is being lost,
 * re-push cached byte-range locks to the server, and finally acknowledge
 * the break.  The ordering of these steps matters and must be preserved.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/*
		 * Tell local lease holders what level of caching remains:
		 * read-only if we keep the read oplock, none otherwise.
		 */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/*
			 * Read caching is gone: wait for the writeback we
			 * just kicked off, record any error on the mapping,
			 * and toss the pagecache so subsequent reads go to
			 * the server.
			 */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* re-send cached byte-range locks before acknowledging the break */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3504
/*
 * Page-cache operations used when the server's negotiated buffer is
 * large enough for the SMB header plus a full page, so multi-page reads
 * via ->readpages are possible.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003517
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages here -- see the comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};