blob: e2a8e445627588b2012603ddc2df4f21a7351602 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate) and,
 * optionally, instantiate/update the corresponding inode.
 *
 * @full_path: path of the file relative to the share root
 * @pinode:    NULL if the caller does not want inode info; otherwise points
 *             to an inode pointer that is either filled in (when *pinode is
 *             NULL) or refreshed from the returned attributes
 * @sb:        superblock of the cifs mount
 * @mode:      create mode; the current umask is applied below
 * @f_flags:   VFS open flags, converted to SMB_O_* for the wire call
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle
 * @xid:       transaction id for debugging/accounting
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	/* response buffer for the server's UNIX attributes */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server sent no usable attribute data */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: refresh its attributes in place */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open a file the "NT way" via the protocol-specific ->open server op
 * (used when the POSIX-extension open is unavailable or failed), then
 * refresh the inode metadata from the server.
 *
 * Returns 0 on success or a negative errno (-ENOSYS if the mounted
 * dialect provides no open operation).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the file metadata returned by the open call */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* mounts with backup intent open with an extra privilege bit */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	/* refresh local inode metadata from what the server reported */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
239
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for @file, link it onto the tcon's and inode's open-file lists, and
 * store it in file->private_data.
 *
 * The returned structure starts with a reference count of 1; it is
 * released via cifsFileInfo_put().  Returns NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid byte-range lock list, hung off the inode's llist */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	mutex_lock(&cinode->lock_mutex);
	list_add(&fdlocks->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);

	cfile->count = 1;		/* initial reference, dropped on close */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	/* record the server handle/oplock via the dialect-specific op */
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
290
/*
 * Take an additional reference on an open file's private data.
 * Acquires cifs_file_list_lock around the locked helper, so must be
 * called without that lock held.  Returns its argument for convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
299
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops this unlinks the structure from the
 * inode/tcon lists, closes the handle on the server (if still valid),
 * frees any outstanding byte-range lock records, and frees the structure.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other openers still hold references; nothing to tear down */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* ensure no oplock-break work is still running for this handle */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		/* close on the server via the dialect-specific op, if any */
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
368
/*
 * VFS ->open for regular files on a cifs mount.
 *
 * Tries the POSIX-extension open first (when the server advertises it
 * and it is not known-broken), falling back to the NT-style open.  On
 * success it allocates the cifsFileInfo private data and, for a newly
 * created file on a unix-extension mount, pushes the desired mode to
 * the server.  Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	/* only request an oplock if the server supports them */
	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims posix support but rejects the call:
			   remember that and never try posix open again */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open we just performed */
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
475
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */

	/* nothing is re-sent yet, so this always reports success */
	return 0;
}
488
/*
 * Re-open a file whose server handle became invalid (typically after a
 * session reconnect).  Serialized per-file via cfile->fh_mutex; tries
 * the POSIX-extension open first, then the dialect-specific ->open op.
 *
 * @cfile:     the open-file instance whose handle needs refreshing
 * @can_flush: true if it is safe to flush dirty pages and refresh inode
 *             metadata from the server (false when called from writeback,
 *             where flushing could deadlock — see comment below)
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else reopened it while we waited on the mutex */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/* record the new handle/oplock, then try to restore locks */
	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
618
619int cifs_close(struct inode *inode, struct file *file)
620{
Jeff Layton77970692011-04-05 16:23:47 -0700621 if (file->private_data != NULL) {
622 cifsFileInfo_put(file->private_data);
623 file->private_data = NULL;
624 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625
Steve Frenchcdff08e2010-10-21 22:46:14 +0000626 /* return code from the ->release op is always ignored */
627 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628}
629
/*
 * VFS ->release for directories: close any uncompleted server-side
 * search, free the buffered search results, and free the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/* search still open on the server: close it there too */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	/* release the network buffer holding the last search response */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
680
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400681static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300682cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000683{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400684 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000685 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400686 if (!lock)
687 return lock;
688 lock->offset = offset;
689 lock->length = length;
690 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400691 lock->pid = current->tgid;
692 INIT_LIST_HEAD(&lock->blist);
693 init_waitqueue_head(&lock->block_q);
694 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400695}
696
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700697void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400698cifs_del_lock_waiters(struct cifsLockInfo *lock)
699{
700 struct cifsLockInfo *li, *tmp;
701 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
702 list_del_init(&li->blist);
703 wake_up(&li->block_q);
704 }
705}
706
/*
 * Scan the locks held through one file handle (@fdlocks) for a lock that
 * conflicts with the range [offset, offset + length) being locked with
 * @type on behalf of @cfile.  On conflict, *conf_lock is set to the
 * offending entry and true is returned.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges can never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/*
		 * A shared-type request is compatible with an existing lock
		 * when either (a) both handles compare equal and the holder
		 * is our own thread group, or (b) the existing lock has the
		 * same (shared) type.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		*conf_lock = li;
		return true;
	}
	return false;
}
729
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400730static bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300731cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
732 __u8 type, struct cifsLockInfo **conf_lock)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400733{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300734 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700735 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300736 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300737
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700738 list_for_each_entry(cur, &cinode->llist, llist) {
739 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300740 cfile, conf_lock);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300741 if (rc)
742 break;
743 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300744
745 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400746}
747
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300748/*
749 * Check if there is another lock that prevents us to set the lock (mandatory
750 * style). If such a lock exists, update the flock structure with its
751 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
752 * or leave it the same if we can't. Returns 0 if we don't need to request to
753 * the server or 1 otherwise.
754 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		/* report the conflicting lock's properties back via flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no cached conflict but cache is off - must ask the server */
		rc = 1;
	else
		/* cache is authoritative and holds no conflict - range free */
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
785
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400786static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300787cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400788{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300789 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400790 mutex_lock(&cinode->lock_mutex);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700791 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400792 mutex_unlock(&cinode->lock_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000793}
794
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300795/*
796 * Set the byte-range lock (mandatory style). Returns:
797 * 1) 0, if we set the lock and don't need to request to the server;
798 * 2) 1, if no locks prevent us but we need to request to the server;
799 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
800 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400801static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300802cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400803 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400804{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400805 struct cifsLockInfo *conf_lock;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300806 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400807 bool exist;
808 int rc = 0;
809
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400810try_again:
811 exist = false;
812 mutex_lock(&cinode->lock_mutex);
813
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300814 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
815 lock->type, &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400816 if (!exist && cinode->can_cache_brlcks) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700817 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400818 mutex_unlock(&cinode->lock_mutex);
819 return rc;
820 }
821
822 if (!exist)
823 rc = 1;
824 else if (!wait)
825 rc = -EACCES;
826 else {
827 list_add_tail(&lock->blist, &conf_lock->blist);
828 mutex_unlock(&cinode->lock_mutex);
829 rc = wait_event_interruptible(lock->block_q,
830 (lock->blist.prev == &lock->blist) &&
831 (lock->blist.next == &lock->blist));
832 if (!rc)
833 goto try_again;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400834 mutex_lock(&cinode->lock_mutex);
835 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400836 }
837
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400838 mutex_unlock(&cinode->lock_mutex);
839 return rc;
840}
841
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300842/*
843 * Check if there is another lock that prevents us to set the lock (posix
844 * style). If such a lock exists, update the flock structure with its
845 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
846 * or leave it the same if we can't. Returns 0 if we don't need to request to
847 * the server or 1 otherwise.
848 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400849static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400850cifs_posix_lock_test(struct file *file, struct file_lock *flock)
851{
852 int rc = 0;
853 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
854 unsigned char saved_type = flock->fl_type;
855
Pavel Shilovsky50792762011-10-29 17:17:57 +0400856 if ((flock->fl_flags & FL_POSIX) == 0)
857 return 1;
858
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400859 mutex_lock(&cinode->lock_mutex);
860 posix_test_lock(file, flock);
861
862 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
863 flock->fl_type = saved_type;
864 rc = 1;
865 }
866
867 mutex_unlock(&cinode->lock_mutex);
868 return rc;
869}
870
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300871/*
872 * Set the byte-range lock (posix style). Returns:
873 * 1) 0, if we set the lock and don't need to request to the server;
874 * 2) 1, if we need to request to the server;
875 * 3) <0, if the error occurs while setting the lock.
876 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	/* only POSIX-style locks can be cached locally */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* caching turned off meanwhile - caller must ask the server */
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * Blocked by another lock: wait until the blocker goes away
		 * (fl_next cleared), then retry from the top; on signal,
		 * remove ourselves from the blocked list before returning.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
903
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -0700904int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400905cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400906{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400907 unsigned int xid;
908 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400909 struct cifsLockInfo *li, *tmp;
910 struct cifs_tcon *tcon;
911 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400912 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400913 LOCKING_ANDX_RANGE *buf, *cur;
914 int types[] = {LOCKING_ANDX_LARGE_FILES,
915 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
916 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400917
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400918 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400919 tcon = tlink_tcon(cfile->tlink);
920
921 mutex_lock(&cinode->lock_mutex);
922 if (!cinode->can_cache_brlcks) {
923 mutex_unlock(&cinode->lock_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400924 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400925 return rc;
926 }
927
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400928 /*
929 * Accessing maxBuf is racy with cifs_reconnect - need to store value
930 * and check it for zero before using.
931 */
932 max_buf = tcon->ses->server->maxBuf;
933 if (!max_buf) {
934 mutex_unlock(&cinode->lock_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400935 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400936 return -EINVAL;
937 }
938
939 max_num = (max_buf - sizeof(struct smb_hdr)) /
940 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400941 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
942 if (!buf) {
943 mutex_unlock(&cinode->lock_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400944 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +0400945 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400946 }
947
948 for (i = 0; i < 2; i++) {
949 cur = buf;
950 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700951 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400952 if (li->type != types[i])
953 continue;
954 cur->Pid = cpu_to_le16(li->pid);
955 cur->LengthLow = cpu_to_le32((u32)li->length);
956 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
957 cur->OffsetLow = cpu_to_le32((u32)li->offset);
958 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
959 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700960 stored_rc = cifs_lockv(xid, tcon,
961 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +0300962 (__u8)li->type, 0, num,
963 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400964 if (stored_rc)
965 rc = stored_rc;
966 cur = buf;
967 num = 0;
968 } else
969 cur++;
970 }
971
972 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700973 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +0300974 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400975 if (stored_rc)
976 rc = stored_rc;
977 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400978 }
979
980 cinode->can_cache_brlcks = false;
981 mutex_unlock(&cinode->lock_mutex);
982
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400983 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400984 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400985 return rc;
986}
987
/* copied from fs/locks.c with a name change */
/*
 * Iterate over every file_lock hanging off inode->i_flock; lockp is a
 * struct file_lock ** cursor, so *lockp is the current entry.
 */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
992
/*
 * Snapshot of one POSIX lock, preallocated so that lock data can be
 * copied out of the inode's flock list and sent to the server without
 * allocating while lock_flocks() is held.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the caller's locks_to_send list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* number of bytes in the range */
	__u32 pid;		/* owner pid recorded from fl_pid */
	__u16 netfid;		/* server file handle the lock belongs to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1001
/*
 * Push all cached POSIX byte-range locks on the file's inode to the
 * server.  Works in three phases: count the FL_POSIX locks under
 * lock_flocks(), preallocate that many lock_to_push entries (allocation
 * is not allowed while lock_flocks() is held), then copy the lock data
 * and send each one via CIFSSMBPosixLock.  Always clears
 * can_cache_brlcks before returning.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* locks were already pushed - nothing left to do */
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/* phase 1: count the POSIX locks we will have to push */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 2: copy lock data into the preallocated entries */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* phase 3: send each snapshot to the server, keep the last error */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1100
1101static int
1102cifs_push_locks(struct cifsFileInfo *cfile)
1103{
1104 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1105 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1106
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001107 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001108 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1109 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1110 return cifs_push_posix_locks(cfile);
1111
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001112 return tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001113}
1114
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001115static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001116cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001117 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001119 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001120 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001121 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001122 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001123 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001124 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001125 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001127 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001128 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001129 "not implemented yet");
1130 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001131 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001132 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001134 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001136 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001137 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001138 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001139 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001140 *lock = 1;
1141 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001142 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001143 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001144 *unlock = 1;
1145 /* Check if unlock includes more than one lock range */
1146 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001147 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001148 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001149 *lock = 1;
1150 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001151 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001152 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001153 *lock = 1;
1154 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001155 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001156 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001157 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001159 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001160}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
/*
 * Handle a lock-test (F_GETLK style) request.  Tries the local caches
 * first; if the answer must come from the server, probes by actually
 * acquiring and then releasing the range: first with the requested
 * type, then downgraded to a shared lock, and reports what succeeded
 * through flock->fl_type.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0 means the local cache answered authoritatively */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* mandatory style: consult the cached brlock list first */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the lock with the requested type ... */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* ... it worked, so the range is free - release it again */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* a shared request failed - an exclusive lock is in the way */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive failed - retry the probe with a shared lock */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1229
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001230void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001231cifs_move_llist(struct list_head *source, struct list_head *dest)
1232{
1233 struct list_head *li, *tmp;
1234 list_for_each_safe(li, tmp, source)
1235 list_move(li, dest);
1236}
1237
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001238void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001239cifs_free_llist(struct list_head *llist)
1240{
1241 struct cifsLockInfo *li, *tmp;
1242 list_for_each_entry_safe(li, tmp, llist, llist) {
1243 cifs_del_lock_waiters(li);
1244 list_del(&li->llist);
1245 kfree(li);
1246 }
1247}
1248
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001249int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001250cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1251 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001252{
1253 int rc = 0, stored_rc;
1254 int types[] = {LOCKING_ANDX_LARGE_FILES,
1255 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1256 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001257 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001258 LOCKING_ANDX_RANGE *buf, *cur;
1259 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1260 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1261 struct cifsLockInfo *li, *tmp;
1262 __u64 length = 1 + flock->fl_end - flock->fl_start;
1263 struct list_head tmp_llist;
1264
1265 INIT_LIST_HEAD(&tmp_llist);
1266
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001267 /*
1268 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1269 * and check it for zero before using.
1270 */
1271 max_buf = tcon->ses->server->maxBuf;
1272 if (!max_buf)
1273 return -EINVAL;
1274
1275 max_num = (max_buf - sizeof(struct smb_hdr)) /
1276 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001277 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1278 if (!buf)
1279 return -ENOMEM;
1280
1281 mutex_lock(&cinode->lock_mutex);
1282 for (i = 0; i < 2; i++) {
1283 cur = buf;
1284 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001285 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001286 if (flock->fl_start > li->offset ||
1287 (flock->fl_start + length) <
1288 (li->offset + li->length))
1289 continue;
1290 if (current->tgid != li->pid)
1291 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001292 if (types[i] != li->type)
1293 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001294 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001295 /*
1296 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001297 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001298 */
1299 list_del(&li->llist);
1300 cifs_del_lock_waiters(li);
1301 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001302 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001303 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001304 cur->Pid = cpu_to_le16(li->pid);
1305 cur->LengthLow = cpu_to_le32((u32)li->length);
1306 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1307 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1308 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1309 /*
1310 * We need to save a lock here to let us add it again to
1311 * the file's list if the unlock range request fails on
1312 * the server.
1313 */
1314 list_move(&li->llist, &tmp_llist);
1315 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001316 stored_rc = cifs_lockv(xid, tcon,
1317 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001318 li->type, num, 0, buf);
1319 if (stored_rc) {
1320 /*
1321 * We failed on the unlock range
1322 * request - add all locks from the tmp
1323 * list to the head of the file's list.
1324 */
1325 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001326 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001327 rc = stored_rc;
1328 } else
1329 /*
1330 * The unlock range request succeed -
1331 * free the tmp list.
1332 */
1333 cifs_free_llist(&tmp_llist);
1334 cur = buf;
1335 num = 0;
1336 } else
1337 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001338 }
1339 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001340 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001341 types[i], num, 0, buf);
1342 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001343 cifs_move_llist(&tmp_llist,
1344 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001345 rc = stored_rc;
1346 } else
1347 cifs_free_llist(&tmp_llist);
1348 }
1349 }
1350
1351 mutex_unlock(&cinode->lock_mutex);
1352 kfree(buf);
1353 return rc;
1354}
1355
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001357cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001358 bool wait_flag, bool posix_lck, int lock, int unlock,
1359 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360{
1361 int rc = 0;
1362 __u64 length = 1 + flock->fl_end - flock->fl_start;
1363 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1364 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001365 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001366
1367 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001368 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001369
1370 rc = cifs_posix_lock_set(file, flock);
1371 if (!rc || rc < 0)
1372 return rc;
1373
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001374 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001375 posix_lock_type = CIFS_RDLCK;
1376 else
1377 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001378
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001379 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001380 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001381
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001382 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1383 current->tgid, flock->fl_start, length,
1384 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001385 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001386 }
1387
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001388 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001389 struct cifsLockInfo *lock;
1390
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001391 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001392 if (!lock)
1393 return -ENOMEM;
1394
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001395 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001396 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001397 kfree(lock);
1398 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001399 goto out;
1400
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001401 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1402 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001403 if (rc) {
1404 kfree(lock);
1405 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001407
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001408 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001409 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001410 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001411
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001412out:
1413 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001414 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415 return rc;
1416}
1417
1418int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1419{
1420 int rc, xid;
1421 int lock = 0, unlock = 0;
1422 bool wait_flag = false;
1423 bool posix_lck = false;
1424 struct cifs_sb_info *cifs_sb;
1425 struct cifs_tcon *tcon;
1426 struct cifsInodeInfo *cinode;
1427 struct cifsFileInfo *cfile;
1428 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001429 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001430
1431 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001432 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001433
1434 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1435 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1436 flock->fl_start, flock->fl_end);
1437
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438 cfile = (struct cifsFileInfo *)file->private_data;
1439 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001440
1441 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1442 tcon->ses->server);
1443
1444 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001445 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001446 cinode = CIFS_I(file->f_path.dentry->d_inode);
1447
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001448 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001449 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1450 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1451 posix_lck = true;
1452 /*
1453 * BB add code here to normalize offset and length to account for
1454 * negative length which we can not accept over the wire.
1455 */
1456 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001457 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001458 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001459 return rc;
1460 }
1461
1462 if (!lock && !unlock) {
1463 /*
1464 * if no lock or unlock then nothing to do since we do not
1465 * know what it is
1466 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001467 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001468 return -EOPNOTSUPP;
1469 }
1470
1471 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1472 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001473 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 return rc;
1475}
1476
Jeff Layton597b0272012-03-23 14:40:56 -04001477/*
1478 * update the file size (if needed) after a write. Should be called with
1479 * the inode->i_lock held
1480 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001481void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001482cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1483 unsigned int bytes_written)
1484{
1485 loff_t end_of_write = offset + bytes_written;
1486
1487 if (end_of_write > cifsi->server_eof)
1488 cifsi->server_eof = end_of_write;
1489}
1490
/*
 * Synchronously write @write_size bytes from @write_data to the file
 * behind @open_file, starting at *@offset, in chunks of at most wsize.
 * An invalidated handle is reopened before each send, and -EAGAIN from
 * the transport is retried.  On success the cached server EOF and the
 * local inode size are updated (under inode->i_lock) and *@offset is
 * advanced past the written data.
 *
 * Returns the number of bytes written, or a negative errno if the very
 * first chunk fails (a short run still returns the partial count).
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* protocol variant must provide a synchronous write op */
	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;	/* report the partial write */
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects server_eof (see cifs_update_eof) */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the local inode size if we wrote past it */
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1575
/*
 * Find an open handle on @cifs_inode that was opened with read access.
 * The returned cifsFileInfo carries an extra reference taken under
 * cifs_file_list_lock (so it cannot be closed underneath us); the caller
 * must drop it with cifsFileInfo_put().  Returns NULL if no valid
 * readable handle exists.
 *
 * @fsuid_only restricts the search to handles opened by the current
 * fsuid, and is honored only on multiuser mounts.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001609
Jeff Layton6508d902010-09-29 19:51:11 -04001610struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1611 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001612{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001613 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001614 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001615 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001616 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001617 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001618
Steve French60808232006-04-22 15:53:05 +00001619 /* Having a null inode here (because mapping->host was set to zero by
1620 the VFS or MM) should not happen but we had reports of on oops (due to
1621 it being zero) during stress testcases so we need to check for it */
1622
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001623 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001624 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001625 dump_stack();
1626 return NULL;
1627 }
1628
Jeff Laytond3892292010-11-02 16:22:50 -04001629 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1630
Jeff Layton6508d902010-09-29 19:51:11 -04001631 /* only filter by fsuid on multiuser mounts */
1632 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1633 fsuid_only = false;
1634
Jeff Layton44772882010-10-15 15:34:03 -04001635 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001636refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001637 if (refind > MAX_REOPEN_ATT) {
1638 spin_unlock(&cifs_file_list_lock);
1639 return NULL;
1640 }
Steve French6148a742005-10-05 12:23:19 -07001641 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001642 if (!any_available && open_file->pid != current->tgid)
1643 continue;
1644 if (fsuid_only && open_file->uid != current_fsuid())
1645 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001646 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001647 if (!open_file->invalidHandle) {
1648 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001649 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001650 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001651 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001652 } else {
1653 if (!inv_file)
1654 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001655 }
Steve French6148a742005-10-05 12:23:19 -07001656 }
1657 }
Jeff Layton2846d382008-09-22 21:33:33 -04001658 /* couldn't find useable FH with same pid, try any available */
1659 if (!any_available) {
1660 any_available = true;
1661 goto refind_writable;
1662 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001663
1664 if (inv_file) {
1665 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001666 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001667 }
1668
Jeff Layton44772882010-10-15 15:34:03 -04001669 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001670
1671 if (inv_file) {
1672 rc = cifs_reopen_file(inv_file, false);
1673 if (!rc)
1674 return inv_file;
1675 else {
1676 spin_lock(&cifs_file_list_lock);
1677 list_move_tail(&inv_file->flist,
1678 &cifs_inode->openFileList);
1679 spin_unlock(&cifs_file_list_lock);
1680 cifsFileInfo_put(inv_file);
1681 spin_lock(&cifs_file_list_lock);
1682 ++refind;
1683 goto refind_writable;
1684 }
1685 }
1686
Steve French6148a742005-10-05 12:23:19 -07001687 return NULL;
1688}
1689
/*
 * Write the byte range [@from, @to) of @page back to the server using any
 * writable open handle on the owning inode.  The range is clamped so the
 * write never extends the file, and a page that now lies wholly beyond
 * i_size (racing with truncate) is silently skipped.
 *
 * Returns 0 on success (or on the truncate race), a negative errno on
 * failure, -EIO when no writable handle is available.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* absolute file offset of the first byte to write */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity check the requested range before using it */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		/* drop the reference find_writable_file took for us */
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1743
/*
 * ->writepages() for CIFS: gather runs of contiguous dirty pages (up to a
 * full wsize worth per request) and hand each run to the protocol's
 * async_writev op as a single write.  Falls back to generic_writepages()
 * (one page at a time via cifs_writepage) when wsize is smaller than a
 * page.
 *
 * NOTE(review): isize is sampled once before the loop; pages at or beyond
 * it are skipped and the last page's tail is sized against it.  A
 * concurrent extension/truncate changes i_size after this snapshot --
 * confirm this is handled elsewhere.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;
	loff_t isize = i_size_read(mapping->host);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* how many pages to look for: at most one wsize's worth,
		   and never past the end of the requested range */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
			     end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* lock a consecutive run of the found pages, re-checking
		   each one now that we hold its lock */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			/* page lies entirely beyond EOF -- nothing to send */
			if (page_offset(page) >= isize) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		/* full pages except the last one, which is clipped to EOF */
		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(isize - page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
			       wdata->tailsz;

		/* grab a writable handle and send; for data-integrity
		   writeback (WB_SYNC_ALL) keep retrying on -EAGAIN with a
		   freshly looked-up handle */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		/* on success the completion handler owns the pages */
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001954static int
1955cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001957 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001958 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001960 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961/* BB add check for wbc flags */
1962 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001963 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001964 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001965
1966 /*
1967 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1968 *
1969 * A writepage() implementation always needs to do either this,
1970 * or re-dirty the page with "redirty_page_for_writepage()" in
1971 * the case of a failure.
1972 *
1973 * Just unlocking the page will cause the radix tree tag-bits
1974 * to fail to update with the state of the page correctly.
1975 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001976 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001977retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001979 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1980 goto retry_write;
1981 else if (rc == -EAGAIN)
1982 redirty_page_for_writepage(wbc, page);
1983 else if (rc != 0)
1984 SetPageError(page);
1985 else
1986 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001987 end_page_writeback(page);
1988 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001989 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 return rc;
1991}
1992
/* ->writepage(): write the locked page, then release the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
1999
/*
 * ->write_end() for the CIFS address space: called after ->write_begin()
 * once @copied bytes have been copied into @page at file position @pos.
 *
 * If the page cannot be marked up to date, the new data is pushed to the
 * server synchronously via cifs_write() instead of merely dirtying the
 * page. Returns the number of bytes accepted, or a negative error from
 * cifs_write().
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the writer's pid to the server when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		/*
		 * write_begin flagged the page Checked; it is only fully
		 * valid if the whole requested range was copied.
		 */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		/* page fully cached: just dirty it and report success */
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* grow the in-core file size under i_lock if we extended it */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2060
/*
 * Strict-cache fsync. Flushes dirty pages in [start, end], and if we do
 * not hold a read oplock (clientCanCacheRead) also invalidates the page
 * cache so stale data is not served later. Finally asks the server to
 * flush its cached writes for this handle, unless the CIFS_MOUNT_NOSSYNC
 * mount flag suppresses that round trip.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* push local dirty data before talking to the server */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	    file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;	/* protocol lacks a flush op */
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2103
/*
 * Non-strict fsync. Flushes dirty pages in [start, end] and then, unless
 * the CIFS_MOUNT_NOSSYNC mount flag is set, asks the server to flush its
 * cached data for this handle. Unlike cifs_strict_fsync() this variant
 * never invalidates the local page cache.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	/* push local dirty data before asking the server to flush */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	    file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;	/* protocol lacks a flush op */
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138/*
2139 * As file closes, flush all cached write data for this inode checking
2140 * for write behind errors.
2141 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002142int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002144 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 int rc = 0;
2146
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002147 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002148 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002149
Joe Perchesb6b38f72010-04-21 03:50:45 +00002150 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151
2152 return rc;
2153}
2154
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002155static int
2156cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2157{
2158 int rc = 0;
2159 unsigned long i;
2160
2161 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002162 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002163 if (!pages[i]) {
2164 /*
2165 * save number of pages we have already allocated and
2166 * return with ENOMEM error
2167 */
2168 num_pages = i;
2169 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002170 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002171 }
2172 }
2173
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002174 if (rc) {
2175 for (i = 0; i < num_pages; i++)
2176 put_page(pages[i]);
2177 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002178 return rc;
2179}
2180
2181static inline
2182size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2183{
2184 size_t num_pages;
2185 size_t clen;
2186
2187 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002188 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002189
2190 if (cur_len)
2191 *cur_len = clen;
2192
2193 return num_pages;
2194}
2195
/*
 * Workqueue completion handler for an uncached async write, run once the
 * server has answered (or the request failed).
 *
 * Updates the cached server EOF and, if the file grew, the in-core
 * i_size (both under i_lock), wakes the waiter in cifs_iovec_write(),
 * and releases the data pages - except on -EAGAIN, where the pages are
 * kept so the request can be resent via cifs_uncached_retry_writev().
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* keep the pages around so a retryable write can be resent */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2220
2221/* attempt to send write to server, retry on any -EAGAIN errors */
2222static int
2223cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2224{
2225 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002226 struct TCP_Server_Info *server;
2227
2228 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002229
2230 do {
2231 if (wdata->cfile->invalidHandle) {
2232 rc = cifs_reopen_file(wdata->cfile, false);
2233 if (rc != 0)
2234 continue;
2235 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002236 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002237 } while (rc == -EAGAIN);
2238
2239 return rc;
2240}
2241
/*
 * Uncached write path: split the user's iovec into wsize-sized chunks,
 * copy each chunk into freshly allocated pages, fire off an async write
 * per chunk, then wait for the replies in offset order and total up what
 * the server accepted.
 *
 * Returns the number of bytes written (updating *poffset), or a negative
 * error if nothing was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	/* this path requires the protocol's async write op */
	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	/* forward the writer's pid to the server when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			/* no pages were kept, so a plain kfree suffices */
			kfree(wdata);
			break;
		}

		/* copy the user data for this chunk into wdata's pages */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		/* cur_len becomes the number of bytes actually copied */
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* last page may be partially filled */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2371
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002372ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002373 unsigned long nr_segs, loff_t pos)
2374{
2375 ssize_t written;
2376 struct inode *inode;
2377
2378 inode = iocb->ki_filp->f_path.dentry->d_inode;
2379
2380 /*
2381 * BB - optimize the way when signing is disabled. We can drop this
2382 * extra memory-to-memory copying and use iovec buffers for constructing
2383 * write request.
2384 */
2385
2386 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2387 if (written > 0) {
2388 CIFS_I(inode)->invalid_mapping = true;
2389 iocb->ki_pos = pos;
2390 }
2391
2392 return written;
2393}
2394
2395ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2396 unsigned long nr_segs, loff_t pos)
2397{
2398 struct inode *inode;
2399
2400 inode = iocb->ki_filp->f_path.dentry->d_inode;
2401
2402 if (CIFS_I(inode)->clientCanCacheAll)
2403 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2404
2405 /*
2406 * In strict cache mode we need to write the data to the server exactly
2407 * from the pos to pos+len-1 rather than flush all affected pages
2408 * because it may cause a error with mandatory locks on these pages but
2409 * not on the region from pos to ppos+len-1.
2410 */
2411
2412 return cifs_user_writev(iocb, iov, nr_segs, pos);
2413}
2414
Jeff Layton0471ca32012-05-16 07:13:16 -04002415static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002416cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002417{
2418 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002419
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002420 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2421 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002422 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002423 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002424 INIT_LIST_HEAD(&rdata->list);
2425 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002426 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002427 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002428
Jeff Layton0471ca32012-05-16 07:13:16 -04002429 return rdata;
2430}
2431
Jeff Layton6993f742012-05-16 07:13:17 -04002432void
2433cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002434{
Jeff Layton6993f742012-05-16 07:13:17 -04002435 struct cifs_readdata *rdata = container_of(refcount,
2436 struct cifs_readdata, refcount);
2437
2438 if (rdata->cfile)
2439 cifsFileInfo_put(rdata->cfile);
2440
Jeff Layton0471ca32012-05-16 07:13:16 -04002441 kfree(rdata);
2442}
2443
Jeff Layton2a1bb132012-05-16 07:13:17 -04002444static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002445cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002446{
2447 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002448 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002449 unsigned int i;
2450
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002451 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002452 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2453 if (!page) {
2454 rc = -ENOMEM;
2455 break;
2456 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002457 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002458 }
2459
2460 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002461 for (i = 0; i < nr_pages; i++) {
2462 put_page(rdata->pages[i]);
2463 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002464 }
2465 }
2466 return rc;
2467}
2468
2469static void
2470cifs_uncached_readdata_release(struct kref *refcount)
2471{
Jeff Layton1c892542012-05-16 07:13:17 -04002472 struct cifs_readdata *rdata = container_of(refcount,
2473 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002474 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002475
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002476 for (i = 0; i < rdata->nr_pages; i++) {
2477 put_page(rdata->pages[i]);
2478 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002479 }
2480 cifs_readdata_release(refcount);
2481}
2482
2483static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002484cifs_retry_async_readv(struct cifs_readdata *rdata)
2485{
2486 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002487 struct TCP_Server_Info *server;
2488
2489 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002490
2491 do {
2492 if (rdata->cfile->invalidHandle) {
2493 rc = cifs_reopen_file(rdata->cfile, true);
2494 if (rc != 0)
2495 continue;
2496 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002497 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002498 } while (rc == -EAGAIN);
2499
2500 return rc;
2501}
2502
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	/* where this response's data lands relative to the whole request */
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			/* first failure sticks; later pages are skipped */
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
2557
2558static void
2559cifs_uncached_readv_complete(struct work_struct *work)
2560{
2561 struct cifs_readdata *rdata = container_of(work,
2562 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002563
2564 complete(&rdata->done);
2565 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2566}
2567
/*
 * Receive callback for uncached reads: pull @len bytes of response data
 * off the socket into rdata's pages. A short final page is zero-padded
 * and its data length recorded in rdata->tailsz; pages past the end of
 * the data are released and dropped from rdata->nr_pages.
 *
 * Returns the number of bytes read, or a negative error from
 * cifs_readv_from_socket() if nothing was read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		/* read this page's worth of data from the socket */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2615
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002616static ssize_t
2617cifs_iovec_read(struct file *file, const struct iovec *iov,
2618 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619{
Jeff Layton1c892542012-05-16 07:13:17 -04002620 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002621 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002622 ssize_t total_read = 0;
2623 loff_t offset = *poffset;
2624 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002626 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002628 struct cifs_readdata *rdata, *tmp;
2629 struct list_head rdata_list;
2630 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002631
2632 if (!nr_segs)
2633 return 0;
2634
2635 len = iov_length(iov, nr_segs);
2636 if (!len)
2637 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638
Jeff Layton1c892542012-05-16 07:13:17 -04002639 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002640 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002641 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002642 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002644 if (!tcon->ses->server->ops->async_readv)
2645 return -ENOSYS;
2646
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2648 pid = open_file->pid;
2649 else
2650 pid = current->tgid;
2651
Steve Frenchad7a2922008-02-07 23:25:02 +00002652 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002653 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002654
Jeff Layton1c892542012-05-16 07:13:17 -04002655 do {
2656 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2657 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002658
Jeff Layton1c892542012-05-16 07:13:17 -04002659 /* allocate a readdata struct */
2660 rdata = cifs_readdata_alloc(npages,
2661 cifs_uncached_readv_complete);
2662 if (!rdata) {
2663 rc = -ENOMEM;
2664 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002666
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002667 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002668 if (rc)
2669 goto error;
2670
2671 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002672 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002673 rdata->offset = offset;
2674 rdata->bytes = cur_len;
2675 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002676 rdata->pagesz = PAGE_SIZE;
2677 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002678
2679 rc = cifs_retry_async_readv(rdata);
2680error:
2681 if (rc) {
2682 kref_put(&rdata->refcount,
2683 cifs_uncached_readdata_release);
2684 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 }
Jeff Layton1c892542012-05-16 07:13:17 -04002686
2687 list_add_tail(&rdata->list, &rdata_list);
2688 offset += cur_len;
2689 len -= cur_len;
2690 } while (len > 0);
2691
2692 /* if at least one read request send succeeded, then reset rc */
2693 if (!list_empty(&rdata_list))
2694 rc = 0;
2695
2696 /* the loop below should proceed in the order of increasing offsets */
2697restart_loop:
2698 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2699 if (!rc) {
2700 ssize_t copied;
2701
2702 /* FIXME: freezable sleep too? */
2703 rc = wait_for_completion_killable(&rdata->done);
2704 if (rc)
2705 rc = -EINTR;
2706 else if (rdata->result)
2707 rc = rdata->result;
2708 else {
2709 rc = cifs_readdata_to_iov(rdata, iov,
2710 nr_segs, *poffset,
2711 &copied);
2712 total_read += copied;
2713 }
2714
2715 /* resend call if it's a retryable error */
2716 if (rc == -EAGAIN) {
2717 rc = cifs_retry_async_readv(rdata);
2718 goto restart_loop;
2719 }
2720 }
2721 list_del_init(&rdata->list);
2722 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002724
Jeff Layton1c892542012-05-16 07:13:17 -04002725 cifs_stats_bytes_read(tcon, total_read);
2726 *poffset += total_read;
2727
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002728 /* mask nodata case */
2729 if (rc == -ENODATA)
2730 rc = 0;
2731
Jeff Layton1c892542012-05-16 07:13:17 -04002732 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733}
2734
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002735ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002736 unsigned long nr_segs, loff_t pos)
2737{
2738 ssize_t read;
2739
2740 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2741 if (read > 0)
2742 iocb->ki_pos = pos;
2743
2744 return read;
2745}
2746
2747ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2748 unsigned long nr_segs, loff_t pos)
2749{
2750 struct inode *inode;
2751
2752 inode = iocb->ki_filp->f_path.dentry->d_inode;
2753
2754 if (CIFS_I(inode)->clientCanCacheRead)
2755 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2756
2757 /*
2758 * In strict cache mode we need to read from the server all the time
2759 * if we don't have level II oplock because the server can delay mtime
2760 * change - so we can't make a decision about inode invalidating.
2761 * And we can also fail with pagereading if there are mandatory locks
2762 * on pages affected by this read but not on the region from pos to
2763 * pos+len-1.
2764 */
2765
2766 return cifs_user_readv(iocb, iov, nr_segs, pos);
2767}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
/*
 * Synchronous read of up to read_size bytes into the kernel buffer
 * read_data, starting at *offset. Issues rsize-capped requests via the
 * server's ->sync_read op until the request is satisfied, the server
 * returns a zero-byte read, or an error occurs. *offset is advanced past
 * the bytes read. Returns the number of bytes read, or a negative error
 * code if nothing at all could be read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* this protocol version has no synchronous read op */
	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* send the opener's pid instead of ours when forwarding is on */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	/* loop issuing rsize-sized chunks until read_size bytes were read */
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		/* retry while the transport asks us to (e.g. reconnect) */
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			/* partial success wins over a late error/EOF */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): total_read has not yet been advanced
			 * by this iteration's bytes_read (that happens in the
			 * for-loop update), so the stats accounting here looks
			 * one iteration behind — confirm against the stats
			 * consumers before changing.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
2857
Jeff Laytonca83ce32011-04-12 09:13:44 -04002858/*
2859 * If the page is mmap'ed into a process' page tables, then we need to make
2860 * sure that it doesn't change while being written back.
2861 */
2862static int
2863cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2864{
2865 struct page *page = vmf->page;
2866
2867 lock_page(page);
2868 return VM_FAULT_LOCKED;
2869}
2870
2871static struct vm_operations_struct cifs_file_vm_ops = {
2872 .fault = filemap_fault,
2873 .page_mkwrite = cifs_page_mkwrite,
2874};
2875
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002876int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2877{
2878 int rc, xid;
2879 struct inode *inode = file->f_path.dentry->d_inode;
2880
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002881 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002882
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002883 if (!CIFS_I(inode)->clientCanCacheRead) {
2884 rc = cifs_invalidate_mapping(inode);
2885 if (rc)
2886 return rc;
2887 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002888
2889 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002890 if (rc == 0)
2891 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002892 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002893 return rc;
2894}
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2897{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 int rc, xid;
2899
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002900 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002901 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002903 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002904 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 return rc;
2906 }
2907 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002908 if (rc == 0)
2909 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002910 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 return rc;
2912}
2913
Jeff Layton0471ca32012-05-16 07:13:16 -04002914static void
2915cifs_readv_complete(struct work_struct *work)
2916{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002917 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04002918 struct cifs_readdata *rdata = container_of(work,
2919 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04002920
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002921 for (i = 0; i < rdata->nr_pages; i++) {
2922 struct page *page = rdata->pages[i];
2923
Jeff Layton0471ca32012-05-16 07:13:16 -04002924 lru_cache_add_file(page);
2925
2926 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04002927 flush_dcache_page(page);
2928 SetPageUptodate(page);
2929 }
2930
2931 unlock_page(page);
2932
2933 if (rdata->result == 0)
2934 cifs_readpage_to_fscache(rdata->mapping->host, page);
2935
2936 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002937 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04002938 }
Jeff Layton6993f742012-05-16 07:13:17 -04002939 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04002940}
2941
/*
 * Receive callback for an async readpages request: pull up to len bytes
 * off the socket directly into rdata's pages, one page per recv. Pages
 * beyond the data (or beyond the server's eof) are zeroed or released
 * rather than read. Returns the number of bytes received, or a negative
 * error if nothing was received.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			/* remember the short tail for the transport layer */
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			/* drop the page from the request entirely */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		/* receive straight into the (kmapped) page */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	/* partial success wins over a late receive error */
	return total_read > 0 ? total_read : result;
}
3017
/*
 * ->readpages: batch contiguous pages from page_list into rsize-sized
 * async read requests. Consumes as many pages as possible; pages that
 * could not be added to the page cache are left for the VFS to retry
 * via ->readpage. Completion (unlock/uptodate/release) is handled by
 * cifs_readv_complete once each request finishes.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* send the opener's pid instead of ours when forwarding is on */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* rdata holds a file reference until cifs_readdata_release */
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		/* tmplist is in increasing index order; keep that order */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			/* the read never went out; clean up the pages here */
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; the async machinery holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}
3164
/*
 * Fill one page cache page, preferring fscache and falling back to a
 * synchronous server read at *poffset. On a successful read the tail of
 * the page is zeroed, the page is marked uptodate, and it is pushed to
 * fscache. Does not unlock the page; callers do that. Returns 0 on
 * success or a negative error from the read.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* pin the page while it is kmapped and being filled */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the tail when the read came up short */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3208
3209static int cifs_readpage(struct file *file, struct page *page)
3210{
3211 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3212 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003213 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003215 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216
3217 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303218 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003219 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303220 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 }
3222
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003223 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003224 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225
3226 rc = cifs_readpage_worker(file, page, &offset);
3227
3228 unlock_page(page);
3229
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003230 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231 return rc;
3232}
3233
Steve Frencha403a0a2007-07-26 15:54:16 +00003234static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3235{
3236 struct cifsFileInfo *open_file;
3237
Jeff Layton44772882010-10-15 15:34:03 -04003238 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003239 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003240 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003241 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003242 return 1;
3243 }
3244 }
Jeff Layton44772882010-10-15 15:34:03 -04003245 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003246 return 0;
3247}
3248
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249/* We do not want to update the file size from server for inodes
3250 open for write - to avoid races with writepage extending
3251 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003252 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 but this is tricky to do without racing with writebehind
3254 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003255bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256{
Steve Frencha403a0a2007-07-26 15:54:16 +00003257 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003258 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003259
Steve Frencha403a0a2007-07-26 15:54:16 +00003260 if (is_inode_writable(cifsInode)) {
3261 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003262 struct cifs_sb_info *cifs_sb;
3263
Steve Frenchc32a0b62006-01-12 14:41:28 -08003264 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003266 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003267 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003268 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003269 }
3270
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003271 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003272 return true;
Steve French7ba52632007-02-08 18:14:13 +00003273
Steve French4b18f2a2008-04-29 00:06:05 +00003274 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003275 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003276 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277}
3278
/*
 * ->write_begin: grab (and possibly pre-fill) the page cache page that
 * a buffered write of len bytes at pos will land in. The page is
 * returned locked in *pagep. If the page can't be made uptodate here,
 * cifs_write_end falls back to a sync write. Returns 0 or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3350
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303351static int cifs_release_page(struct page *page, gfp_t gfp)
3352{
3353 if (PagePrivate(page))
3354 return 0;
3355
3356 return cifs_fscache_release_page(page, gfp);
3357}
3358
3359static void cifs_invalidate_page(struct page *page, unsigned long offset)
3360{
3361 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3362
3363 if (offset == 0)
3364 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3365}
3366
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003367static int cifs_launder_page(struct page *page)
3368{
3369 int rc = 0;
3370 loff_t range_start = page_offset(page);
3371 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3372 struct writeback_control wbc = {
3373 .sync_mode = WB_SYNC_ALL,
3374 .nr_to_write = 0,
3375 .range_start = range_start,
3376 .range_end = range_end,
3377 };
3378
3379 cFYI(1, "Launder page: %p", page);
3380
3381 if (clear_page_dirty_for_io(page))
3382 rc = cifs_writepage_locked(page, &wbc);
3383
3384 cifs_fscache_invalidate_page(page, page->mapping->host);
3385 return rc;
3386}
3387
/*
 * Work item run when the server breaks our oplock: propagate the break
 * to any local leases, flush (and, if we lose read caching, wait for
 * and invalidate) cached data, re-push byte-range locks, and finally
 * acknowledge the break to the server unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* break local leases to match the access we retain */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* lost read caching: wait out writes, drop pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3427
/*
 * Address space operations used when the server's negotiated buffer is
 * large enough for cifs_readpages (see the comment above
 * cifs_addr_ops_smallbuf for the alternative).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003440
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages here — see the comment above */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};