/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

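/*
 * Map the open(2) access mode onto the desired-access bits sent in an
 * SMB open request: e.g. O_RDONLY requests GENERIC_READ and O_RDWR
 * requests GENERIC_READ | GENERIC_WRITE. Any other access mode falls
 * back to a conservative attribute/EA/data mask.
 */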
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

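/*
 * Translate VFS open flags into the SMB_O_* flags used by the CIFS
 * POSIX extensions; note that O_DSYNC is widened to SMB_O_SYNC below.
 */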
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

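/*
 * Derive the create disposition from the O_CREAT/O_EXCL/O_TRUNC
 * combination, e.g. O_CREAT | O_TRUNC maps to FILE_OVERWRITE_IF; the
 * full mapping table is spelled out in cifs_nt_open() below.
 */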
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

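/*
 * Open a file using the CIFS POSIX extensions. On success, the
 * returned FILE_UNIX_BASIC_INFO is used to instantiate a new inode or
 * refresh an existing one, unless the caller passed a NULL pinode.
 */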
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

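/*
 * Open a file the traditional (non-POSIX) way through the dialect's
 * server->ops->open() and then query inode metadata from the server.
 */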
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on
 *	the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

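/*
 * Allocate and initialize per-open file data: attach its byte-range
 * lock list to the inode under lock_sem and link the new struct into
 * the tcon and inode open-file lists (readable instances go first).
 */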
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

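/*
 * Take an extra reference on the file private data. Must be called
 * without holding cifs_file_list_lock, which is acquired here.
 */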
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close, because otherwise we may get an error when
		 * we open this file again and are granted at least a level II
		 * oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

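/*
 * ->open() for regular files: try a POSIX open when the server
 * advertises the capability, fall back to cifs_nt_open() otherwise,
 * and wrap the resulting handle in a new cifsFileInfo.
 */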
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

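/*
 * Reopen a file handle that has been invalidated, typically after
 * reconnecting to a server that went down. When can_flush is set,
 * dirty pages are written back and inode metadata is refreshed.
 */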
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

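/*
 * ->release() for directories: close any uncompleted search (readdir)
 * on the server and free the search buffer.
 */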
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

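/*
 * Allocate a byte-range lock record for [offset, offset + length),
 * owned by the current thread group, with an empty waiter list.
 */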
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

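/*
 * Scan one fid's lock list for a range overlapping [offset, offset +
 * length) that genuinely conflicts; locks held by the same thread
 * group on the same fid, and compatible shared locks, are skipped.
 */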
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			bool rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

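/* Append a lock to this fid's lock list under the inode's lock_sem. */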
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

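/*
 * Push all cached mandatory-style byte-range locks for this fid to the
 * server, batching as many LOCKING_ANDX_RANGE entries per request as
 * the server's buffer size allows, then clear can_cache_brlcks.
 */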
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

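/*
 * Push cached locks in the POSIX or mandatory flavor, depending on the
 * unix extensions and mount options.
 */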
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return tcon->ses->server->ops->push_mand_locks(cfile);
}

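/*
 * Decode a struct file_lock into this dialect's lock type bits and
 * work out whether the request is a lock or an unlock and whether we
 * may block waiting for it.
 */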
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001131static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001132cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001133 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001135 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001136 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001137 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001138 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001139 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001140 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001141 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001143 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001144 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001145 "not implemented yet");
1146 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001147 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001148 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001150 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001152 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001153 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001154 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001155 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001156 *lock = 1;
1157 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001158 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001159 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001160 *unlock = 1;
1161 /* Check if unlock includes more than one lock range */
1162 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001163 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001164 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001165 *lock = 1;
1166 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001167 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001168 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001169 *lock = 1;
1170 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001171 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001172 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001173 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001175 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001176}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
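/*
 * Service an F_GETLK-style query. On the mandatory-lock path the range
 * is probed by briefly taking and releasing it on the server, and
 * fl_type is rewritten (F_UNLCK, F_RDLCK or F_WRLCK) to report what
 * would succeed.
 */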
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001178static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001179cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001180 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001181{
1182 int rc = 0;
1183 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001184 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1185 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001186 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001187 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001189 if (posix_lck) {
1190 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001191
1192 rc = cifs_posix_lock_test(file, flock);
1193 if (!rc)
1194 return rc;
1195
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001196 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001197 posix_lock_type = CIFS_RDLCK;
1198 else
1199 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001200 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001201 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001202 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 return rc;
1204 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001205
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001206 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001207 if (!rc)
1208 return rc;
1209
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001210 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001211 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1212 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001213 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001214 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1215 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001216 flock->fl_type = F_UNLCK;
1217 if (rc != 0)
1218 cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001219 "range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001220 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001221 }
1222
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001223 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001224 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001225 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001226 }
1227
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001228 type &= ~server->vals->exclusive_lock_type;
1229
1230 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1231 type | server->vals->shared_lock_type,
1232 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001233 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001234 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1235 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001236 flock->fl_type = F_RDLCK;
1237 if (rc != 0)
1238 cERROR(1, "Error %d unlocking previously locked "
1239 "range during test of lock", rc);
1240 } else
1241 flock->fl_type = F_WRLCK;
1242
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001243 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244}
1245
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001246void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001247cifs_move_llist(struct list_head *source, struct list_head *dest)
1248{
1249 struct list_head *li, *tmp;
1250 list_for_each_safe(li, tmp, source)
1251 list_move(li, dest);
1252}
1253
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001254void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001255cifs_free_llist(struct list_head *llist)
1256{
1257 struct cifsLockInfo *li, *tmp;
1258 list_for_each_entry_safe(li, tmp, llist, llist) {
1259 cifs_del_lock_waiters(li);
1260 list_del(&li->llist);
1261 kfree(li);
1262 }
1263}
1264
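/*
 * Unlock a byte range on the mandatory-lock path. Cached locks falling
 * inside the range are batched into LOCKING_ANDX_RANGE arrays (sized to
 * fit the server's maxBuf) and sent via cifs_lockv; if a request fails,
 * the affected ranges are restored to the file's lock list.
 */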
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001265int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001266cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1267 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001268{
1269 int rc = 0, stored_rc;
1270 int types[] = {LOCKING_ANDX_LARGE_FILES,
1271 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1272 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001273 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001274 LOCKING_ANDX_RANGE *buf, *cur;
1275 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1276 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1277 struct cifsLockInfo *li, *tmp;
1278 __u64 length = 1 + flock->fl_end - flock->fl_start;
1279 struct list_head tmp_llist;
1280
1281 INIT_LIST_HEAD(&tmp_llist);
1282
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001283 /*
1284 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1285 * and check it for zero before using.
1286 */
1287 max_buf = tcon->ses->server->maxBuf;
1288 if (!max_buf)
1289 return -EINVAL;
1290
1291 max_num = (max_buf - sizeof(struct smb_hdr)) /
1292 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001293 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1294 if (!buf)
1295 return -ENOMEM;
1296
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001297 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001298 for (i = 0; i < 2; i++) {
1299 cur = buf;
1300 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001301 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001302 if (flock->fl_start > li->offset ||
1303 (flock->fl_start + length) <
1304 (li->offset + li->length))
1305 continue;
1306 if (current->tgid != li->pid)
1307 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001308 if (types[i] != li->type)
1309 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001310 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001311 /*
1312 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001313 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001314 */
1315 list_del(&li->llist);
1316 cifs_del_lock_waiters(li);
1317 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001318 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001319 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001320 cur->Pid = cpu_to_le16(li->pid);
1321 cur->LengthLow = cpu_to_le32((u32)li->length);
1322 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1323 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1324 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1325 /*
1326 * We need to save a lock here to let us add it again to
1327 * the file's list if the unlock range request fails on
1328 * the server.
1329 */
1330 list_move(&li->llist, &tmp_llist);
1331 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001332 stored_rc = cifs_lockv(xid, tcon,
1333 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001334 li->type, num, 0, buf);
1335 if (stored_rc) {
1336 /*
1337 * We failed on the unlock range
1338 * request - add all locks from the tmp
1339 * list to the head of the file's list.
1340 */
1341 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001342 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001343 rc = stored_rc;
1344 } else
1345 /*
1346 * The unlock range request succeeded -
1347 * free the tmp list.
1348 */
1349 cifs_free_llist(&tmp_llist);
1350 cur = buf;
1351 num = 0;
1352 } else
1353 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001354 }
1355 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001356 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001357 types[i], num, 0, buf);
1358 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001359 cifs_move_llist(&tmp_llist,
1360 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001361 rc = stored_rc;
1362 } else
1363 cifs_free_llist(&tmp_llist);
1364 }
1365 }
1366
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001367 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001368 kfree(buf);
1369 return rc;
1370}
1371
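/*
 * Apply an F_SETLK/F_SETLKW-style request: use the POSIX path when
 * available, otherwise record the lock locally and send a mandatory
 * lock (or unlock) request to the server.
 */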
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001372static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001373cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001374 bool wait_flag, bool posix_lck, int lock, int unlock,
1375 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001376{
1377 int rc = 0;
1378 __u64 length = 1 + flock->fl_end - flock->fl_start;
1379 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1380 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001381 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001382
1383 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001384 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001385
1386 rc = cifs_posix_lock_set(file, flock);
1387 if (rc <= 0)
1388 return rc;
1389
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001390 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001391 posix_lock_type = CIFS_RDLCK;
1392 else
1393 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001394
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001395 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001396 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001397
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001398 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1399 current->tgid, flock->fl_start, length,
1400 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001401 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001402 }
1403
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001404 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001405 struct cifsLockInfo *lock;
1406
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001407 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001408 if (!lock)
1409 return -ENOMEM;
1410
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001411 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001412 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001413 kfree(lock);
1414 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001415 goto out;
1416
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001417 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1418 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001419 if (rc) {
1420 kfree(lock);
1421 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001422 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001423
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001424 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001425 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001426 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428out:
1429 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001430 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 return rc;
1432}
1433
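/*
 * ->lock() entry point for cifs: userspace byte-range requests such as
 * fcntl(fd, F_SETLKW, &fl) are dispatched here by the VFS.
 */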
1434int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1435{
1436 int rc, xid;
1437 int lock = 0, unlock = 0;
1438 bool wait_flag = false;
1439 bool posix_lck = false;
1440 struct cifs_sb_info *cifs_sb;
1441 struct cifs_tcon *tcon;
1442 struct cifsInodeInfo *cinode;
1443 struct cifsFileInfo *cfile;
1444 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001445 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001446
1447 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001448 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001449
1450 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1451 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1452 flock->fl_start, flock->fl_end);
1453
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001454 cfile = (struct cifsFileInfo *)file->private_data;
1455 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001456
1457 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1458 tcon->ses->server);
1459
1460 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001461 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001462 cinode = CIFS_I(file->f_path.dentry->d_inode);
1463
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001464 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001465 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1466 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1467 posix_lck = true;
1468 /*
1469 * BB add code here to normalize offset and length to account for
1470 * negative length, which we cannot accept over the wire.
1471 */
1472 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001473 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001474 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001475 return rc;
1476 }
1477
1478 if (!lock && !unlock) {
1479 /*
1480 * if no lock or unlock then nothing to do since we do not
1481 * know what it is
1482 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001483 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001484 return -EOPNOTSUPP;
1485 }
1486
1487 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1488 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001489 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 return rc;
1491}
1492
Jeff Layton597b0272012-03-23 14:40:56 -04001493/*
1494 * update the file size (if needed) after a write. Should be called with
1495 * the inode->i_lock held
1496 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001497void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001498cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1499 unsigned int bytes_written)
1500{
1501 loff_t end_of_write = offset + bytes_written;
1502
1503 if (end_of_write > cifsi->server_eof)
1504 cifsi->server_eof = end_of_write;
1505}
1506
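/*
 * Synchronously write write_size bytes from write_data to the server at
 * *offset, reopening an invalidated handle and retrying on -EAGAIN as
 * needed. On success the cached server EOF and i_size are updated and
 * the number of bytes written is returned.
 */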
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001507static ssize_t
1508cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1509 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510{
1511 int rc = 0;
1512 unsigned int bytes_written = 0;
1513 unsigned int total_written;
1514 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001515 struct cifs_tcon *tcon;
1516 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001517 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001518 struct dentry *dentry = open_file->dentry;
1519 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001520 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
Jeff Layton7da4b492010-10-15 15:34:00 -04001522 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523
Joe Perchesb6b38f72010-04-21 03:50:45 +00001524 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001525 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001527 tcon = tlink_tcon(open_file->tlink);
1528 server = tcon->ses->server;
1529
1530 if (!server->ops->sync_write)
1531 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001532
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001533 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 for (total_written = 0; write_size > total_written;
1536 total_written += bytes_written) {
1537 rc = -EAGAIN;
1538 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001539 struct kvec iov[2];
1540 unsigned int len;
1541
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 /* we could deadlock if we called
1544 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001545 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001547 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 if (rc != 0)
1549 break;
1550 }
Steve French3e844692005-10-03 13:37:24 -07001551
Jeff Laytonca83ce32011-04-12 09:13:44 -04001552 len = min((size_t)cifs_sb->wsize,
1553 write_size - total_written);
1554 /* iov[0] is reserved for smb header */
1555 iov[1].iov_base = (char *)write_data + total_written;
1556 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001557 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001558 io_parms.tcon = tcon;
1559 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001560 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001561 rc = server->ops->sync_write(xid, open_file, &io_parms,
1562 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 }
1564 if (rc || (bytes_written == 0)) {
1565 if (total_written)
1566 break;
1567 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001568 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 return rc;
1570 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001571 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001572 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001573 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001574 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001575 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 }
1578
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001579 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Jeff Layton7da4b492010-10-15 15:34:00 -04001581 if (total_written > 0) {
1582 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001583 if (*offset > dentry->d_inode->i_size)
1584 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001585 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001587 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001588 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 return total_written;
1590}
1591
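/*
 * Find an open handle suitable for reading and take a reference on it;
 * returns NULL if the inode has no valid readable handle.
 */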
Jeff Layton6508d902010-09-29 19:51:11 -04001592struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1593 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001594{
1595 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001596 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1597
1598 /* only filter by fsuid on multiuser mounts */
1599 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1600 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001601
Jeff Layton44772882010-10-15 15:34:03 -04001602 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001603 /* we could simply get the first_list_entry since write-only entries
1604 are always at the end of the list but since the first entry might
1605 have a close pending, we go through the whole list */
1606 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001607 if (fsuid_only && open_file->uid != current_fsuid())
1608 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001609 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001610 if (!open_file->invalidHandle) {
1611 /* found a good file */
1612 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001613 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001614 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001615 return open_file;
1616 } /* else might as well continue, and look for
1617 another, or simply have the caller reopen it
1618 again rather than trying to fix this handle */
1619 } else /* write only file */
1620 break; /* write only files are last so must be done */
1621 }
Jeff Layton44772882010-10-15 15:34:03 -04001622 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001623 return NULL;
1624}
Steve French630f3f0c2007-10-25 21:17:17 +00001625
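/*
 * Find an open handle suitable for writing, preferring one owned by the
 * calling process, and take a reference on it. An invalidated handle is
 * used only as a last resort, after an attempt to reopen it.
 */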
Jeff Layton6508d902010-09-29 19:51:11 -04001626struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1627 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001628{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001629 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001630 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001631 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001632 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001633 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001634
Steve French60808232006-04-22 15:53:05 +00001635 /* Having a null inode here (because mapping->host was set to zero by
1636 the VFS or MM) should not happen but we had reports of an oops (due to
1637 it being zero) during stress test cases so we need to check for it */
1638
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001639 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001640 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001641 dump_stack();
1642 return NULL;
1643 }
1644
Jeff Laytond3892292010-11-02 16:22:50 -04001645 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1646
Jeff Layton6508d902010-09-29 19:51:11 -04001647 /* only filter by fsuid on multiuser mounts */
1648 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1649 fsuid_only = false;
1650
Jeff Layton44772882010-10-15 15:34:03 -04001651 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001652refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001653 if (refind > MAX_REOPEN_ATT) {
1654 spin_unlock(&cifs_file_list_lock);
1655 return NULL;
1656 }
Steve French6148a742005-10-05 12:23:19 -07001657 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001658 if (!any_available && open_file->pid != current->tgid)
1659 continue;
1660 if (fsuid_only && open_file->uid != current_fsuid())
1661 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001662 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001663 if (!open_file->invalidHandle) {
1664 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001665 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001666 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001667 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001668 } else {
1669 if (!inv_file)
1670 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001671 }
Steve French6148a742005-10-05 12:23:19 -07001672 }
1673 }
Jeff Layton2846d382008-09-22 21:33:33 -04001674 /* couldn't find a usable FH with the same pid, try any available */
1675 if (!any_available) {
1676 any_available = true;
1677 goto refind_writable;
1678 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001679
1680 if (inv_file) {
1681 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001682 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001683 }
1684
Jeff Layton44772882010-10-15 15:34:03 -04001685 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001686
1687 if (inv_file) {
1688 rc = cifs_reopen_file(inv_file, false);
1689 if (!rc)
1690 return inv_file;
1691 else {
1692 spin_lock(&cifs_file_list_lock);
1693 list_move_tail(&inv_file->flist,
1694 &cifs_inode->openFileList);
1695 spin_unlock(&cifs_file_list_lock);
1696 cifsFileInfo_put(inv_file);
1697 spin_lock(&cifs_file_list_lock);
1698 ++refind;
1699 goto refind_writable;
1700 }
1701 }
1702
Steve French6148a742005-10-05 12:23:19 -07001703 return NULL;
1704}
1705
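/*
 * Write the [from, to) portion of a page back to the server using any
 * writable handle for the inode, clamping the range so a race with
 * truncate cannot extend the file.
 */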
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1707{
1708 struct address_space *mapping = page->mapping;
1709 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1710 char *write_data;
1711 int rc = -EFAULT;
1712 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001714 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
1716 if (!mapping || !mapping->host)
1717 return -EFAULT;
1718
1719 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
1721 offset += (loff_t)from;
1722 write_data = kmap(page);
1723 write_data += from;
1724
1725 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1726 kunmap(page);
1727 return -EIO;
1728 }
1729
1730 /* racing with truncate? */
1731 if (offset > mapping->host->i_size) {
1732 kunmap(page);
1733 return 0; /* don't care */
1734 }
1735
1736 /* check to make sure that we are not extending the file */
1737 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001738 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
Jeff Layton6508d902010-09-29 19:51:11 -04001740 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001741 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001742 bytes_written = cifs_write(open_file, open_file->pid,
1743 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001744 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001746 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001747 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001748 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001749 else if (bytes_written < 0)
1750 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001751 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001752 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 rc = -EIO;
1754 }
1755
1756 kunmap(page);
1757 return rc;
1758}
1759
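/*
 * ->writepages() for cifs: gather runs of contiguous dirty pages into
 * asynchronous write requests of up to wsize bytes each, redirtying the
 * pages if a send fails with -EAGAIN.
 */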
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001761 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001763 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1764 bool done = false, scanned = false, range_whole = false;
1765 pgoff_t end, index;
1766 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001767 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001768 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001769 int rc = 0;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001770 loff_t isize = i_size_read(mapping->host);
Steve French50c2f752007-07-13 00:33:32 +00001771
Steve French37c0eb42005-10-05 14:50:29 -07001772 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001773 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001774 * one page at a time via cifs_writepage
1775 */
1776 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1777 return generic_writepages(mapping, wbc);
1778
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001779 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001780 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001781 end = -1;
1782 } else {
1783 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1784 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1785 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001786 range_whole = true;
1787 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001788 }
1789retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001790 while (!done && index <= end) {
1791 unsigned int i, nr_pages, found_pages;
1792 pgoff_t next = 0, tofind;
1793 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001794
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001795 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1796 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001797
Jeff Laytonc2e87642012-03-23 14:40:55 -04001798 wdata = cifs_writedata_alloc((unsigned int)tofind,
1799 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001800 if (!wdata) {
1801 rc = -ENOMEM;
1802 break;
1803 }
1804
1805 /*
1806 * find_get_pages_tag seems to return a max of 256 on each
1807 * iteration, so we must call it several times in order to
1808 * fill the array or the wsize is effectively limited to
1809 * 256 * PAGE_CACHE_SIZE.
1810 */
1811 found_pages = 0;
1812 pages = wdata->pages;
1813 do {
1814 nr_pages = find_get_pages_tag(mapping, &index,
1815 PAGECACHE_TAG_DIRTY,
1816 tofind, pages);
1817 found_pages += nr_pages;
1818 tofind -= nr_pages;
1819 pages += nr_pages;
1820 } while (nr_pages && tofind && index <= end);
1821
1822 if (found_pages == 0) {
1823 kref_put(&wdata->refcount, cifs_writedata_release);
1824 break;
1825 }
1826
1827 nr_pages = 0;
1828 for (i = 0; i < found_pages; i++) {
1829 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001830 /*
1831 * At this point we hold neither mapping->tree_lock nor
1832 * lock on the page itself: the page may be truncated or
1833 * invalidated (changing page->mapping to NULL), or even
1834 * swizzled back from swapper_space to tmpfs file
1835 * mapping
1836 */
1837
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001838 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001839 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001840 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001841 break;
1842
1843 if (unlikely(page->mapping != mapping)) {
1844 unlock_page(page);
1845 break;
1846 }
1847
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001848 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001849 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001850 unlock_page(page);
1851 break;
1852 }
1853
1854 if (next && (page->index != next)) {
1855 /* Not next consecutive page */
1856 unlock_page(page);
1857 break;
1858 }
1859
1860 if (wbc->sync_mode != WB_SYNC_NONE)
1861 wait_on_page_writeback(page);
1862
1863 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001864 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001865 unlock_page(page);
1866 break;
1867 }
Steve French84d2f072005-10-12 15:32:05 -07001868
Linus Torvaldscb876f42006-12-23 16:19:07 -08001869 /*
1870 * This actually clears the dirty bit in the radix tree.
1871 * See cifs_writepage() for more commentary.
1872 */
1873 set_page_writeback(page);
1874
Jeff Laytoneddb0792012-09-18 16:20:35 -07001875 if (page_offset(page) >= isize) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001876 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001877 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001878 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001879 break;
1880 }
1881
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001882 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001883 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001884 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001885 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001886
1887 /* reset index to refind any pages skipped */
1888 if (nr_pages == 0)
1889 index = wdata->pages[0]->index + 1;
1890
1891 /* put any pages we aren't going to use */
1892 for (i = nr_pages; i < found_pages; i++) {
1893 page_cache_release(wdata->pages[i]);
1894 wdata->pages[i] = NULL;
1895 }
1896
1897 /* nothing to write? */
1898 if (nr_pages == 0) {
1899 kref_put(&wdata->refcount, cifs_writedata_release);
1900 continue;
1901 }
1902
1903 wdata->sync_mode = wbc->sync_mode;
1904 wdata->nr_pages = nr_pages;
1905 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001906 wdata->pagesz = PAGE_CACHE_SIZE;
1907 wdata->tailsz =
1908 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1909 (loff_t)PAGE_CACHE_SIZE);
1910 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1911 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001912
1913 do {
1914 if (wdata->cfile != NULL)
1915 cifsFileInfo_put(wdata->cfile);
1916 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1917 false);
1918 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001919 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001920 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001921 break;
Steve French37c0eb42005-10-05 14:50:29 -07001922 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001923 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001924 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1925 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001926 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001927
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001928 for (i = 0; i < nr_pages; ++i)
1929 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001930
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001931 /* send failure -- clean up the mess */
1932 if (rc != 0) {
1933 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001934 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001935 redirty_page_for_writepage(wbc,
1936 wdata->pages[i]);
1937 else
1938 SetPageError(wdata->pages[i]);
1939 end_page_writeback(wdata->pages[i]);
1940 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001941 }
Jeff Layton941b8532011-01-11 07:24:01 -05001942 if (rc != -EAGAIN)
1943 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001944 }
1945 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001946
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001947 wbc->nr_to_write -= nr_pages;
1948 if (wbc->nr_to_write <= 0)
1949 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001950
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001951 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001952 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001953
Steve French37c0eb42005-10-05 14:50:29 -07001954 if (!scanned && !done) {
1955 /*
1956 * We hit the last page and there is more work to be done: wrap
1957 * back to the start of the file
1958 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001959 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001960 index = 0;
1961 goto retry;
1962 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001963
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001964 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001965 mapping->writeback_index = index;
1966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 return rc;
1968}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001970static int
1971cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001973 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001974 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001976 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977/* BB add check for wbc flags */
1978 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001979 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001980 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001981
1982 /*
1983 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1984 *
1985 * A writepage() implementation always needs to do either this,
1986 * or re-dirty the page with "redirty_page_for_writepage()" in
1987 * the case of a failure.
1988 *
1989 * Just unlocking the page will cause the radix tree tag-bits
1990 * to fail to update with the state of the page correctly.
1991 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001992 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001993retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001995 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1996 goto retry_write;
1997 else if (rc == -EAGAIN)
1998 redirty_page_for_writepage(wbc, page);
1999 else if (rc != 0)
2000 SetPageError(page);
2001 else
2002 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002003 end_page_writeback(page);
2004 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002005 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 return rc;
2007}
2008
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002009static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2010{
2011 int rc = cifs_writepage_locked(page, wbc);
2012 unlock_page(page);
2013 return rc;
2014}
2015
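/*
 * ->write_end(): if the page never became uptodate, push just the copied
 * bytes to the server via cifs_write(); otherwise dirty the page. Then
 * extend i_size if the write went past the old end of file.
 */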
Nick Piggind9414772008-09-24 11:32:59 -04002016static int cifs_write_end(struct file *file, struct address_space *mapping,
2017 loff_t pos, unsigned len, unsigned copied,
2018 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019{
Nick Piggind9414772008-09-24 11:32:59 -04002020 int rc;
2021 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002022 struct cifsFileInfo *cfile = file->private_data;
2023 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2024 __u32 pid;
2025
2026 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2027 pid = cfile->pid;
2028 else
2029 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
Joe Perchesb6b38f72010-04-21 03:50:45 +00002031 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2032 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002033
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002034 if (PageChecked(page)) {
2035 if (copied == len)
2036 SetPageUptodate(page);
2037 ClearPageChecked(page);
2038 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002039 SetPageUptodate(page);
2040
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002042 char *page_data;
2043 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002044 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002045
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002046 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 /* this is probably better than directly calling
2048 partialpage_write since in this function the file handle is
2049 known, which we might as well leverage */
2050 /* BB check if anything else is missing out of ppw,
2051 such as updating the last write time */
2052 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002053 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002054 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002056
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002057 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002058 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002059 rc = copied;
2060 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 set_page_dirty(page);
2062 }
2063
Nick Piggind9414772008-09-24 11:32:59 -04002064 if (rc > 0) {
2065 spin_lock(&inode->i_lock);
2066 if (pos > inode->i_size)
2067 i_size_write(inode, pos);
2068 spin_unlock(&inode->i_lock);
2069 }
2070
2071 unlock_page(page);
2072 page_cache_release(page);
2073
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 return rc;
2075}
2076
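/*
 * fsync for strict cache mode: invalidate the mapping if read caching is
 * not currently allowed, then flush dirty pages and ask the server to
 * flush the file handle.
 */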
Josef Bacik02c24a82011-07-16 20:44:56 -04002077int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2078 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002080 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002082 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002083 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002084 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002085 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002086 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
Josef Bacik02c24a82011-07-16 20:44:56 -04002088 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2089 if (rc)
2090 return rc;
2091 mutex_lock(&inode->i_mutex);
2092
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002093 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Joe Perchesb6b38f72010-04-21 03:50:45 +00002095 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002096 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002097
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002098 if (!CIFS_I(inode)->clientCanCacheRead) {
2099 rc = cifs_invalidate_mapping(inode);
2100 if (rc) {
2101 cFYI(1, "rc: %d during invalidate phase", rc);
2102 rc = 0; /* don't care about it in fsync */
2103 }
2104 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002105
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002106 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002107 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2108 server = tcon->ses->server;
2109 if (server->ops->flush)
2110 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2111 else
2112 rc = -ENOSYS;
2113 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002114
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002115 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002116 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002117 return rc;
2118}
2119
Josef Bacik02c24a82011-07-16 20:44:56 -04002120int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002121{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002122 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002123 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002124 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002125 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002126 struct cifsFileInfo *smbfile = file->private_data;
2127 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002128 struct inode *inode = file->f_mapping->host;
2129
2130 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2131 if (rc)
2132 return rc;
2133 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002134
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002135 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002136
2137 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2138 file->f_path.dentry->d_name.name, datasync);
2139
2140 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002141 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2142 server = tcon->ses->server;
2143 if (server->ops->flush)
2144 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2145 else
2146 rc = -ENOSYS;
2147 }
Steve Frenchb298f222009-02-21 21:17:43 +00002148
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002149 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002150 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 return rc;
2152}
2153
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154/*
2155 * As the file closes, flush all cached write data for this inode, checking
2156 * for write-behind errors.
2157 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002158int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002160 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 int rc = 0;
2162
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002163 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002164 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002165
Joe Perchesb6b38f72010-04-21 03:50:45 +00002166 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
2168 return rc;
2169}
2170
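/*
 * Allocate num_pages pages for an uncached write, releasing any pages
 * already obtained if one of the allocations fails.
 */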
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002171static int
2172cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2173{
2174 int rc = 0;
2175 unsigned long i;
2176
2177 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002178 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002179 if (!pages[i]) {
2180 /*
2181 * save number of pages we have already allocated and
2182 * return with ENOMEM error
2183 */
2184 num_pages = i;
2185 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002186 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002187 }
2188 }
2189
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002190 if (rc) {
2191 for (i = 0; i < num_pages; i++)
2192 put_page(pages[i]);
2193 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002194 return rc;
2195}
2196
2197static inline
2198size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2199{
2200 size_t num_pages;
2201 size_t clen;
2202
2203 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002204 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002205
2206 if (cur_len)
2207 *cur_len = clen;
2208
2209 return num_pages;
2210}
2211
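/*
 * Completion work for an uncached write: update the cached server EOF
 * (and i_size if needed), wake anyone waiting on the request, and drop
 * the page references unless the write will be retried.
 */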
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002212static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002213cifs_uncached_writev_complete(struct work_struct *work)
2214{
2215 int i;
2216 struct cifs_writedata *wdata = container_of(work,
2217 struct cifs_writedata, work);
2218 struct inode *inode = wdata->cfile->dentry->d_inode;
2219 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2220
2221 spin_lock(&inode->i_lock);
2222 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2223 if (cifsi->server_eof > inode->i_size)
2224 i_size_write(inode, cifsi->server_eof);
2225 spin_unlock(&inode->i_lock);
2226
2227 complete(&wdata->done);
2228
2229 if (wdata->result != -EAGAIN) {
2230 for (i = 0; i < wdata->nr_pages; i++)
2231 put_page(wdata->pages[i]);
2232 }
2233
2234 kref_put(&wdata->refcount, cifs_writedata_release);
2235}
2236
2237/* attempt to send write to server, retry on any -EAGAIN errors */
2238static int
2239cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2240{
2241 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002242 struct TCP_Server_Info *server;
2243
2244 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002245
2246 do {
2247 if (wdata->cfile->invalidHandle) {
2248 rc = cifs_reopen_file(wdata->cfile, false);
2249 if (rc != 0)
2250 continue;
2251 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002252 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002253 } while (rc == -EAGAIN);
2254
2255 return rc;
2256}
2257
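/*
 * Copy the user's iovec into freshly allocated pages and send it as a
 * series of wsize-sized asynchronous writes, then collect the replies in
 * increasing offset order, resending any request that fails with -EAGAIN.
 */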
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002258static ssize_t
2259cifs_iovec_write(struct file *file, const struct iovec *iov,
2260 unsigned long nr_segs, loff_t *poffset)
2261{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002262 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002263 size_t copied, len, cur_len;
2264 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002265 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002266 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002267 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002268 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002269 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002270 struct cifs_writedata *wdata, *tmp;
2271 struct list_head wdata_list;
2272 int rc;
2273 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002274
2275 len = iov_length(iov, nr_segs);
2276 if (!len)
2277 return 0;
2278
2279 rc = generic_write_checks(file, poffset, &len, 0);
2280 if (rc)
2281 return rc;
2282
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002283 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002284 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002285 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002286 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002287
2288 if (!tcon->ses->server->ops->async_writev)
2289 return -ENOSYS;
2290
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002291 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002292
2293 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2294 pid = open_file->pid;
2295 else
2296 pid = current->tgid;
2297
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002298 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002299 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002300 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002301
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002302 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2303 wdata = cifs_writedata_alloc(nr_pages,
2304 cifs_uncached_writev_complete);
2305 if (!wdata) {
2306 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002307 break;
2308 }
2309
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002310 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2311 if (rc) {
2312 kfree(wdata);
2313 break;
2314 }
2315
2316 save_len = cur_len;
2317 for (i = 0; i < nr_pages; i++) {
2318 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2319 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2320 0, copied);
2321 cur_len -= copied;
2322 iov_iter_advance(&it, copied);
2323 }
2324 cur_len = save_len - cur_len;
2325
2326 wdata->sync_mode = WB_SYNC_ALL;
2327 wdata->nr_pages = nr_pages;
2328 wdata->offset = (__u64)offset;
2329 wdata->cfile = cifsFileInfo_get(open_file);
2330 wdata->pid = pid;
2331 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002332 wdata->pagesz = PAGE_SIZE;
2333 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002334 rc = cifs_uncached_retry_writev(wdata);
2335 if (rc) {
2336 kref_put(&wdata->refcount, cifs_writedata_release);
2337 break;
2338 }
2339
2340 list_add_tail(&wdata->list, &wdata_list);
2341 offset += cur_len;
2342 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002343 } while (len > 0);
2344
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002345 /*
2346 * If at least one write was successfully sent, then discard any rc
2347 * value from the later writes. If the other writes succeed, then
2348 * we'll end up returning whatever was written. If one fails, then
2349 * we'll get a new rc value from that.
2350 */
2351 if (!list_empty(&wdata_list))
2352 rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize for the case when signing is disabled: we could
	 * drop this extra memory-to-memory copy and construct the write
	 * request directly from the iovec buffers.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     true)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

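	/*
	 * generic_write_sync() is a no-op unless the file was opened with
	 * O_SYNC/O_DSYNC or the inode demands synchronous I/O, so this only
	 * costs something when synchronous semantics were requested.
	 */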
	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}

ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected
	 * pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */

	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

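	/*
	 * With the Unix extensions and POSIX byte-range lock support
	 * negotiated (and POSIX brlocks not disabled by mount option),
	 * locks are advisory, so writing through the page cache cannot
	 * conflict with a mandatory lock and the generic path is safe.
	 */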
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	return cifs_writev(iocb, iov, nr_segs, pos);
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		/* only the first nr_page_failed entries were allocated;
		   don't put_page() the NULL pointers beyond them */
		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

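	/*
	 * Reopen the file if the handle has gone stale, then (re)issue the
	 * async read; keep retrying for as long as the result is -EAGAIN.
	 */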
	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

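	/*
	 * pos is where this response's data lands relative to the start of
	 * the overall request; e.g. the second of two 16k reads that began
	 * at @offset copies into the iovec starting at pos = 16384.
	 */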
	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

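	/*
	 * Split the request into rsize-sized chunks, issue an async read for
	 * each one, and queue the readdata structures on rdata_list so that
	 * the replies can be collected in order of increasing offset below.
	 */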
	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* don't jump to "error:" - there is no rdata whose
			   refcount could be put there */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was successfully sent, reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime change - so we can't make a decision about invalidating the
	 * inode. Reading through the page cache can also fail if there are
	 * mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, true))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

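	/*
	 * Issue synchronous reads of at most rsize bytes at a time, reopening
	 * the file handle and retrying whenever the server returns -EAGAIN.
	 */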
	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

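	/*
	 * Holding the page lock is all that is needed here; the contents
	 * are written back later through cifs_writepage(s).
	 */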
	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

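/*
 * Completion work for readpages: pages that were successfully read are
 * marked uptodate and pushed to fscache; every page is added to the LRU,
 * unlocked and released.
 */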
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
	     mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
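	/*
	 * Example: with a 16k rsize and 4k cache pages, each rdata batches
	 * at most four contiguous pages; a gap in page->index or hitting
	 * the rsize limit starts a new batch.
	 */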
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
	     page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from server for inodes
 * open for write - to avoid races with writepage extending
 * the file - in the future we could consider allowing
 * refreshing the inode only on increases in the file size
 * but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/*
			 * since no page cache to corrupt on directio
			 * we can change size safely
			 */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one,
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end, so leaving the page not uptodate is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session (using a now incorrect file handle) is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};