blob: 1747cbff7ddfd9d869df67b494a3618542502ce3 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
Jeff Layton608712f2010-10-15 15:33:56 -0400108int cifs_posix_open(char *full_path, struct inode **pinode,
109 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400110 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400111{
112 int rc;
113 FILE_UNIX_BASIC_INFO *presp_data;
114 __u32 posix_flags = 0;
115 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
116 struct cifs_fattr fattr;
117 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000118 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400119
120 cFYI(1, "posix open %s", full_path);
121
122 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
123 if (presp_data == NULL)
124 return -ENOMEM;
125
126 tlink = cifs_sb_tlink(cifs_sb);
127 if (IS_ERR(tlink)) {
128 rc = PTR_ERR(tlink);
129 goto posix_open_ret;
130 }
131
132 tcon = tlink_tcon(tlink);
133 mode &= ~current_umask();
134
135 posix_flags = cifs_posix_convert_flags(f_flags);
136 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
137 poplock, full_path, cifs_sb->local_nls,
138 cifs_sb->mnt_cifs_flags &
139 CIFS_MOUNT_MAP_SPECIAL_CHR);
140 cifs_put_tlink(tlink);
141
142 if (rc)
143 goto posix_open_ret;
144
145 if (presp_data->Type == cpu_to_le32(-1))
146 goto posix_open_ret; /* open ok, caller does qpathinfo */
147
148 if (!pinode)
149 goto posix_open_ret; /* caller does not need info */
150
151 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
152
153 /* get new inode and set it up */
154 if (*pinode == NULL) {
155 cifs_fill_uniqueid(sb, &fattr);
156 *pinode = cifs_iget(sb, &fattr);
157 if (!*pinode) {
158 rc = -ENOMEM;
159 goto posix_open_ret;
160 }
161 } else {
162 cifs_fattr_to_inode(*pinode, &fattr);
163 }
164
165posix_open_ret:
166 kfree(presp_data);
167 return rc;
168}
169
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300170static int
171cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700172 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
173 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300174{
175 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700176 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300177 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500178 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300179 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700180 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300181
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700182 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700183 return -ENOSYS;
184
185 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300186
187/*********************************************************************
188 * open flag mapping table:
189 *
190 * POSIX Flag CIFS Disposition
191 * ---------- ----------------
192 * O_CREAT FILE_OPEN_IF
193 * O_CREAT | O_EXCL FILE_CREATE
194 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
195 * O_TRUNC FILE_OVERWRITE
196 * none of the above FILE_OPEN
197 *
198 * Note that there is not a direct match between disposition
199 * FILE_SUPERSEDE (ie create whether or not file exists although
200 * O_CREAT | O_TRUNC is similar but truncates the existing
201 * file rather than creating a new file as FILE_SUPERSEDE does
202 * (which uses the attributes / metadata passed in on open call)
203 *?
204 *? O_SYNC is a reasonable match to CIFS writethrough flag
205 *? and the read write flags match reasonably. O_LARGEFILE
206 *? is irrelevant because largefile support is always used
207 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
208 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
209 *********************************************************************/
210
211 disposition = cifs_get_disposition(f_flags);
212
213 /* BB pass O_SYNC flag through on file attributes .. BB */
214
215 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
216 if (!buf)
217 return -ENOMEM;
218
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500219 if (backup_cred(cifs_sb))
220 create_options |= CREATE_OPEN_BACKUP_INTENT;
221
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700222 rc = server->ops->open(xid, tcon, full_path, disposition,
223 desired_access, create_options, fid, oplock, buf,
224 cifs_sb);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300225
226 if (rc)
227 goto out;
228
229 if (tcon->unix_ext)
230 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
231 xid);
232 else
233 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700234 xid, &fid->netfid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300235
236out:
237 kfree(buf);
238 return rc;
239}
240
Jeff Layton15ecb432010-10-15 15:34:02 -0400241struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700242cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400243 struct tcon_link *tlink, __u32 oplock)
244{
245 struct dentry *dentry = file->f_path.dentry;
246 struct inode *inode = dentry->d_inode;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700247 struct cifsInodeInfo *cinode = CIFS_I(inode);
248 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700249 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700250 struct cifs_tcon *tcon = tlink_tcon(tlink);
Jeff Layton15ecb432010-10-15 15:34:02 -0400251
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700252 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
253 if (cfile == NULL)
254 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400255
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700256 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
257 if (!fdlocks) {
258 kfree(cfile);
259 return NULL;
260 }
261
262 INIT_LIST_HEAD(&fdlocks->locks);
263 fdlocks->cfile = cfile;
264 cfile->llist = fdlocks;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700265 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700266 list_add(&fdlocks->llist, &cinode->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700267 up_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700268
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700269 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700270 cfile->pid = current->tgid;
271 cfile->uid = current_fsuid();
272 cfile->dentry = dget(dentry);
273 cfile->f_flags = file->f_flags;
274 cfile->invalidHandle = false;
275 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700276 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700277 mutex_init(&cfile->fh_mutex);
Jeff Layton15ecb432010-10-15 15:34:02 -0400278
Jeff Layton44772882010-10-15 15:34:03 -0400279 spin_lock(&cifs_file_list_lock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700280 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
281 oplock = fid->pending_open->oplock;
282 list_del(&fid->pending_open->olist);
283
284 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
285
286 list_add(&cfile->tlist, &tcon->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400287 /* if readable file instance put first in list*/
288 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700289 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400290 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700291 list_add_tail(&cfile->flist, &cinode->openFileList);
Jeff Layton44772882010-10-15 15:34:03 -0400292 spin_unlock(&cifs_file_list_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400293
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700294 file->private_data = cfile;
295 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400296}
297
Jeff Layton764a1b12012-07-25 14:59:54 -0400298struct cifsFileInfo *
299cifsFileInfo_get(struct cifsFileInfo *cifs_file)
300{
301 spin_lock(&cifs_file_list_lock);
302 cifsFileInfo_get_locked(cifs_file);
303 spin_unlock(&cifs_file_list_lock);
304 return cifs_file;
305}
306
Steve Frenchcdff08e2010-10-21 22:46:14 +0000307/*
308 * Release a reference on the file private data. This may involve closing
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400309 * the filehandle out on the server. Must be called without holding
310 * cifs_file_list_lock.
Steve Frenchcdff08e2010-10-21 22:46:14 +0000311 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400312void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
313{
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300314 struct inode *inode = cifs_file->dentry->d_inode;
Steve French96daf2b2011-05-27 04:34:02 +0000315 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700316 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300317 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300318 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000319 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700320 struct cifs_fid fid;
321 struct cifs_pending_open open;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000322
323 spin_lock(&cifs_file_list_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400324 if (--cifs_file->count > 0) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000325 spin_unlock(&cifs_file_list_lock);
326 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400327 }
Steve Frenchcdff08e2010-10-21 22:46:14 +0000328
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700329 if (server->ops->get_lease_key)
330 server->ops->get_lease_key(inode, &fid);
331
332 /* store open in pending opens to make sure we don't miss lease break */
333 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
334
Steve Frenchcdff08e2010-10-21 22:46:14 +0000335 /* remove it from the lists */
336 list_del(&cifs_file->flist);
337 list_del(&cifs_file->tlist);
338
339 if (list_empty(&cifsi->openFileList)) {
340 cFYI(1, "closing last open instance for inode %p",
341 cifs_file->dentry->d_inode);
Pavel Shilovsky25364132012-09-18 16:20:27 -0700342 /*
343 * In strict cache mode we need invalidate mapping on the last
344 * close because it may cause a error when we open this file
345 * again and get at least level II oplock.
346 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300347 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
348 CIFS_I(inode)->invalid_mapping = true;
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300349 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000350 }
351 spin_unlock(&cifs_file_list_lock);
352
Jeff Laytonad635942011-07-26 12:20:17 -0400353 cancel_work_sync(&cifs_file->oplock_break);
354
Steve Frenchcdff08e2010-10-21 22:46:14 +0000355 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700356 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400357 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700358
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400359 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700360 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400361 server->ops->close(xid, tcon, &cifs_file->fid);
362 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000363 }
364
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700365 cifs_del_pending_open(&open);
366
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700367 /*
368 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000369 * is closed anyway.
370 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700371 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700372 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000373 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400374 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000375 kfree(li);
376 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700377 list_del(&cifs_file->llist->llist);
378 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700379 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000380
381 cifs_put_tlink(cifs_file->tlink);
382 dput(cifs_file->dentry);
383 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400384}
385
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386int cifs_open(struct inode *inode, struct file *file)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700387
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388{
389 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400390 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400391 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700393 struct TCP_Server_Info *server;
Steve French96daf2b2011-05-27 04:34:02 +0000394 struct cifs_tcon *tcon;
Jeff Layton7ffec372010-09-29 19:51:11 -0400395 struct tcon_link *tlink;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700396 struct cifsFileInfo *cfile = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 char *full_path = NULL;
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300398 bool posix_open_ok = false;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700399 struct cifs_fid fid;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700400 struct cifs_pending_open open;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400402 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403
404 cifs_sb = CIFS_SB(inode->i_sb);
Jeff Layton7ffec372010-09-29 19:51:11 -0400405 tlink = cifs_sb_tlink(cifs_sb);
406 if (IS_ERR(tlink)) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400407 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400408 return PTR_ERR(tlink);
409 }
410 tcon = tlink_tcon(tlink);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700411 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -0800413 full_path = build_path_from_dentry(file->f_path.dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 if (full_path == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530415 rc = -ENOMEM;
Jeff Layton232341b2010-08-05 13:58:38 -0400416 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 }
418
Joe Perchesb6b38f72010-04-21 03:50:45 +0000419 cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
420 inode, file->f_flags, full_path);
Steve French276a74a2009-03-03 18:00:34 +0000421
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700422 if (server->oplocks)
Steve French276a74a2009-03-03 18:00:34 +0000423 oplock = REQ_OPLOCK;
424 else
425 oplock = 0;
426
Steve French64cc2c62009-03-04 19:54:08 +0000427 if (!tcon->broken_posix_open && tcon->unix_ext &&
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400428 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
429 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Steve French276a74a2009-03-03 18:00:34 +0000430 /* can not refresh inode info since size could be stale */
Jeff Layton2422f672010-06-16 13:40:16 -0400431 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
Steve Frenchfa588e02010-04-22 19:21:55 +0000432 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700433 file->f_flags, &oplock, &fid.netfid, xid);
Steve French276a74a2009-03-03 18:00:34 +0000434 if (rc == 0) {
Joe Perchesb6b38f72010-04-21 03:50:45 +0000435 cFYI(1, "posix open succeeded");
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300436 posix_open_ok = true;
Steve French64cc2c62009-03-04 19:54:08 +0000437 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
438 if (tcon->ses->serverNOS)
Joe Perchesb6b38f72010-04-21 03:50:45 +0000439 cERROR(1, "server %s of type %s returned"
Steve French64cc2c62009-03-04 19:54:08 +0000440 " unexpected error on SMB posix open"
441 ", disabling posix open support."
442 " Check if server update available.",
443 tcon->ses->serverName,
Joe Perchesb6b38f72010-04-21 03:50:45 +0000444 tcon->ses->serverNOS);
Steve French64cc2c62009-03-04 19:54:08 +0000445 tcon->broken_posix_open = true;
Steve French276a74a2009-03-03 18:00:34 +0000446 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
447 (rc != -EOPNOTSUPP)) /* path not found or net err */
448 goto out;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700449 /*
450 * Else fallthrough to retry open the old way on network i/o
451 * or DFS errors.
452 */
Steve French276a74a2009-03-03 18:00:34 +0000453 }
454
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700455 if (server->ops->get_lease_key)
456 server->ops->get_lease_key(inode, &fid);
457
458 cifs_add_pending_open(&fid, tlink, &open);
459
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300460 if (!posix_open_ok) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700461 if (server->ops->get_lease_key)
462 server->ops->get_lease_key(inode, &fid);
463
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300464 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700465 file->f_flags, &oplock, &fid, xid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700466 if (rc) {
467 cifs_del_pending_open(&open);
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300468 goto out;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700469 }
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300470 }
Jeff Layton47c78b72010-06-16 13:40:17 -0400471
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700472 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
473 if (cfile == NULL) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700474 if (server->ops->close)
475 server->ops->close(xid, tcon, &fid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700476 cifs_del_pending_open(&open);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477 rc = -ENOMEM;
478 goto out;
479 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +0530481 cifs_fscache_set_inode_cookie(inode, file);
482
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300483 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700484 /*
485 * Time to set mode which we can not set earlier due to
486 * problems creating new read-only files.
487 */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300488 struct cifs_unix_set_info_args args = {
489 .mode = inode->i_mode,
490 .uid = NO_CHANGE_64,
491 .gid = NO_CHANGE_64,
492 .ctime = NO_CHANGE_64,
493 .atime = NO_CHANGE_64,
494 .mtime = NO_CHANGE_64,
495 .device = 0,
496 };
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700497 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
498 cfile->pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 }
500
501out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400503 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400504 cifs_put_tlink(tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 return rc;
506}
507
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700508/*
509 * Try to reacquire byte range locks that were released when session
510 * to server was lost
511 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512static int cifs_relock_file(struct cifsFileInfo *cifsFile)
513{
514 int rc = 0;
515
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700516 /* BB list all locks open on this file and relock */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517
518 return rc;
519}
520
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700521static int
522cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523{
524 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400525 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400526 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000528 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700529 struct TCP_Server_Info *server;
530 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000531 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700533 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500535 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700536 struct cifs_fid fid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400538 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700539 mutex_lock(&cfile->fh_mutex);
540 if (!cfile->invalidHandle) {
541 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530542 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400543 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530544 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 }
546
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700547 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700549 tcon = tlink_tcon(cfile->tlink);
550 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000551
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700552 /*
553 * Can not grab rename sem here because various ops, including those
554 * that already have the rename sem can end up causing writepage to get
555 * called and if the server was down that means we end up here, and we
556 * can never tell if the caller already has the rename_sem.
557 */
558 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000560 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700561 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400562 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000563 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 }
565
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700566 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
567 full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300569 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570 oplock = REQ_OPLOCK;
571 else
Steve French4b18f2a2008-04-29 00:06:05 +0000572 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400574 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000575 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400576 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400577 /*
578 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
579 * original open. Must mask them off for a reopen.
580 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700581 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400582 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400583
Jeff Layton2422f672010-06-16 13:40:16 -0400584 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700585 cifs_sb->mnt_file_mode /* ignored */,
586 oflags, &oplock, &fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000587 if (rc == 0) {
Joe Perchesb6b38f72010-04-21 03:50:45 +0000588 cFYI(1, "posix reopen succeeded");
Steve French7fc8f4e2009-02-23 20:43:11 +0000589 goto reopen_success;
590 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700591 /*
592 * fallthrough to retry open the old way on errors, especially
593 * in the reconnect path it is important to retry hard
594 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000595 }
596
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700597 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000598
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500599 if (backup_cred(cifs_sb))
600 create_options |= CREATE_OPEN_BACKUP_INTENT;
601
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700602 if (server->ops->get_lease_key)
603 server->ops->get_lease_key(inode, &fid);
604
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700605 /*
606 * Can not refresh inode by passing in file_info buf to be returned by
607 * CIFSSMBOpen and then calling get_inode_info with returned buf since
608 * file might have write behind data that needs to be flushed and server
609 * version of file size can be stale. If we knew for sure that inode was
610 * not dirty locally we could do this.
611 */
612 rc = server->ops->open(xid, tcon, full_path, disposition,
613 desired_access, create_options, &fid, &oplock,
614 NULL, cifs_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700616 mutex_unlock(&cfile->fh_mutex);
617 cFYI(1, "cifs_reopen returned 0x%x", rc);
Joe Perchesb6b38f72010-04-21 03:50:45 +0000618 cFYI(1, "oplock: %d", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400619 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 }
Jeff Layton15886172010-10-15 15:33:59 -0400621
622reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700623 cfile->invalidHandle = false;
624 mutex_unlock(&cfile->fh_mutex);
625 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400626
627 if (can_flush) {
628 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400629 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400630
Jeff Layton15886172010-10-15 15:33:59 -0400631 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700632 rc = cifs_get_inode_info_unix(&inode, full_path,
633 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400634 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700635 rc = cifs_get_inode_info(&inode, full_path, NULL,
636 inode->i_sb, xid, NULL);
637 }
638 /*
639 * Else we are writing out data to server already and could deadlock if
640 * we tried to flush data, and since we do not know if we have data that
641 * would invalidate the current end of file on the server we can not go
642 * to the server to get the new inode info.
643 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300644
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700645 server->ops->set_fid(cfile, &fid, oplock);
646 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400647
648reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400650 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 return rc;
652}
653
654int cifs_close(struct inode *inode, struct file *file)
655{
Jeff Layton77970692011-04-05 16:23:47 -0700656 if (file->private_data != NULL) {
657 cifsFileInfo_put(file->private_data);
658 file->private_data = NULL;
659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660
Steve Frenchcdff08e2010-10-21 22:46:14 +0000661 /* return code from the ->release op is always ignored */
662 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663}
664
665int cifs_closedir(struct inode *inode, struct file *file)
666{
667 int rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400668 unsigned int xid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700669 struct cifsFileInfo *cfile = file->private_data;
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700670 struct cifs_tcon *tcon;
671 struct TCP_Server_Info *server;
672 char *buf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673
Joe Perchesb6b38f72010-04-21 03:50:45 +0000674 cFYI(1, "Closedir inode = 0x%p", inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700676 if (cfile == NULL)
677 return rc;
678
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400679 xid = get_xid();
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700680 tcon = tlink_tcon(cfile->tlink);
681 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700683 cFYI(1, "Freeing private data in close dir");
684 spin_lock(&cifs_file_list_lock);
685 if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
686 cfile->invalidHandle = true;
687 spin_unlock(&cifs_file_list_lock);
688 if (server->ops->close_dir)
689 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
690 else
691 rc = -ENOSYS;
692 cFYI(1, "Closing uncompleted readdir with rc %d", rc);
693 /* not much we can do if it fails anyway, ignore rc */
694 rc = 0;
695 } else
696 spin_unlock(&cifs_file_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700698 buf = cfile->srch_inf.ntwrk_buf_start;
699 if (buf) {
700 cFYI(1, "closedir free smb buf in srch struct");
701 cfile->srch_inf.ntwrk_buf_start = NULL;
702 if (cfile->srch_inf.smallBuf)
703 cifs_small_buf_release(buf);
704 else
705 cifs_buf_release(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 }
Pavel Shilovsky92fc65a2012-09-18 16:20:32 -0700707
708 cifs_put_tlink(cfile->tlink);
709 kfree(file->private_data);
710 file->private_data = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 /* BB can we lock the filestruct while this is going on? */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400712 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 return rc;
714}
715
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400716static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300717cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000718{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400719 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000720 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400721 if (!lock)
722 return lock;
723 lock->offset = offset;
724 lock->length = length;
725 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400726 lock->pid = current->tgid;
727 INIT_LIST_HEAD(&lock->blist);
728 init_waitqueue_head(&lock->block_q);
729 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400730}
731
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700732void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400733cifs_del_lock_waiters(struct cifsLockInfo *lock)
734{
735 struct cifsLockInfo *li, *tmp;
736 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
737 list_del_init(&li->blist);
738 wake_up(&li->block_q);
739 }
740}
741
742static bool
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700743cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
744 __u64 length, __u8 type, struct cifsFileInfo *cfile,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700745 struct cifsLockInfo **conf_lock, bool rw_check)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400746{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300747 struct cifsLockInfo *li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700748 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300749 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400750
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700751 list_for_each_entry(li, &fdlocks->locks, llist) {
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400752 if (offset + length <= li->offset ||
753 offset >= li->offset + li->length)
754 continue;
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700755 if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
756 current->tgid == li->pid)
757 continue;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700758 if ((type & server->vals->shared_lock_type) &&
759 ((server->ops->compare_fids(cfile, cur_cfile) &&
760 current->tgid == li->pid) || type == li->type))
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400761 continue;
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700762 if (conf_lock)
763 *conf_lock = li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700764 return true;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400765 }
766 return false;
767}
768
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700769bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300770cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700771 __u8 type, struct cifsLockInfo **conf_lock,
772 bool rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400773{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300774 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700775 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300776 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300777
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700778 list_for_each_entry(cur, &cinode->llist, llist) {
779 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700780 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300781 if (rc)
782 break;
783 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300784
785 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400786}
787
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300788/*
789 * Check if there is another lock that prevents us to set the lock (mandatory
790 * style). If such a lock exists, update the flock structure with its
791 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
792 * or leave it the same if we can't. Returns 0 if we don't need to request to
793 * the server or 1 otherwise.
794 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400795static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300796cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
797 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400798{
799 int rc = 0;
800 struct cifsLockInfo *conf_lock;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300801 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300802 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400803 bool exist;
804
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700805 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400806
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300807 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700808 &conf_lock, false);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400809 if (exist) {
810 flock->fl_start = conf_lock->offset;
811 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
812 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300813 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400814 flock->fl_type = F_RDLCK;
815 else
816 flock->fl_type = F_WRLCK;
817 } else if (!cinode->can_cache_brlcks)
818 rc = 1;
819 else
820 flock->fl_type = F_UNLCK;
821
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700822 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400823 return rc;
824}
825
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400826static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300827cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400828{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300829 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700830 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700831 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700832 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000833}
834
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300835/*
836 * Set the byte-range lock (mandatory style). Returns:
837 * 1) 0, if we set the lock and don't need to request to the server;
838 * 2) 1, if no locks prevent us but we need to request to the server;
839 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
840 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400841static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300842cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400843 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400844{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400845 struct cifsLockInfo *conf_lock;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300846 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400847 bool exist;
848 int rc = 0;
849
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400850try_again:
851 exist = false;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700852 down_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400853
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300854 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700855 lock->type, &conf_lock, false);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400856 if (!exist && cinode->can_cache_brlcks) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700857 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700858 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400859 return rc;
860 }
861
862 if (!exist)
863 rc = 1;
864 else if (!wait)
865 rc = -EACCES;
866 else {
867 list_add_tail(&lock->blist, &conf_lock->blist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700868 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400869 rc = wait_event_interruptible(lock->block_q,
870 (lock->blist.prev == &lock->blist) &&
871 (lock->blist.next == &lock->blist));
872 if (!rc)
873 goto try_again;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700874 down_write(&cinode->lock_sem);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400875 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400876 }
877
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700878 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400879 return rc;
880}
881
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300882/*
883 * Check if there is another lock that prevents us to set the lock (posix
884 * style). If such a lock exists, update the flock structure with its
885 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
886 * or leave it the same if we can't. Returns 0 if we don't need to request to
887 * the server or 1 otherwise.
888 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400889static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400890cifs_posix_lock_test(struct file *file, struct file_lock *flock)
891{
892 int rc = 0;
893 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
894 unsigned char saved_type = flock->fl_type;
895
Pavel Shilovsky50792762011-10-29 17:17:57 +0400896 if ((flock->fl_flags & FL_POSIX) == 0)
897 return 1;
898
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700899 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400900 posix_test_lock(file, flock);
901
902 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
903 flock->fl_type = saved_type;
904 rc = 1;
905 }
906
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700907 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400908 return rc;
909}
910
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300911/*
912 * Set the byte-range lock (posix style). Returns:
913 * 1) 0, if we set the lock and don't need to request to the server;
914 * 2) 1, if we need to request to the server;
915 * 3) <0, if the error occurs while setting the lock.
916 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400917static int
918cifs_posix_lock_set(struct file *file, struct file_lock *flock)
919{
920 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400921 int rc = 1;
922
923 if ((flock->fl_flags & FL_POSIX) == 0)
924 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400925
Pavel Shilovsky66189be2012-03-28 21:56:19 +0400926try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700927 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400928 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700929 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400930 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400931 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +0400932
933 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700934 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +0400935 if (rc == FILE_LOCK_DEFERRED) {
936 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
937 if (!rc)
938 goto try_again;
939 locks_delete_block(flock);
940 }
Steve French9ebb3892012-04-01 13:52:54 -0500941 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400942}
943
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -0700944int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400945cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400946{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400947 unsigned int xid;
948 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400949 struct cifsLockInfo *li, *tmp;
950 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400951 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400952 LOCKING_ANDX_RANGE *buf, *cur;
953 int types[] = {LOCKING_ANDX_LARGE_FILES,
954 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
955 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400956
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400957 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400958 tcon = tlink_tcon(cfile->tlink);
959
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400960 /*
961 * Accessing maxBuf is racy with cifs_reconnect - need to store value
962 * and check it for zero before using.
963 */
964 max_buf = tcon->ses->server->maxBuf;
965 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400966 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400967 return -EINVAL;
968 }
969
970 max_num = (max_buf - sizeof(struct smb_hdr)) /
971 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400972 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
973 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400974 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +0400975 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400976 }
977
978 for (i = 0; i < 2; i++) {
979 cur = buf;
980 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700981 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400982 if (li->type != types[i])
983 continue;
984 cur->Pid = cpu_to_le16(li->pid);
985 cur->LengthLow = cpu_to_le32((u32)li->length);
986 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
987 cur->OffsetLow = cpu_to_le32((u32)li->offset);
988 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
989 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700990 stored_rc = cifs_lockv(xid, tcon,
991 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +0300992 (__u8)li->type, 0, num,
993 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400994 if (stored_rc)
995 rc = stored_rc;
996 cur = buf;
997 num = 0;
998 } else
999 cur++;
1000 }
1001
1002 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001003 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001004 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001005 if (stored_rc)
1006 rc = stored_rc;
1007 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001008 }
1009
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001010 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001011 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001012 return rc;
1013}
1014
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001015/* copied from fs/locks.c with a name change */
1016#define cifs_for_each_lock(inode, lockp) \
1017 for (lockp = &inode->i_flock; *lockp != NULL; \
1018 lockp = &(*lockp)->fl_next)
1019
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001020struct lock_to_push {
1021 struct list_head llist;
1022 __u64 offset;
1023 __u64 length;
1024 __u32 pid;
1025 __u16 netfid;
1026 __u8 type;
1027};
1028
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001029static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001030cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001031{
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001032 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1033 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001034 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001035 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001036 struct list_head locks_to_send, *el;
1037 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001038 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001039
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001040 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001041
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001042 lock_flocks();
1043 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001044 if ((*before)->fl_flags & FL_POSIX)
1045 count++;
1046 }
1047 unlock_flocks();
1048
1049 INIT_LIST_HEAD(&locks_to_send);
1050
1051 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001052 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001053 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001054 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001055 */
1056 for (; i < count; i++) {
1057 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1058 if (!lck) {
1059 rc = -ENOMEM;
1060 goto err_out;
1061 }
1062 list_add_tail(&lck->llist, &locks_to_send);
1063 }
1064
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001065 el = locks_to_send.next;
1066 lock_flocks();
1067 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001068 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001069 if ((flock->fl_flags & FL_POSIX) == 0)
1070 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001071 if (el == &locks_to_send) {
1072 /*
1073 * The list ended. We don't have enough allocated
1074 * structures - something is really wrong.
1075 */
1076 cERROR(1, "Can't push all brlocks!");
1077 break;
1078 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001079 length = 1 + flock->fl_end - flock->fl_start;
1080 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1081 type = CIFS_RDLCK;
1082 else
1083 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001084 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001085 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001086 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001087 lck->length = length;
1088 lck->type = type;
1089 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001090 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001091 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001092 unlock_flocks();
1093
1094 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001095 int stored_rc;
1096
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001097 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001098 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 lck->type, 0);
1100 if (stored_rc)
1101 rc = stored_rc;
1102 list_del(&lck->llist);
1103 kfree(lck);
1104 }
1105
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001106out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001107 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001108 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001109err_out:
1110 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1111 list_del(&lck->llist);
1112 kfree(lck);
1113 }
1114 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001115}
1116
1117static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001118cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001119{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001120 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001121 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001122 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001123 int rc = 0;
1124
1125 /* we are going to update can_cache_brlcks here - need a write access */
1126 down_write(&cinode->lock_sem);
1127 if (!cinode->can_cache_brlcks) {
1128 up_write(&cinode->lock_sem);
1129 return rc;
1130 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001131
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001132 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001133 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1134 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001135 rc = cifs_push_posix_locks(cfile);
1136 else
1137 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001138
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001139 cinode->can_cache_brlcks = false;
1140 up_write(&cinode->lock_sem);
1141 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001142}
1143
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001144static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001145cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001146 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001148 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001149 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001150 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001151 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001152 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001153 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001154 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001156 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001157 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001158 "not implemented yet");
1159 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001160 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001161 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001162 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1163 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001166 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001167 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001168 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001169 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001170 *lock = 1;
1171 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001172 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001173 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001174 *unlock = 1;
1175 /* Check if unlock includes more than one lock range */
1176 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001177 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001178 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001179 *lock = 1;
1180 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001181 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001182 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 *lock = 1;
1184 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001185 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001186 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001187 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001189 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001190}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001192static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001193cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001194 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001195{
1196 int rc = 0;
1197 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001198 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1199 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001200 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001201 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001203 if (posix_lck) {
1204 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001205
1206 rc = cifs_posix_lock_test(file, flock);
1207 if (!rc)
1208 return rc;
1209
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001210 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001211 posix_lock_type = CIFS_RDLCK;
1212 else
1213 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001214 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001215 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001216 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 return rc;
1218 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001219
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001220 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001221 if (!rc)
1222 return rc;
1223
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001224 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001225 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1226 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001227 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001228 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1229 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001230 flock->fl_type = F_UNLCK;
1231 if (rc != 0)
1232 cERROR(1, "Error unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001233 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001234 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001235 }
1236
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001237 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001238 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001239 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 }
1241
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001242 type &= ~server->vals->exclusive_lock_type;
1243
1244 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1245 type | server->vals->shared_lock_type,
1246 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001247 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001248 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1249 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001250 flock->fl_type = F_RDLCK;
1251 if (rc != 0)
1252 cERROR(1, "Error unlocking previously locked "
1253 "range %d during test of lock", rc);
1254 } else
1255 flock->fl_type = F_WRLCK;
1256
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001257 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001258}
1259
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001260void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001261cifs_move_llist(struct list_head *source, struct list_head *dest)
1262{
1263 struct list_head *li, *tmp;
1264 list_for_each_safe(li, tmp, source)
1265 list_move(li, dest);
1266}
1267
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001268void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001269cifs_free_llist(struct list_head *llist)
1270{
1271 struct cifsLockInfo *li, *tmp;
1272 list_for_each_entry_safe(li, tmp, llist, llist) {
1273 cifs_del_lock_waiters(li);
1274 list_del(&li->llist);
1275 kfree(li);
1276 }
1277}
1278
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001279int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001280cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1281 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001282{
1283 int rc = 0, stored_rc;
1284 int types[] = {LOCKING_ANDX_LARGE_FILES,
1285 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1286 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001287 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001288 LOCKING_ANDX_RANGE *buf, *cur;
1289 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1290 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1291 struct cifsLockInfo *li, *tmp;
1292 __u64 length = 1 + flock->fl_end - flock->fl_start;
1293 struct list_head tmp_llist;
1294
1295 INIT_LIST_HEAD(&tmp_llist);
1296
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001297 /*
1298 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1299 * and check it for zero before using.
1300 */
1301 max_buf = tcon->ses->server->maxBuf;
1302 if (!max_buf)
1303 return -EINVAL;
1304
1305 max_num = (max_buf - sizeof(struct smb_hdr)) /
1306 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001307 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1308 if (!buf)
1309 return -ENOMEM;
1310
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001311 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001312 for (i = 0; i < 2; i++) {
1313 cur = buf;
1314 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001315 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001316 if (flock->fl_start > li->offset ||
1317 (flock->fl_start + length) <
1318 (li->offset + li->length))
1319 continue;
1320 if (current->tgid != li->pid)
1321 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001322 if (types[i] != li->type)
1323 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001324 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001325 /*
1326 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001327 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001328 */
1329 list_del(&li->llist);
1330 cifs_del_lock_waiters(li);
1331 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001332 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001333 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001334 cur->Pid = cpu_to_le16(li->pid);
1335 cur->LengthLow = cpu_to_le32((u32)li->length);
1336 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1337 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1338 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1339 /*
1340 * We need to save a lock here to let us add it again to
1341 * the file's list if the unlock range request fails on
1342 * the server.
1343 */
1344 list_move(&li->llist, &tmp_llist);
1345 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001346 stored_rc = cifs_lockv(xid, tcon,
1347 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001348 li->type, num, 0, buf);
1349 if (stored_rc) {
1350 /*
1351 * We failed on the unlock range
1352 * request - add all locks from the tmp
1353 * list to the head of the file's list.
1354 */
1355 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001356 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001357 rc = stored_rc;
1358 } else
1359 /*
1360 * The unlock range request succeed -
1361 * free the tmp list.
1362 */
1363 cifs_free_llist(&tmp_llist);
1364 cur = buf;
1365 num = 0;
1366 } else
1367 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001368 }
1369 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001370 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001371 types[i], num, 0, buf);
1372 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001373 cifs_move_llist(&tmp_llist,
1374 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001375 rc = stored_rc;
1376 } else
1377 cifs_free_llist(&tmp_llist);
1378 }
1379 }
1380
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001381 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001382 kfree(buf);
1383 return rc;
1384}
1385
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001387cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001388 bool wait_flag, bool posix_lck, int lock, int unlock,
1389 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390{
1391 int rc = 0;
1392 __u64 length = 1 + flock->fl_end - flock->fl_start;
1393 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1394 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001395 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001396
1397 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001398 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001399
1400 rc = cifs_posix_lock_set(file, flock);
1401 if (!rc || rc < 0)
1402 return rc;
1403
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001404 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001405 posix_lock_type = CIFS_RDLCK;
1406 else
1407 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001408
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001410 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001411
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001412 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1413 current->tgid, flock->fl_start, length,
1414 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001416 }
1417
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001418 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001419 struct cifsLockInfo *lock;
1420
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001421 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001422 if (!lock)
1423 return -ENOMEM;
1424
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001425 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001426 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001427 kfree(lock);
1428 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001429 goto out;
1430
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001431 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1432 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001433 if (rc) {
1434 kfree(lock);
1435 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001436 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001437
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001438 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001439 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001440 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001441
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001442out:
1443 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001444 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001445 return rc;
1446}
1447
1448int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1449{
1450 int rc, xid;
1451 int lock = 0, unlock = 0;
1452 bool wait_flag = false;
1453 bool posix_lck = false;
1454 struct cifs_sb_info *cifs_sb;
1455 struct cifs_tcon *tcon;
1456 struct cifsInodeInfo *cinode;
1457 struct cifsFileInfo *cfile;
1458 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001459 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001460
1461 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001462 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001463
1464 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1465 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1466 flock->fl_start, flock->fl_end);
1467
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001468 cfile = (struct cifsFileInfo *)file->private_data;
1469 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001470
1471 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1472 tcon->ses->server);
1473
1474 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001475 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476 cinode = CIFS_I(file->f_path.dentry->d_inode);
1477
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001478 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001479 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1480 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1481 posix_lck = true;
1482 /*
1483 * BB add code here to normalize offset and length to account for
1484 * negative length which we can not accept over the wire.
1485 */
1486 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001487 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001488 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001489 return rc;
1490 }
1491
1492 if (!lock && !unlock) {
1493 /*
1494 * if no lock or unlock then nothing to do since we do not
1495 * know what it is
1496 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001497 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001498 return -EOPNOTSUPP;
1499 }
1500
1501 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1502 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001503 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 return rc;
1505}
1506
Jeff Layton597b0272012-03-23 14:40:56 -04001507/*
1508 * update the file size (if needed) after a write. Should be called with
1509 * the inode->i_lock held
1510 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001511void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001512cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1513 unsigned int bytes_written)
1514{
1515 loff_t end_of_write = offset + bytes_written;
1516
1517 if (end_of_write > cifsi->server_eof)
1518 cifsi->server_eof = end_of_write;
1519}
1520
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001521static ssize_t
1522cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1523 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524{
1525 int rc = 0;
1526 unsigned int bytes_written = 0;
1527 unsigned int total_written;
1528 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001529 struct cifs_tcon *tcon;
1530 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001531 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001532 struct dentry *dentry = open_file->dentry;
1533 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001534 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535
Jeff Layton7da4b492010-10-15 15:34:00 -04001536 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
Joe Perchesb6b38f72010-04-21 03:50:45 +00001538 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001539 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001541 tcon = tlink_tcon(open_file->tlink);
1542 server = tcon->ses->server;
1543
1544 if (!server->ops->sync_write)
1545 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001546
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001547 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 for (total_written = 0; write_size > total_written;
1550 total_written += bytes_written) {
1551 rc = -EAGAIN;
1552 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001553 struct kvec iov[2];
1554 unsigned int len;
1555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 /* we could deadlock if we called
1558 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001559 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001561 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 if (rc != 0)
1563 break;
1564 }
Steve French3e844692005-10-03 13:37:24 -07001565
Jeff Laytonca83ce32011-04-12 09:13:44 -04001566 len = min((size_t)cifs_sb->wsize,
1567 write_size - total_written);
1568 /* iov[0] is reserved for smb header */
1569 iov[1].iov_base = (char *)write_data + total_written;
1570 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001571 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001572 io_parms.tcon = tcon;
1573 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001574 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001575 rc = server->ops->sync_write(xid, open_file, &io_parms,
1576 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 }
1578 if (rc || (bytes_written == 0)) {
1579 if (total_written)
1580 break;
1581 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001582 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 return rc;
1584 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001585 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001586 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001587 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001588 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001589 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001590 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 }
1592
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001593 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
Jeff Layton7da4b492010-10-15 15:34:00 -04001595 if (total_written > 0) {
1596 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001597 if (*offset > dentry->d_inode->i_size)
1598 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001599 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001601 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001602 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 return total_written;
1604}
1605
Jeff Layton6508d902010-09-29 19:51:11 -04001606struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1607 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001608{
1609 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001610 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1611
1612 /* only filter by fsuid on multiuser mounts */
1613 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1614 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001615
Jeff Layton44772882010-10-15 15:34:03 -04001616 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001617 /* we could simply get the first_list_entry since write-only entries
1618 are always at the end of the list but since the first entry might
1619 have a close pending, we go through the whole list */
1620 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001621 if (fsuid_only && open_file->uid != current_fsuid())
1622 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001623 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001624 if (!open_file->invalidHandle) {
1625 /* found a good file */
1626 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001627 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001628 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001629 return open_file;
1630 } /* else might as well continue, and look for
1631 another, or simply have the caller reopen it
1632 again rather than trying to fix this handle */
1633 } else /* write only file */
1634 break; /* write only files are last so must be done */
1635 }
Jeff Layton44772882010-10-15 15:34:03 -04001636 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001637 return NULL;
1638}
Steve French630f3f0c2007-10-25 21:17:17 +00001639
Jeff Layton6508d902010-09-29 19:51:11 -04001640struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1641 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001642{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001643 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001644 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001645 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001646 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001647 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001648
Steve French60808232006-04-22 15:53:05 +00001649 /* Having a null inode here (because mapping->host was set to zero by
1650 the VFS or MM) should not happen but we had reports of on oops (due to
1651 it being zero) during stress testcases so we need to check for it */
1652
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001653 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001654 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001655 dump_stack();
1656 return NULL;
1657 }
1658
Jeff Laytond3892292010-11-02 16:22:50 -04001659 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1660
Jeff Layton6508d902010-09-29 19:51:11 -04001661 /* only filter by fsuid on multiuser mounts */
1662 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1663 fsuid_only = false;
1664
Jeff Layton44772882010-10-15 15:34:03 -04001665 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001666refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001667 if (refind > MAX_REOPEN_ATT) {
1668 spin_unlock(&cifs_file_list_lock);
1669 return NULL;
1670 }
Steve French6148a742005-10-05 12:23:19 -07001671 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001672 if (!any_available && open_file->pid != current->tgid)
1673 continue;
1674 if (fsuid_only && open_file->uid != current_fsuid())
1675 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001676 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001677 if (!open_file->invalidHandle) {
1678 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001679 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001680 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001681 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001682 } else {
1683 if (!inv_file)
1684 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001685 }
Steve French6148a742005-10-05 12:23:19 -07001686 }
1687 }
Jeff Layton2846d382008-09-22 21:33:33 -04001688 /* couldn't find useable FH with same pid, try any available */
1689 if (!any_available) {
1690 any_available = true;
1691 goto refind_writable;
1692 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001693
1694 if (inv_file) {
1695 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001696 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001697 }
1698
Jeff Layton44772882010-10-15 15:34:03 -04001699 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001700
1701 if (inv_file) {
1702 rc = cifs_reopen_file(inv_file, false);
1703 if (!rc)
1704 return inv_file;
1705 else {
1706 spin_lock(&cifs_file_list_lock);
1707 list_move_tail(&inv_file->flist,
1708 &cifs_inode->openFileList);
1709 spin_unlock(&cifs_file_list_lock);
1710 cifsFileInfo_put(inv_file);
1711 spin_lock(&cifs_file_list_lock);
1712 ++refind;
1713 goto refind_writable;
1714 }
1715 }
1716
Steve French6148a742005-10-05 12:23:19 -07001717 return NULL;
1718}
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1721{
1722 struct address_space *mapping = page->mapping;
1723 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1724 char *write_data;
1725 int rc = -EFAULT;
1726 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001728 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
1730 if (!mapping || !mapping->host)
1731 return -EFAULT;
1732
1733 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
1735 offset += (loff_t)from;
1736 write_data = kmap(page);
1737 write_data += from;
1738
1739 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1740 kunmap(page);
1741 return -EIO;
1742 }
1743
1744 /* racing with truncate? */
1745 if (offset > mapping->host->i_size) {
1746 kunmap(page);
1747 return 0; /* don't care */
1748 }
1749
1750 /* check to make sure that we are not extending the file */
1751 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001752 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
Jeff Layton6508d902010-09-29 19:51:11 -04001754 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001755 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001756 bytes_written = cifs_write(open_file, open_file->pid,
1757 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001758 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001760 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001761 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001762 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001763 else if (bytes_written < 0)
1764 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001765 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001766 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 rc = -EIO;
1768 }
1769
1770 kunmap(page);
1771 return rc;
1772}
1773
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001775 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001777 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1778 bool done = false, scanned = false, range_whole = false;
1779 pgoff_t end, index;
1780 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001781 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001782 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001783 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001784
Steve French37c0eb42005-10-05 14:50:29 -07001785 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001786 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001787 * one page at a time via cifs_writepage
1788 */
1789 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1790 return generic_writepages(mapping, wbc);
1791
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001792 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001793 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001794 end = -1;
1795 } else {
1796 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1797 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1798 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001799 range_whole = true;
1800 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001801 }
1802retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001803 while (!done && index <= end) {
1804 unsigned int i, nr_pages, found_pages;
1805 pgoff_t next = 0, tofind;
1806 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001807
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001808 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1809 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001810
Jeff Laytonc2e87642012-03-23 14:40:55 -04001811 wdata = cifs_writedata_alloc((unsigned int)tofind,
1812 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001813 if (!wdata) {
1814 rc = -ENOMEM;
1815 break;
1816 }
1817
1818 /*
1819 * find_get_pages_tag seems to return a max of 256 on each
1820 * iteration, so we must call it several times in order to
1821 * fill the array or the wsize is effectively limited to
1822 * 256 * PAGE_CACHE_SIZE.
1823 */
1824 found_pages = 0;
1825 pages = wdata->pages;
1826 do {
1827 nr_pages = find_get_pages_tag(mapping, &index,
1828 PAGECACHE_TAG_DIRTY,
1829 tofind, pages);
1830 found_pages += nr_pages;
1831 tofind -= nr_pages;
1832 pages += nr_pages;
1833 } while (nr_pages && tofind && index <= end);
1834
1835 if (found_pages == 0) {
1836 kref_put(&wdata->refcount, cifs_writedata_release);
1837 break;
1838 }
1839
1840 nr_pages = 0;
1841 for (i = 0; i < found_pages; i++) {
1842 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001843 /*
1844 * At this point we hold neither mapping->tree_lock nor
1845 * lock on the page itself: the page may be truncated or
1846 * invalidated (changing page->mapping to NULL), or even
1847 * swizzled back from swapper_space to tmpfs file
1848 * mapping
1849 */
1850
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001851 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001852 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001853 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001854 break;
1855
1856 if (unlikely(page->mapping != mapping)) {
1857 unlock_page(page);
1858 break;
1859 }
1860
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001861 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001862 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001863 unlock_page(page);
1864 break;
1865 }
1866
1867 if (next && (page->index != next)) {
1868 /* Not next consecutive page */
1869 unlock_page(page);
1870 break;
1871 }
1872
1873 if (wbc->sync_mode != WB_SYNC_NONE)
1874 wait_on_page_writeback(page);
1875
1876 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001877 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001878 unlock_page(page);
1879 break;
1880 }
Steve French84d2f072005-10-12 15:32:05 -07001881
Linus Torvaldscb876f42006-12-23 16:19:07 -08001882 /*
1883 * This actually clears the dirty bit in the radix tree.
1884 * See cifs_writepage() for more commentary.
1885 */
1886 set_page_writeback(page);
1887
Jeff Layton3a98b862012-11-26 09:48:41 -05001888 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001889 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001890 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001891 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001892 break;
1893 }
1894
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001895 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001896 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001897 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001898 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001899
1900 /* reset index to refind any pages skipped */
1901 if (nr_pages == 0)
1902 index = wdata->pages[0]->index + 1;
1903
1904 /* put any pages we aren't going to use */
1905 for (i = nr_pages; i < found_pages; i++) {
1906 page_cache_release(wdata->pages[i]);
1907 wdata->pages[i] = NULL;
1908 }
1909
1910 /* nothing to write? */
1911 if (nr_pages == 0) {
1912 kref_put(&wdata->refcount, cifs_writedata_release);
1913 continue;
1914 }
1915
1916 wdata->sync_mode = wbc->sync_mode;
1917 wdata->nr_pages = nr_pages;
1918 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001919 wdata->pagesz = PAGE_CACHE_SIZE;
1920 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05001921 min(i_size_read(mapping->host) -
1922 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07001923 (loff_t)PAGE_CACHE_SIZE);
1924 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1925 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001926
1927 do {
1928 if (wdata->cfile != NULL)
1929 cifsFileInfo_put(wdata->cfile);
1930 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1931 false);
1932 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001933 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001934 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001935 break;
Steve French37c0eb42005-10-05 14:50:29 -07001936 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001937 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001938 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1939 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001940 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001941
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001942 for (i = 0; i < nr_pages; ++i)
1943 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001944
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001945 /* send failure -- clean up the mess */
1946 if (rc != 0) {
1947 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001948 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001949 redirty_page_for_writepage(wbc,
1950 wdata->pages[i]);
1951 else
1952 SetPageError(wdata->pages[i]);
1953 end_page_writeback(wdata->pages[i]);
1954 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001955 }
Jeff Layton941b8532011-01-11 07:24:01 -05001956 if (rc != -EAGAIN)
1957 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001958 }
1959 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001960
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001961 wbc->nr_to_write -= nr_pages;
1962 if (wbc->nr_to_write <= 0)
1963 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001964
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001965 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001966 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001967
Steve French37c0eb42005-10-05 14:50:29 -07001968 if (!scanned && !done) {
1969 /*
1970 * We hit the last page and there is more work to be done: wrap
1971 * back to the start of the file
1972 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001973 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001974 index = 0;
1975 goto retry;
1976 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001977
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001978 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001979 mapping->writeback_index = index;
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 return rc;
1982}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001984static int
1985cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001987 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001988 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001990 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991/* BB add check for wbc flags */
1992 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001993 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001994 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001995
1996 /*
1997 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1998 *
1999 * A writepage() implementation always needs to do either this,
2000 * or re-dirty the page with "redirty_page_for_writepage()" in
2001 * the case of a failure.
2002 *
2003 * Just unlocking the page will cause the radix tree tag-bits
2004 * to fail to update with the state of the page correctly.
2005 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002006 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002007retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002009 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2010 goto retry_write;
2011 else if (rc == -EAGAIN)
2012 redirty_page_for_writepage(wbc, page);
2013 else if (rc != 0)
2014 SetPageError(page);
2015 else
2016 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002017 end_page_writeback(page);
2018 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002019 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 return rc;
2021}
2022
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002023static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2024{
2025 int rc = cifs_writepage_locked(page, wbc);
2026 unlock_page(page);
2027 return rc;
2028}
2029
Nick Piggind9414772008-09-24 11:32:59 -04002030static int cifs_write_end(struct file *file, struct address_space *mapping,
2031 loff_t pos, unsigned len, unsigned copied,
2032 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033{
Nick Piggind9414772008-09-24 11:32:59 -04002034 int rc;
2035 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002036 struct cifsFileInfo *cfile = file->private_data;
2037 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2038 __u32 pid;
2039
2040 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2041 pid = cfile->pid;
2042 else
2043 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Joe Perchesb6b38f72010-04-21 03:50:45 +00002045 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2046 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002047
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002048 if (PageChecked(page)) {
2049 if (copied == len)
2050 SetPageUptodate(page);
2051 ClearPageChecked(page);
2052 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002053 SetPageUptodate(page);
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002056 char *page_data;
2057 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002058 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002059
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002060 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 /* this is probably better than directly calling
2062 partialpage_write since in this function the file handle is
2063 known which we might as well leverage */
2064 /* BB check if anything else missing out of ppw
2065 such as updating last write time */
2066 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002067 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002068 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002070
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002071 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002072 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002073 rc = copied;
2074 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 set_page_dirty(page);
2076 }
2077
Nick Piggind9414772008-09-24 11:32:59 -04002078 if (rc > 0) {
2079 spin_lock(&inode->i_lock);
2080 if (pos > inode->i_size)
2081 i_size_write(inode, pos);
2082 spin_unlock(&inode->i_lock);
2083 }
2084
2085 unlock_page(page);
2086 page_cache_release(page);
2087
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 return rc;
2089}
2090
Josef Bacik02c24a82011-07-16 20:44:56 -04002091int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2092 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002094 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002096 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002097 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002098 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002099 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002100 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
Josef Bacik02c24a82011-07-16 20:44:56 -04002102 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2103 if (rc)
2104 return rc;
2105 mutex_lock(&inode->i_mutex);
2106
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002107 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
Joe Perchesb6b38f72010-04-21 03:50:45 +00002109 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002110 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002111
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002112 if (!CIFS_I(inode)->clientCanCacheRead) {
2113 rc = cifs_invalidate_mapping(inode);
2114 if (rc) {
2115 cFYI(1, "rc: %d during invalidate phase", rc);
2116 rc = 0; /* don't care about it in fsync */
2117 }
2118 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002119
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002120 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002121 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2122 server = tcon->ses->server;
2123 if (server->ops->flush)
2124 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2125 else
2126 rc = -ENOSYS;
2127 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002128
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002129 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002130 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002131 return rc;
2132}
2133
Josef Bacik02c24a82011-07-16 20:44:56 -04002134int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002135{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002136 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002137 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002138 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002139 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002140 struct cifsFileInfo *smbfile = file->private_data;
2141 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002142 struct inode *inode = file->f_mapping->host;
2143
2144 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2145 if (rc)
2146 return rc;
2147 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002148
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002149 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002150
2151 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2152 file->f_path.dentry->d_name.name, datasync);
2153
2154 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002155 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2156 server = tcon->ses->server;
2157 if (server->ops->flush)
2158 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2159 else
2160 rc = -ENOSYS;
2161 }
Steve Frenchb298f222009-02-21 21:17:43 +00002162
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002163 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002164 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 return rc;
2166}
2167
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168/*
2169 * As file closes, flush all cached write data for this inode checking
2170 * for write behind errors.
2171 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002172int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002174 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 int rc = 0;
2176
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002177 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002178 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002179
Joe Perchesb6b38f72010-04-21 03:50:45 +00002180 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181
2182 return rc;
2183}
2184
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002185static int
2186cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2187{
2188 int rc = 0;
2189 unsigned long i;
2190
2191 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002192 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002193 if (!pages[i]) {
2194 /*
2195 * save number of pages we have already allocated and
2196 * return with ENOMEM error
2197 */
2198 num_pages = i;
2199 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002200 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002201 }
2202 }
2203
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002204 if (rc) {
2205 for (i = 0; i < num_pages; i++)
2206 put_page(pages[i]);
2207 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002208 return rc;
2209}
2210
2211static inline
2212size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2213{
2214 size_t num_pages;
2215 size_t clen;
2216
2217 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002218 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002219
2220 if (cur_len)
2221 *cur_len = clen;
2222
2223 return num_pages;
2224}
2225
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002226static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002227cifs_uncached_writev_complete(struct work_struct *work)
2228{
2229 int i;
2230 struct cifs_writedata *wdata = container_of(work,
2231 struct cifs_writedata, work);
2232 struct inode *inode = wdata->cfile->dentry->d_inode;
2233 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2234
2235 spin_lock(&inode->i_lock);
2236 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2237 if (cifsi->server_eof > inode->i_size)
2238 i_size_write(inode, cifsi->server_eof);
2239 spin_unlock(&inode->i_lock);
2240
2241 complete(&wdata->done);
2242
2243 if (wdata->result != -EAGAIN) {
2244 for (i = 0; i < wdata->nr_pages; i++)
2245 put_page(wdata->pages[i]);
2246 }
2247
2248 kref_put(&wdata->refcount, cifs_writedata_release);
2249}
2250
2251/* attempt to send write to server, retry on any -EAGAIN errors */
2252static int
2253cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2254{
2255 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002256 struct TCP_Server_Info *server;
2257
2258 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002259
2260 do {
2261 if (wdata->cfile->invalidHandle) {
2262 rc = cifs_reopen_file(wdata->cfile, false);
2263 if (rc != 0)
2264 continue;
2265 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002266 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002267 } while (rc == -EAGAIN);
2268
2269 return rc;
2270}
2271
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002272static ssize_t
2273cifs_iovec_write(struct file *file, const struct iovec *iov,
2274 unsigned long nr_segs, loff_t *poffset)
2275{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002276 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002277 size_t copied, len, cur_len;
2278 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002279 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002280 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002281 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002282 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002283 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002284 struct cifs_writedata *wdata, *tmp;
2285 struct list_head wdata_list;
2286 int rc;
2287 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002288
2289 len = iov_length(iov, nr_segs);
2290 if (!len)
2291 return 0;
2292
2293 rc = generic_write_checks(file, poffset, &len, 0);
2294 if (rc)
2295 return rc;
2296
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002297 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002298 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002299 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002300 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002301
2302 if (!tcon->ses->server->ops->async_writev)
2303 return -ENOSYS;
2304
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002305 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002306
2307 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2308 pid = open_file->pid;
2309 else
2310 pid = current->tgid;
2311
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002312 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002313 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002314 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002315
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002316 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2317 wdata = cifs_writedata_alloc(nr_pages,
2318 cifs_uncached_writev_complete);
2319 if (!wdata) {
2320 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002321 break;
2322 }
2323
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002324 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2325 if (rc) {
2326 kfree(wdata);
2327 break;
2328 }
2329
2330 save_len = cur_len;
2331 for (i = 0; i < nr_pages; i++) {
2332 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2333 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2334 0, copied);
2335 cur_len -= copied;
2336 iov_iter_advance(&it, copied);
2337 }
2338 cur_len = save_len - cur_len;
2339
2340 wdata->sync_mode = WB_SYNC_ALL;
2341 wdata->nr_pages = nr_pages;
2342 wdata->offset = (__u64)offset;
2343 wdata->cfile = cifsFileInfo_get(open_file);
2344 wdata->pid = pid;
2345 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002346 wdata->pagesz = PAGE_SIZE;
2347 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002348 rc = cifs_uncached_retry_writev(wdata);
2349 if (rc) {
2350 kref_put(&wdata->refcount, cifs_writedata_release);
2351 break;
2352 }
2353
2354 list_add_tail(&wdata->list, &wdata_list);
2355 offset += cur_len;
2356 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002357 } while (len > 0);
2358
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002359 /*
2360 * If at least one write was successfully sent, then discard any rc
2361 * value from the later writes. If the other write succeeds, then
2362 * we'll end up returning whatever was written. If it fails, then
2363 * we'll get a new rc value from that.
2364 */
2365 if (!list_empty(&wdata_list))
2366 rc = 0;
2367
2368 /*
2369 * Wait for and collect replies for any successful sends in order of
2370 * increasing offset. Once an error is hit or we get a fatal signal
2371 * while waiting, then return without waiting for any more replies.
2372 */
2373restart_loop:
2374 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2375 if (!rc) {
2376 /* FIXME: freezable too? */
2377 rc = wait_for_completion_killable(&wdata->done);
2378 if (rc)
2379 rc = -EINTR;
2380 else if (wdata->result)
2381 rc = wdata->result;
2382 else
2383 total_written += wdata->bytes;
2384
2385 /* resend call if it's a retryable error */
2386 if (rc == -EAGAIN) {
2387 rc = cifs_uncached_retry_writev(wdata);
2388 goto restart_loop;
2389 }
2390 }
2391 list_del_init(&wdata->list);
2392 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002393 }
2394
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002395 if (total_written > 0)
2396 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002397
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002398 cifs_stats_bytes_written(tcon, total_written);
2399 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002400}
2401
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002402ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002403 unsigned long nr_segs, loff_t pos)
2404{
2405 ssize_t written;
2406 struct inode *inode;
2407
2408 inode = iocb->ki_filp->f_path.dentry->d_inode;
2409
2410 /*
2411 * BB - optimize the way when signing is disabled. We can drop this
2412 * extra memory-to-memory copying and use iovec buffers for constructing
2413 * write request.
2414 */
2415
2416 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2417 if (written > 0) {
2418 CIFS_I(inode)->invalid_mapping = true;
2419 iocb->ki_pos = pos;
2420 }
2421
2422 return written;
2423}
2424
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002425static ssize_t
2426cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2427 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002428{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002429 struct file *file = iocb->ki_filp;
2430 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2431 struct inode *inode = file->f_mapping->host;
2432 struct cifsInodeInfo *cinode = CIFS_I(inode);
2433 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2434 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002435
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002436 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002437
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002438 sb_start_write(inode->i_sb);
2439
2440 /*
2441 * We need to hold the sem to be sure nobody modifies lock list
2442 * with a brlock that prevents writing.
2443 */
2444 down_read(&cinode->lock_sem);
2445 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2446 server->vals->exclusive_lock_type, NULL,
2447 true)) {
2448 mutex_lock(&inode->i_mutex);
2449 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2450 &iocb->ki_pos);
2451 mutex_unlock(&inode->i_mutex);
2452 }
2453
2454 if (rc > 0 || rc == -EIOCBQUEUED) {
2455 ssize_t err;
2456
2457 err = generic_write_sync(file, pos, rc);
2458 if (err < 0 && rc > 0)
2459 rc = err;
2460 }
2461
2462 up_read(&cinode->lock_sem);
2463 sb_end_write(inode->i_sb);
2464 return rc;
2465}
2466
2467ssize_t
2468cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2469 unsigned long nr_segs, loff_t pos)
2470{
2471 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2472 struct cifsInodeInfo *cinode = CIFS_I(inode);
2473 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2474 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2475 iocb->ki_filp->private_data;
2476 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002477
Pavel Shilovsky25078102012-09-19 06:22:45 -07002478#ifdef CONFIG_CIFS_SMB2
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002479 /*
Pavel Shilovsky25078102012-09-19 06:22:45 -07002480 * If we have an oplock for read and want to write a data to the file
2481 * we need to store it in the page cache and then push it to the server
2482 * to be sure the next read will get a valid data.
2483 */
2484 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2485 ssize_t written;
2486 int rc;
2487
2488 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2489 rc = filemap_fdatawrite(inode->i_mapping);
2490 if (rc)
2491 return (ssize_t)rc;
2492
2493 return written;
2494 }
2495#endif
2496
2497 /*
2498 * For non-oplocked files in strict cache mode we need to write the data
2499 * to the server exactly from the pos to pos+len-1 rather than flush all
2500 * affected pages because it may cause a error with mandatory locks on
2501 * these pages but not on the region from pos to ppos+len-1.
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002502 */
2503
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002504 if (!cinode->clientCanCacheAll)
2505 return cifs_user_writev(iocb, iov, nr_segs, pos);
2506
2507 if (cap_unix(tcon->ses) &&
2508 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2509 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2510 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2511
2512 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002513}
2514
Jeff Layton0471ca32012-05-16 07:13:16 -04002515static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002516cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002517{
2518 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002519
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002520 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2521 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002522 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002523 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002524 INIT_LIST_HEAD(&rdata->list);
2525 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002526 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002527 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002528
Jeff Layton0471ca32012-05-16 07:13:16 -04002529 return rdata;
2530}
2531
Jeff Layton6993f742012-05-16 07:13:17 -04002532void
2533cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002534{
Jeff Layton6993f742012-05-16 07:13:17 -04002535 struct cifs_readdata *rdata = container_of(refcount,
2536 struct cifs_readdata, refcount);
2537
2538 if (rdata->cfile)
2539 cifsFileInfo_put(rdata->cfile);
2540
Jeff Layton0471ca32012-05-16 07:13:16 -04002541 kfree(rdata);
2542}
2543
Jeff Layton2a1bb132012-05-16 07:13:17 -04002544static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002545cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002546{
2547 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002548 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002549 unsigned int i;
2550
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002551 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002552 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2553 if (!page) {
2554 rc = -ENOMEM;
2555 break;
2556 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002557 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002558 }
2559
2560 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002561 for (i = 0; i < nr_pages; i++) {
2562 put_page(rdata->pages[i]);
2563 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002564 }
2565 }
2566 return rc;
2567}
2568
2569static void
2570cifs_uncached_readdata_release(struct kref *refcount)
2571{
Jeff Layton1c892542012-05-16 07:13:17 -04002572 struct cifs_readdata *rdata = container_of(refcount,
2573 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002574 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002575
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002576 for (i = 0; i < rdata->nr_pages; i++) {
2577 put_page(rdata->pages[i]);
2578 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002579 }
2580 cifs_readdata_release(refcount);
2581}
2582
2583static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002584cifs_retry_async_readv(struct cifs_readdata *rdata)
2585{
2586 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002587 struct TCP_Server_Info *server;
2588
2589 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002590
2591 do {
2592 if (rdata->cfile->invalidHandle) {
2593 rc = cifs_reopen_file(rdata->cfile, true);
2594 if (rc != 0)
2595 continue;
2596 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002597 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002598 } while (rc == -EAGAIN);
2599
2600 return rc;
2601}
2602
Jeff Layton1c892542012-05-16 07:13:17 -04002603/**
2604 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2605 * @rdata: the readdata response with list of pages holding data
2606 * @iov: vector in which we should copy the data
2607 * @nr_segs: number of segments in vector
2608 * @offset: offset into file of the first iovec
2609 * @copied: used to return the amount of data copied to the iov
2610 *
2611 * This function copies data from a list of pages in a readdata response into
2612 * an array of iovecs. It will first calculate where the data should go
2613 * based on the info in the readdata and then copy the data into that spot.
2614 */
2615static ssize_t
2616cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2617 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2618{
2619 int rc = 0;
2620 struct iov_iter ii;
2621 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002622 ssize_t remaining = rdata->bytes;
2623 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002624 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002625
2626 /* set up iov_iter and advance to the correct offset */
2627 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2628 iov_iter_advance(&ii, pos);
2629
2630 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002631 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002632 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002633 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002634
2635 /* copy a whole page or whatever's left */
2636 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2637
2638 /* ...but limit it to whatever space is left in the iov */
2639 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2640
2641 /* go while there's data to be copied and no errors */
2642 if (copy && !rc) {
2643 pdata = kmap(page);
2644 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2645 (int)copy);
2646 kunmap(page);
2647 if (!rc) {
2648 *copied += copy;
2649 remaining -= copy;
2650 iov_iter_advance(&ii, copy);
2651 }
2652 }
Jeff Layton1c892542012-05-16 07:13:17 -04002653 }
2654
2655 return rc;
2656}
2657
2658static void
2659cifs_uncached_readv_complete(struct work_struct *work)
2660{
2661 struct cifs_readdata *rdata = container_of(work,
2662 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002663
2664 complete(&rdata->done);
2665 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2666}
2667
2668static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002669cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2670 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002671{
Jeff Layton8321fec2012-09-19 06:22:32 -07002672 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002673 unsigned int i;
2674 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002675 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002676
Jeff Layton8321fec2012-09-19 06:22:32 -07002677 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002678 for (i = 0; i < nr_pages; i++) {
2679 struct page *page = rdata->pages[i];
2680
Jeff Layton8321fec2012-09-19 06:22:32 -07002681 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002682 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002683 iov.iov_base = kmap(page);
2684 iov.iov_len = PAGE_SIZE;
2685 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2686 i, iov.iov_base, iov.iov_len);
2687 len -= PAGE_SIZE;
2688 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002689 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002690 iov.iov_base = kmap(page);
2691 iov.iov_len = len;
2692 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2693 i, iov.iov_base, iov.iov_len);
2694 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2695 rdata->tailsz = len;
2696 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002697 } else {
2698 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002699 rdata->pages[i] = NULL;
2700 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002701 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002702 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002703 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002704
2705 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2706 kunmap(page);
2707 if (result < 0)
2708 break;
2709
2710 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002711 }
2712
Jeff Layton8321fec2012-09-19 06:22:32 -07002713 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002714}
2715
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002716static ssize_t
2717cifs_iovec_read(struct file *file, const struct iovec *iov,
2718 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719{
Jeff Layton1c892542012-05-16 07:13:17 -04002720 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002721 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002722 ssize_t total_read = 0;
2723 loff_t offset = *poffset;
2724 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002726 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002728 struct cifs_readdata *rdata, *tmp;
2729 struct list_head rdata_list;
2730 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002731
2732 if (!nr_segs)
2733 return 0;
2734
2735 len = iov_length(iov, nr_segs);
2736 if (!len)
2737 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738
Jeff Layton1c892542012-05-16 07:13:17 -04002739 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002740 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002741 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002742 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002744 if (!tcon->ses->server->ops->async_readv)
2745 return -ENOSYS;
2746
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002747 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2748 pid = open_file->pid;
2749 else
2750 pid = current->tgid;
2751
Steve Frenchad7a2922008-02-07 23:25:02 +00002752 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002753 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002754
Jeff Layton1c892542012-05-16 07:13:17 -04002755 do {
2756 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2757 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002758
Jeff Layton1c892542012-05-16 07:13:17 -04002759 /* allocate a readdata struct */
2760 rdata = cifs_readdata_alloc(npages,
2761 cifs_uncached_readv_complete);
2762 if (!rdata) {
2763 rc = -ENOMEM;
2764 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002766
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002767 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002768 if (rc)
2769 goto error;
2770
2771 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002772 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002773 rdata->offset = offset;
2774 rdata->bytes = cur_len;
2775 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002776 rdata->pagesz = PAGE_SIZE;
2777 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002778
2779 rc = cifs_retry_async_readv(rdata);
2780error:
2781 if (rc) {
2782 kref_put(&rdata->refcount,
2783 cifs_uncached_readdata_release);
2784 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 }
Jeff Layton1c892542012-05-16 07:13:17 -04002786
2787 list_add_tail(&rdata->list, &rdata_list);
2788 offset += cur_len;
2789 len -= cur_len;
2790 } while (len > 0);
2791
2792 /* if at least one read request send succeeded, then reset rc */
2793 if (!list_empty(&rdata_list))
2794 rc = 0;
2795
2796 /* the loop below should proceed in the order of increasing offsets */
2797restart_loop:
2798 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2799 if (!rc) {
2800 ssize_t copied;
2801
2802 /* FIXME: freezable sleep too? */
2803 rc = wait_for_completion_killable(&rdata->done);
2804 if (rc)
2805 rc = -EINTR;
2806 else if (rdata->result)
2807 rc = rdata->result;
2808 else {
2809 rc = cifs_readdata_to_iov(rdata, iov,
2810 nr_segs, *poffset,
2811 &copied);
2812 total_read += copied;
2813 }
2814
2815 /* resend call if it's a retryable error */
2816 if (rc == -EAGAIN) {
2817 rc = cifs_retry_async_readv(rdata);
2818 goto restart_loop;
2819 }
2820 }
2821 list_del_init(&rdata->list);
2822 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002824
Jeff Layton1c892542012-05-16 07:13:17 -04002825 cifs_stats_bytes_read(tcon, total_read);
2826 *poffset += total_read;
2827
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002828 /* mask nodata case */
2829 if (rc == -ENODATA)
2830 rc = 0;
2831
Jeff Layton1c892542012-05-16 07:13:17 -04002832 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833}
2834
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002835ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002836 unsigned long nr_segs, loff_t pos)
2837{
2838 ssize_t read;
2839
2840 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2841 if (read > 0)
2842 iocb->ki_pos = pos;
2843
2844 return read;
2845}
2846
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002847ssize_t
2848cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2849 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002850{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002851 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2852 struct cifsInodeInfo *cinode = CIFS_I(inode);
2853 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2854 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2855 iocb->ki_filp->private_data;
2856 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2857 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002858
2859 /*
2860 * In strict cache mode we need to read from the server all the time
2861 * if we don't have level II oplock because the server can delay mtime
2862 * change - so we can't make a decision about inode invalidating.
2863 * And we can also fail with pagereading if there are mandatory locks
2864 * on pages affected by this read but not on the region from pos to
2865 * pos+len-1.
2866 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002867 if (!cinode->clientCanCacheRead)
2868 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002869
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002870 if (cap_unix(tcon->ses) &&
2871 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2872 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2873 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2874
2875 /*
2876 * We need to hold the sem to be sure nobody modifies lock list
2877 * with a brlock that prevents reading.
2878 */
2879 down_read(&cinode->lock_sem);
2880 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2881 tcon->ses->server->vals->shared_lock_type,
2882 NULL, true))
2883 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2884 up_read(&cinode->lock_sem);
2885 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002886}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002888static ssize_t
2889cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890{
2891 int rc = -EACCES;
2892 unsigned int bytes_read = 0;
2893 unsigned int total_read;
2894 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002895 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002897 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002898 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002899 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002900 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002902 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002903 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002904 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002906 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002907 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002909 /* FIXME: set up handlers for larger reads and/or convert to async */
2910 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2911
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302913 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002914 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302915 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002917 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002918 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002919 server = tcon->ses->server;
2920
2921 if (!server->ops->sync_read) {
2922 free_xid(xid);
2923 return -ENOSYS;
2924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002926 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2927 pid = open_file->pid;
2928 else
2929 pid = current->tgid;
2930
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002932 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002934 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2935 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002936 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002937 /*
2938 * For windows me and 9x we do not want to request more than it
2939 * negotiated since it will refuse the read then.
2940 */
2941 if ((tcon->ses) && !(tcon->ses->capabilities &
2942 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002943 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002944 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002945 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 rc = -EAGAIN;
2947 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002948 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002949 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 if (rc != 0)
2951 break;
2952 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002953 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002954 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002955 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002956 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002957 rc = server->ops->sync_read(xid, open_file, &io_parms,
2958 &bytes_read, &cur_offset,
2959 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 }
2961 if (rc || (bytes_read == 0)) {
2962 if (total_read) {
2963 break;
2964 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002965 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 return rc;
2967 }
2968 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002969 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002970 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 }
2972 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002973 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 return total_read;
2975}
2976
Jeff Laytonca83ce32011-04-12 09:13:44 -04002977/*
2978 * If the page is mmap'ed into a process' page tables, then we need to make
2979 * sure that it doesn't change while being written back.
2980 */
2981static int
2982cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2983{
2984 struct page *page = vmf->page;
2985
2986 lock_page(page);
2987 return VM_FAULT_LOCKED;
2988}
2989
2990static struct vm_operations_struct cifs_file_vm_ops = {
2991 .fault = filemap_fault,
2992 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07002993 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04002994};
2995
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002996int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2997{
2998 int rc, xid;
2999 struct inode *inode = file->f_path.dentry->d_inode;
3000
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003001 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003002
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003003 if (!CIFS_I(inode)->clientCanCacheRead) {
3004 rc = cifs_invalidate_mapping(inode);
3005 if (rc)
3006 return rc;
3007 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003008
3009 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003010 if (rc == 0)
3011 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003012 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003013 return rc;
3014}
3015
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3017{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018 int rc, xid;
3019
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003020 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003021 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003023 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003024 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025 return rc;
3026 }
3027 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003028 if (rc == 0)
3029 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003030 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 return rc;
3032}
3033
Jeff Layton0471ca32012-05-16 07:13:16 -04003034static void
3035cifs_readv_complete(struct work_struct *work)
3036{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003037 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003038 struct cifs_readdata *rdata = container_of(work,
3039 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003040
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003041 for (i = 0; i < rdata->nr_pages; i++) {
3042 struct page *page = rdata->pages[i];
3043
Jeff Layton0471ca32012-05-16 07:13:16 -04003044 lru_cache_add_file(page);
3045
3046 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003047 flush_dcache_page(page);
3048 SetPageUptodate(page);
3049 }
3050
3051 unlock_page(page);
3052
3053 if (rdata->result == 0)
3054 cifs_readpage_to_fscache(rdata->mapping->host, page);
3055
3056 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003057 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003058 }
Jeff Layton6993f742012-05-16 07:13:17 -04003059 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003060}
3061
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003062static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003063cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3064 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003065{
Jeff Layton8321fec2012-09-19 06:22:32 -07003066 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003067 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003068 u64 eof;
3069 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003070 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003071 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003072
3073 /* determine the eof that the server (probably) has */
3074 eof = CIFS_I(rdata->mapping->host)->server_eof;
3075 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3076 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3077
Jeff Layton8321fec2012-09-19 06:22:32 -07003078 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003079 for (i = 0; i < nr_pages; i++) {
3080 struct page *page = rdata->pages[i];
3081
Jeff Layton8321fec2012-09-19 06:22:32 -07003082 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003083 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003084 iov.iov_base = kmap(page);
3085 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003086 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003087 i, page->index, iov.iov_base, iov.iov_len);
3088 len -= PAGE_CACHE_SIZE;
3089 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003090 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003091 iov.iov_base = kmap(page);
3092 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003093 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003094 i, page->index, iov.iov_base, iov.iov_len);
3095 memset(iov.iov_base + len,
3096 '\0', PAGE_CACHE_SIZE - len);
3097 rdata->tailsz = len;
3098 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003099 } else if (page->index > eof_index) {
3100 /*
3101 * The VFS will not try to do readahead past the
3102 * i_size, but it's possible that we have outstanding
3103 * writes with gaps in the middle and the i_size hasn't
3104 * caught up yet. Populate those with zeroed out pages
3105 * to prevent the VFS from repeatedly attempting to
3106 * fill them until the writes are flushed.
3107 */
3108 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003109 lru_cache_add_file(page);
3110 flush_dcache_page(page);
3111 SetPageUptodate(page);
3112 unlock_page(page);
3113 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003114 rdata->pages[i] = NULL;
3115 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003116 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003117 } else {
3118 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003119 lru_cache_add_file(page);
3120 unlock_page(page);
3121 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003122 rdata->pages[i] = NULL;
3123 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003124 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003125 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003126
3127 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3128 kunmap(page);
3129 if (result < 0)
3130 break;
3131
3132 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003133 }
3134
Jeff Layton8321fec2012-09-19 06:22:32 -07003135 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003136}
3137
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138static int cifs_readpages(struct file *file, struct address_space *mapping,
3139 struct list_head *page_list, unsigned num_pages)
3140{
Jeff Layton690c5e32011-10-19 15:30:16 -04003141 int rc;
3142 struct list_head tmplist;
3143 struct cifsFileInfo *open_file = file->private_data;
3144 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3145 unsigned int rsize = cifs_sb->rsize;
3146 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147
Jeff Layton690c5e32011-10-19 15:30:16 -04003148 /*
3149 * Give up immediately if rsize is too small to read an entire page.
3150 * The VFS will fall back to readpage. We should never reach this
3151 * point however since we set ra_pages to 0 when the rsize is smaller
3152 * than a cache page.
3153 */
3154 if (unlikely(rsize < PAGE_CACHE_SIZE))
3155 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003156
Suresh Jayaraman56698232010-07-05 18:13:25 +05303157 /*
3158 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3159 * immediately if the cookie is negative
3160 */
3161 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3162 &num_pages);
3163 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003164 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303165
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003166 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3167 pid = open_file->pid;
3168 else
3169 pid = current->tgid;
3170
Jeff Layton690c5e32011-10-19 15:30:16 -04003171 rc = 0;
3172 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003173
Jeff Layton690c5e32011-10-19 15:30:16 -04003174 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3175 mapping, num_pages);
3176
3177 /*
3178 * Start with the page at end of list and move it to private
3179 * list. Do the same with any following pages until we hit
3180 * the rsize limit, hit an index discontinuity, or run out of
3181 * pages. Issue the async read and then start the loop again
3182 * until the list is empty.
3183 *
3184 * Note that list order is important. The page_list is in
3185 * the order of declining indexes. When we put the pages in
3186 * the rdata->pages, then we want them in increasing order.
3187 */
3188 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003189 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003190 unsigned int bytes = PAGE_CACHE_SIZE;
3191 unsigned int expected_index;
3192 unsigned int nr_pages = 1;
3193 loff_t offset;
3194 struct page *page, *tpage;
3195 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196
3197 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198
Jeff Layton690c5e32011-10-19 15:30:16 -04003199 /*
3200 * Lock the page and put it in the cache. Since no one else
3201 * should have access to this page, we're safe to simply set
3202 * PG_locked without checking it first.
3203 */
3204 __set_page_locked(page);
3205 rc = add_to_page_cache_locked(page, mapping,
3206 page->index, GFP_KERNEL);
3207
3208 /* give up if we can't stick it in the cache */
3209 if (rc) {
3210 __clear_page_locked(page);
3211 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213
Jeff Layton690c5e32011-10-19 15:30:16 -04003214 /* move first page to the tmplist */
3215 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3216 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217
Jeff Layton690c5e32011-10-19 15:30:16 -04003218 /* now try and add more pages onto the request */
3219 expected_index = page->index + 1;
3220 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3221 /* discontinuity ? */
3222 if (page->index != expected_index)
3223 break;
3224
3225 /* would this page push the read over the rsize? */
3226 if (bytes + PAGE_CACHE_SIZE > rsize)
3227 break;
3228
3229 __set_page_locked(page);
3230 if (add_to_page_cache_locked(page, mapping,
3231 page->index, GFP_KERNEL)) {
3232 __clear_page_locked(page);
3233 break;
3234 }
3235 list_move_tail(&page->lru, &tmplist);
3236 bytes += PAGE_CACHE_SIZE;
3237 expected_index++;
3238 nr_pages++;
3239 }
3240
Jeff Layton0471ca32012-05-16 07:13:16 -04003241 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003242 if (!rdata) {
3243 /* best to give up if we're out of mem */
3244 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3245 list_del(&page->lru);
3246 lru_cache_add_file(page);
3247 unlock_page(page);
3248 page_cache_release(page);
3249 }
3250 rc = -ENOMEM;
3251 break;
3252 }
3253
Jeff Layton6993f742012-05-16 07:13:17 -04003254 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003255 rdata->mapping = mapping;
3256 rdata->offset = offset;
3257 rdata->bytes = bytes;
3258 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003259 rdata->pagesz = PAGE_CACHE_SIZE;
3260 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003261
3262 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3263 list_del(&page->lru);
3264 rdata->pages[rdata->nr_pages++] = page;
3265 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003266
Jeff Layton2a1bb132012-05-16 07:13:17 -04003267 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003268 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003269 for (i = 0; i < rdata->nr_pages; i++) {
3270 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003271 lru_cache_add_file(page);
3272 unlock_page(page);
3273 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 }
Jeff Layton6993f742012-05-16 07:13:17 -04003275 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 break;
3277 }
Jeff Layton6993f742012-05-16 07:13:17 -04003278
3279 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 }
3281
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 return rc;
3283}
3284
3285static int cifs_readpage_worker(struct file *file, struct page *page,
3286 loff_t *poffset)
3287{
3288 char *read_data;
3289 int rc;
3290
Suresh Jayaraman56698232010-07-05 18:13:25 +05303291 /* Is the page cached? */
3292 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3293 if (rc == 0)
3294 goto read_complete;
3295
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 page_cache_get(page);
3297 read_data = kmap(page);
3298 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003299
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003301
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 if (rc < 0)
3303 goto io_error;
3304 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003305 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003306
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003307 file->f_path.dentry->d_inode->i_atime =
3308 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003309
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 if (PAGE_CACHE_SIZE > rc)
3311 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3312
3313 flush_dcache_page(page);
3314 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303315
3316 /* send this page to the cache */
3317 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3318
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003320
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003322 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303324
3325read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 return rc;
3327}
3328
3329static int cifs_readpage(struct file *file, struct page *page)
3330{
3331 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3332 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003333 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003335 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336
3337 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303338 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003339 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303340 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 }
3342
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003343 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003344 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345
3346 rc = cifs_readpage_worker(file, page, &offset);
3347
3348 unlock_page(page);
3349
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003350 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 return rc;
3352}
3353
Steve Frencha403a0a2007-07-26 15:54:16 +00003354static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3355{
3356 struct cifsFileInfo *open_file;
3357
Jeff Layton44772882010-10-15 15:34:03 -04003358 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003359 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003360 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003361 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003362 return 1;
3363 }
3364 }
Jeff Layton44772882010-10-15 15:34:03 -04003365 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003366 return 0;
3367}
3368
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369/* We do not want to update the file size from server for inodes
3370 open for write - to avoid races with writepage extending
3371 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003372 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 but this is tricky to do without racing with writebehind
3374 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003375bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376{
Steve Frencha403a0a2007-07-26 15:54:16 +00003377 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003378 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003379
Steve Frencha403a0a2007-07-26 15:54:16 +00003380 if (is_inode_writable(cifsInode)) {
3381 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003382 struct cifs_sb_info *cifs_sb;
3383
Steve Frenchc32a0b62006-01-12 14:41:28 -08003384 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003386 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003387 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003388 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003389 }
3390
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003391 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003392 return true;
Steve French7ba52632007-02-08 18:14:13 +00003393
Steve French4b18f2a2008-04-29 00:06:05 +00003394 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003395 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003396 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397}
3398
Nick Piggind9414772008-09-24 11:32:59 -04003399static int cifs_write_begin(struct file *file, struct address_space *mapping,
3400 loff_t pos, unsigned len, unsigned flags,
3401 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402{
Nick Piggind9414772008-09-24 11:32:59 -04003403 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3404 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003405 loff_t page_start = pos & PAGE_MASK;
3406 loff_t i_size;
3407 struct page *page;
3408 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409
Joe Perchesb6b38f72010-04-21 03:50:45 +00003410 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003411
Nick Piggin54566b22009-01-04 12:00:53 -08003412 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003413 if (!page) {
3414 rc = -ENOMEM;
3415 goto out;
3416 }
Nick Piggind9414772008-09-24 11:32:59 -04003417
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003418 if (PageUptodate(page))
3419 goto out;
Steve French8a236262007-03-06 00:31:00 +00003420
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003421 /*
3422 * If we write a full page it will be up to date, no need to read from
3423 * the server. If the write is short, we'll end up doing a sync write
3424 * instead.
3425 */
3426 if (len == PAGE_CACHE_SIZE)
3427 goto out;
3428
3429 /*
3430 * optimize away the read when we have an oplock, and we're not
3431 * expecting to use any of the data we'd be reading in. That
3432 * is, when the page lies beyond the EOF, or straddles the EOF
3433 * and the write will cover all of the existing data.
3434 */
3435 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3436 i_size = i_size_read(mapping->host);
3437 if (page_start >= i_size ||
3438 (offset == 0 && (pos + len) >= i_size)) {
3439 zero_user_segments(page, 0, offset,
3440 offset + len,
3441 PAGE_CACHE_SIZE);
3442 /*
3443 * PageChecked means that the parts of the page
3444 * to which we're not writing are considered up
3445 * to date. Once the data is copied to the
3446 * page, it can be set uptodate.
3447 */
3448 SetPageChecked(page);
3449 goto out;
3450 }
3451 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452
Nick Piggind9414772008-09-24 11:32:59 -04003453 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003454 /*
3455 * might as well read a page, it is fast enough. If we get
3456 * an error, we don't need to return it. cifs_write_end will
3457 * do a sync write instead since PG_uptodate isn't set.
3458 */
3459 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003460 } else {
3461 /* we could try using another file handle if there is one -
3462 but how would we lock it to prevent close of that handle
3463 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003464 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00003465 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003466out:
3467 *pagep = page;
3468 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469}
3470
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303471static int cifs_release_page(struct page *page, gfp_t gfp)
3472{
3473 if (PagePrivate(page))
3474 return 0;
3475
3476 return cifs_fscache_release_page(page, gfp);
3477}
3478
3479static void cifs_invalidate_page(struct page *page, unsigned long offset)
3480{
3481 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3482
3483 if (offset == 0)
3484 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3485}
3486
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003487static int cifs_launder_page(struct page *page)
3488{
3489 int rc = 0;
3490 loff_t range_start = page_offset(page);
3491 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3492 struct writeback_control wbc = {
3493 .sync_mode = WB_SYNC_ALL,
3494 .nr_to_write = 0,
3495 .range_start = range_start,
3496 .range_end = range_end,
3497 };
3498
3499 cFYI(1, "Launder page: %p", page);
3500
3501 if (clear_page_dirty_for_io(page))
3502 rc = cifs_writepage_locked(page, &wbc);
3503
3504 cifs_fscache_invalidate_page(page, page->mapping->host);
3505 return rc;
3506}
3507
Tejun Heo9b646972010-07-20 22:09:02 +02003508void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003509{
3510 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3511 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003512 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003513 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003514 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003515 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003516
3517 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003518 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003519 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003520 else
Al Viro8737c932009-12-24 06:47:55 -05003521 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003522 rc = filemap_fdatawrite(inode->i_mapping);
3523 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003524 rc = filemap_fdatawait(inode->i_mapping);
3525 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003526 invalidate_remote_inode(inode);
3527 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003528 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003529 }
3530
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003531 rc = cifs_push_locks(cfile);
3532 if (rc)
3533 cERROR(1, "Push locks rc = %d", rc);
3534
Jeff Layton3bc303c2009-09-21 06:47:50 -04003535 /*
3536 * releasing stale oplock after recent reconnect of smb session using
3537 * a now incorrect file handle is not a data integrity issue but do
3538 * not bother sending an oplock release if session to server still is
3539 * disconnected since oplock already released by the server
3540 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003541 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003542 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3543 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003544 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003545 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003546}
3547
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003548const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 .readpage = cifs_readpage,
3550 .readpages = cifs_readpages,
3551 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003552 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003553 .write_begin = cifs_write_begin,
3554 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303556 .releasepage = cifs_release_page,
3557 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003558 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003560
3561/*
3562 * cifs_readpages requires the server to support a buffer large enough to
3563 * contain the header plus one complete page of data. Otherwise, we need
3564 * to leave cifs_readpages out of the address space operations.
3565 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003566const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003567 .readpage = cifs_readpage,
3568 .writepage = cifs_writepage,
3569 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003570 .write_begin = cifs_write_begin,
3571 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003572 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303573 .releasepage = cifs_release_page,
3574 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003575 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003576};