/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

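/*
 * Map the POSIX open access mode (O_RDONLY/O_WRONLY/O_RDWR) onto the
 * NT/CIFS desired access bits requested from the server on open.
 */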
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request
                   can cause unnecessary access denied on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

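/*
 * Map VFS open flags onto the SMB_O_* flags used by the posix open call
 * of the CIFS POSIX extensions.
 */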
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

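/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags onto the
 * NT create disposition sent in the open request (see the mapping table
 * in cifs_nt_open below).
 */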
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

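/*
 * Open a file via the CIFS POSIX extensions (CIFSPOSIXCreate) and, if the
 * caller passed a pinode, instantiate or update the inode from the
 * FILE_UNIX_BASIC_INFO returned by the server.
 */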
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag              CIFS Disposition
 *      ----------              ----------------
 *      O_CREAT                 FILE_OPEN_IF
 *      O_CREAT | O_EXCL        FILE_CREATE
 *      O_CREAT | O_TRUNC       FILE_OVERWRITE_IF
 *      O_TRUNC                 FILE_OVERWRITE
 *      none of the above       FILE_OPEN
 *
 *      Note that there is no direct match for the disposition
 *      FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates an existing
 *      file rather than creating a new one the way FILE_SUPERSEDE
 *      does (which uses the attributes / metadata passed in on the
 *      open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably. O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client. The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, fid, oplock,
                               buf, cifs_sb);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}

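/*
 * Allocate a cifsFileInfo for a freshly opened file, set up its per-fid
 * lock list, and link it into the tcon and inode open-file lists. Takes
 * cinode->lock_sem and cifs_file_list_lock internally, so it must be
 * called with neither held.
 */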
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);

        spin_lock(&cifs_file_list_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        /* if readable file instance put first in list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}

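/* Take an extra reference on the file private data. */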
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                     cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
             inode, file->f_flags, full_path);

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

        /* BB list all locks open on this file and relock */

        return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_fid fid;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab the rename sem here: various ops, including some
         * that already hold it, can end up causing writepage to get
         * called, and if the server was down that means we end up here.
         * We can never tell whether the caller already holds the
         * rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
             full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to
                 * retry hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /*
         * Can not refresh the inode by passing in a file_info buf to be
         * returned by CIFSSMBOpen and then calling get_inode_info with the
         * returned buf, since the file might have write-behind data that
         * needs to be flushed and the server version of the file size can
         * be stale. If we knew for sure that the inode was not dirty
         * locally we could do this.
         */
        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, &fid, &oplock,
                               NULL, cifs_sb);
        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cFYI(1, "cifs_reopen returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to the server already and could
         * deadlock if we tried to flush data; and since we do not know if
         * we have data that would invalidate the current end of file on
         * the server, we can not go to the server to get the new inode
         * info.
         */

        server->ops->set_fid(cfile, &fid, oplock);
        cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

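/*
 * Release resources for a directory opened for readdir: close the search
 * handle on the server if the scan did not complete, and free any buffered
 * search results.
 */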
int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cFYI(1, "Closedir inode = 0x%p", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cFYI(1, "Freeing private data in close dir");
        spin_lock(&cifs_file_list_lock);
        if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cFYI(1, "Closing uncompleted readdir with rc %d", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cFYI(1, "closedir free smb buf in srch struct");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

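/* Allocate and initialize a byte-range lock record for the current task. */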
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

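/*
 * Scan one fid's lock list for a lock overlapping [offset, offset+length).
 * Overlapping locks held by the same fid and process are not reported as
 * conflicts for shared lock types, nor (when rw_check is set for
 * read/write checks) for any type.
 */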
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, bool rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
                    current->tgid == li->pid)
                        continue;
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        bool rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, false);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, false);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

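/*
 * Send all cached byte-range locks for this fid to the server via
 * LOCKING_ANDX requests, batching as many ranges per request as the
 * negotiated buffer size allows, then mark the inode as no longer
 * caching brlocks.
 */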
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /* we are going to update can_cache_brlcks here - need write access */
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                free_xid(xid);
                return rc;
        }

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                up_write(&cinode->lock_sem);
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                up_write(&cinode->lock_sem);
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        cinode->can_cache_brlcks = false;
        up_write(&cinode->lock_sem);

        kfree(buf);
        free_xid(xid);
        return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

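/*
 * A snapshot of an FL_POSIX lock, taken while lock_flocks() is held so
 * that the actual lock requests can be sent to the server after the
 * spinlock is dropped.
 */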
struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        /* we are going to update can_cache_brlcks here - need write access */
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                free_xid(xid);
                return rc;
        }

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        cinode->can_cache_brlcks = false;
        up_write(&cinode->lock_sem);

        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

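/*
 * Push cached byte-range locks to the server, using POSIX lock requests
 * when the unix extensions allow it and mandatory (LOCKING_ANDX) requests
 * otherwise.
 */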
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                return cifs_push_posix_locks(cfile);

        return tcon->ses->server->ops->push_mand_locks(cfile);
}

Pavel Shilovsky03776f42010-08-17 11:26:00 +04001157static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001158cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001159 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001161 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001162 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001163 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001164 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001165 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001166 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001167 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001169 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001170 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001171 "not implemented yet");
1172 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001173 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001174 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001175 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1176 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001177 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001179 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001180 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001181 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001182 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 *lock = 1;
1184 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001185 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001186 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001187 *unlock = 1;
1188 /* Check if unlock includes more than one lock range */
1189 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001190 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001191 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001192 *lock = 1;
1193 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001194 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001195 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001196 *lock = 1;
1197 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001198 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001199 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001200 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001202 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001203}
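/*
 * Summary of the fl_type -> server lock-type mapping performed above
 * (the actual bit values come from server->vals and differ per dialect):
 *
 *	F_WRLCK, F_EXLCK -> large_lock_type | exclusive_lock_type, *lock = 1
 *	F_RDLCK, F_SHLCK -> large_lock_type | shared_lock_type,    *lock = 1
 *	F_UNLCK          -> large_lock_type | unlock_lock_type,    *unlock = 1
 */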
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001205static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001206cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001207 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001208{
1209 int rc = 0;
1210 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001211 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1212 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001213 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001214 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001216 if (posix_lck) {
1217 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001218
1219 rc = cifs_posix_lock_test(file, flock);
1220 if (!rc)
1221 return rc;
1222
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001223 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001224 posix_lock_type = CIFS_RDLCK;
1225 else
1226 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001227 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001228 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 return rc;
1231 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001232
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001233 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001234 if (!rc)
1235 return rc;
1236
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001237 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001238 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1239 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001241 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1242 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001243 flock->fl_type = F_UNLCK;
1244 if (rc != 0)
 1245 cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001246 "range during lock test", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001247 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001248 }
1249
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001250 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001251 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001252 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001253 }
1254
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001255 type &= ~server->vals->exclusive_lock_type;
1256
1257 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1258 type | server->vals->shared_lock_type,
1259 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001260 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001261 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1262 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001263 flock->fl_type = F_RDLCK;
1264 if (rc != 0)
 1265 cERROR(1, "Error %d unlocking previously locked "
 1266 "range during lock test", rc);
1267 } else
1268 flock->fl_type = F_WRLCK;
1269
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001270 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001271}
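/*
 * Sketch (not part of the original source): cifs_getlk() services
 * F_GETLK requests from userspace. A minimal caller that probes a range
 * and inspects the conflict information the code above fills in:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

static void probe_range(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 100,
	};

	if (fcntl(fd, F_GETLK, &fl) == 0) {
		if (fl.l_type == F_UNLCK)
			printf("range is free\n");	/* fl_type set above */
		else
			printf("conflicting lock held by pid %d\n",
			       (int)fl.l_pid);
	}
}
#endif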
1272
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001273void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001274cifs_move_llist(struct list_head *source, struct list_head *dest)
1275{
1276 struct list_head *li, *tmp;
1277 list_for_each_safe(li, tmp, source)
1278 list_move(li, dest);
1279}
1280
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001281void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001282cifs_free_llist(struct list_head *llist)
1283{
1284 struct cifsLockInfo *li, *tmp;
1285 list_for_each_entry_safe(li, tmp, llist, llist) {
1286 cifs_del_lock_waiters(li);
1287 list_del(&li->llist);
1288 kfree(li);
1289 }
1290}
1291
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001292int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001293cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1294 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001295{
1296 int rc = 0, stored_rc;
1297 int types[] = {LOCKING_ANDX_LARGE_FILES,
1298 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1299 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001300 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001301 LOCKING_ANDX_RANGE *buf, *cur;
1302 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1303 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1304 struct cifsLockInfo *li, *tmp;
1305 __u64 length = 1 + flock->fl_end - flock->fl_start;
1306 struct list_head tmp_llist;
1307
1308 INIT_LIST_HEAD(&tmp_llist);
1309
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001310 /*
1311 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1312 * and check it for zero before using.
1313 */
1314 max_buf = tcon->ses->server->maxBuf;
1315 if (!max_buf)
1316 return -EINVAL;
1317
1318 max_num = (max_buf - sizeof(struct smb_hdr)) /
1319 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001320 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1321 if (!buf)
1322 return -ENOMEM;
1323
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001324 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001325 for (i = 0; i < 2; i++) {
1326 cur = buf;
1327 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001328 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001329 if (flock->fl_start > li->offset ||
1330 (flock->fl_start + length) <
1331 (li->offset + li->length))
1332 continue;
1333 if (current->tgid != li->pid)
1334 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001335 if (types[i] != li->type)
1336 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001337 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001338 /*
1339 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001340 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001341 */
1342 list_del(&li->llist);
1343 cifs_del_lock_waiters(li);
1344 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001345 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001346 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001347 cur->Pid = cpu_to_le16(li->pid);
1348 cur->LengthLow = cpu_to_le32((u32)li->length);
1349 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1350 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1351 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1352 /*
1353 * We need to save a lock here to let us add it again to
1354 * the file's list if the unlock range request fails on
1355 * the server.
1356 */
1357 list_move(&li->llist, &tmp_llist);
1358 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001359 stored_rc = cifs_lockv(xid, tcon,
1360 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001361 li->type, num, 0, buf);
1362 if (stored_rc) {
1363 /*
1364 * We failed on the unlock range
1365 * request - add all locks from the tmp
1366 * list to the head of the file's list.
1367 */
1368 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001369 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001370 rc = stored_rc;
1371 } else
1372 /*
 1373 * The unlock range request succeeded -
1374 * free the tmp list.
1375 */
1376 cifs_free_llist(&tmp_llist);
1377 cur = buf;
1378 num = 0;
1379 } else
1380 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001381 }
1382 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001383 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001384 types[i], num, 0, buf);
1385 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001386 cifs_move_llist(&tmp_llist,
1387 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001388 rc = stored_rc;
1389 } else
1390 cifs_free_llist(&tmp_llist);
1391 }
1392 }
1393
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001394 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001395 kfree(buf);
1396 return rc;
1397}
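/*
 * Sketch of the batching arithmetic used above (not part of the original
 * source). The struct sizes are stand-ins; the real code divides the
 * server's maxBuf by sizeof(LOCKING_ANDX_RANGE) after subtracting the
 * SMB header:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int max_buf = 16384;	/* hypothetical server maxBuf */
	unsigned int hdr_size = 32;	/* stand-in for sizeof(struct smb_hdr) */
	unsigned int range_size = 20;	/* stand-in for sizeof(LOCKING_ANDX_RANGE) */
	unsigned int max_num = (max_buf - hdr_size) / range_size;

	/* once num reaches max_num, cifs_lockv() is sent and buf is reused */
	printf("%u unlock ranges per request\n", max_num);
	return 0;
}
#endif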
1398
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001399static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001400cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001401 bool wait_flag, bool posix_lck, int lock, int unlock,
1402 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403{
1404 int rc = 0;
1405 __u64 length = 1 + flock->fl_end - flock->fl_start;
1406 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1407 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001408 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409
1410 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001411 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001412
1413 rc = cifs_posix_lock_set(file, flock);
1414 if (!rc || rc < 0)
1415 return rc;
1416
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001417 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001418 posix_lock_type = CIFS_RDLCK;
1419 else
1420 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001421
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001422 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001423 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001424
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001425 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1426 current->tgid, flock->fl_start, length,
1427 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001429 }
1430
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001432 struct cifsLockInfo *lock;
1433
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001434 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001435 if (!lock)
1436 return -ENOMEM;
1437
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001438 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001439 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001440 kfree(lock);
1441 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001442 goto out;
1443
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001444 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1445 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001446 if (rc) {
1447 kfree(lock);
1448 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001449 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001450
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001451 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001452 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001453 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001454
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001455out:
1456 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001457 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001458 return rc;
1459}
1460
1461int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1462{
1463 int rc, xid;
1464 int lock = 0, unlock = 0;
1465 bool wait_flag = false;
1466 bool posix_lck = false;
1467 struct cifs_sb_info *cifs_sb;
1468 struct cifs_tcon *tcon;
1469 struct cifsInodeInfo *cinode;
1470 struct cifsFileInfo *cfile;
1471 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001472 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001473
1474 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001475 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476
1477 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1478 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1479 flock->fl_start, flock->fl_end);
1480
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001481 cfile = (struct cifsFileInfo *)file->private_data;
1482 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001483
1484 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1485 tcon->ses->server);
1486
1487 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001488 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001489 cinode = CIFS_I(file->f_path.dentry->d_inode);
1490
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001491 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001492 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1493 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1494 posix_lck = true;
1495 /*
1496 * BB add code here to normalize offset and length to account for
 1497 * a negative length, which we cannot accept over the wire.
1498 */
1499 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001500 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001501 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001502 return rc;
1503 }
1504
1505 if (!lock && !unlock) {
1506 /*
 1507 * if this is neither a lock nor an unlock request, there is
 1508 * nothing to do since we do not know what kind of operation it is
1509 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001510 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001511 return -EOPNOTSUPP;
1512 }
1513
1514 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1515 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001516 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 return rc;
1518}
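/*
 * Sketch (not part of the original source): end-to-end flow through
 * cifs_lock(). F_SETLKW sets FL_SLEEP, which cifs_read_flock() turns
 * into wait_flag = true; the final F_SETLK with F_UNLCK takes the
 * unlock branch of cifs_setlk().
 */
#if 0
#include <fcntl.h>

static int lock_then_unlock(int fd)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* 0 means "to end of file" */
	};

	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* may block on a conflict */
		return -1;
	fl.l_type = F_UNLCK;
	return fcntl(fd, F_SETLK, &fl);
}
#endif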
1519
Jeff Layton597b0272012-03-23 14:40:56 -04001520/*
1521 * update the file size (if needed) after a write. Should be called with
1522 * the inode->i_lock held
1523 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001524void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001525cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1526 unsigned int bytes_written)
1527{
1528 loff_t end_of_write = offset + bytes_written;
1529
1530 if (end_of_write > cifsi->server_eof)
1531 cifsi->server_eof = end_of_write;
1532}
1533
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001534static ssize_t
1535cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1536 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537{
1538 int rc = 0;
1539 unsigned int bytes_written = 0;
1540 unsigned int total_written;
1541 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001542 struct cifs_tcon *tcon;
1543 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001544 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001545 struct dentry *dentry = open_file->dentry;
1546 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001547 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Jeff Layton7da4b492010-10-15 15:34:00 -04001549 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Joe Perchesb6b38f72010-04-21 03:50:45 +00001551 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001552 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001554 tcon = tlink_tcon(open_file->tlink);
1555 server = tcon->ses->server;
1556
1557 if (!server->ops->sync_write)
1558 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001559
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001560 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 for (total_written = 0; write_size > total_written;
1563 total_written += bytes_written) {
1564 rc = -EAGAIN;
1565 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001566 struct kvec iov[2];
1567 unsigned int len;
1568
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 /* we could deadlock if we called
 1571 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001572 cifs_reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001574 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 if (rc != 0)
1576 break;
1577 }
Steve French3e844692005-10-03 13:37:24 -07001578
Jeff Laytonca83ce32011-04-12 09:13:44 -04001579 len = min((size_t)cifs_sb->wsize,
1580 write_size - total_written);
1581 /* iov[0] is reserved for smb header */
1582 iov[1].iov_base = (char *)write_data + total_written;
1583 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001584 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001585 io_parms.tcon = tcon;
1586 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001587 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001588 rc = server->ops->sync_write(xid, open_file, &io_parms,
1589 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 }
1591 if (rc || (bytes_written == 0)) {
1592 if (total_written)
1593 break;
1594 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001595 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 return rc;
1597 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001598 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001599 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001600 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001601 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001602 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001603 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 }
1605
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001606 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Jeff Layton7da4b492010-10-15 15:34:00 -04001608 if (total_written > 0) {
1609 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001610 if (*offset > dentry->d_inode->i_size)
1611 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001612 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001614 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001615 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 return total_written;
1617}
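/*
 * Userspace analogue (not part of the original source) of the loop in
 * cifs_write() above: keep issuing writes until the whole buffer has
 * been accepted, restarting on short writes and EINTR much as the
 * kernel code restarts on -EAGAIN.
 */
#if 0
#include <errno.h>
#include <unistd.h>

static ssize_t write_all(int fd, const char *buf, size_t len, off_t off)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = pwrite(fd, buf + done, len - done, off + done);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			return done ? (ssize_t)done : -1;
		}
		if (n == 0)
			break;		/* avoid spinning forever */
		done += n;
	}
	return done;
}
#endif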
1618
Jeff Layton6508d902010-09-29 19:51:11 -04001619struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1620 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001621{
1622 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001623 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1624
1625 /* only filter by fsuid on multiuser mounts */
1626 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1627 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001628
Jeff Layton44772882010-10-15 15:34:03 -04001629 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001630 /* we could simply take the first list entry, since write-only
 1631 entries are always at the end of the list; but the first entry
 1632 might have a close pending, so we go through the whole list */
1633 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001634 if (fsuid_only && open_file->uid != current_fsuid())
1635 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001636 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001637 if (!open_file->invalidHandle) {
1638 /* found a good file */
1639 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001640 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001641 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001642 return open_file;
1643 } /* else might as well continue, and look for
1644 another, or simply have the caller reopen it
1645 again rather than trying to fix this handle */
1646 } else /* write only file */
1647 break; /* write only files are last so must be done */
1648 }
Jeff Layton44772882010-10-15 15:34:03 -04001649 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001650 return NULL;
1651}
Steve French630f3f0c2007-10-25 21:17:17 +00001652
Jeff Layton6508d902010-09-29 19:51:11 -04001653struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1654 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001655{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001656 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001657 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001658 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001659 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001660 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001661
Steve French60808232006-04-22 15:53:05 +00001662 /* Having a null inode here (because mapping->host was set to zero by
 1663 the VFS or MM) should not happen, but we had reports of an oops (due to
 1664 it being zero) during stress test cases, so we need to check for it */
1665
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001666 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001667 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001668 dump_stack();
1669 return NULL;
1670 }
1671
Jeff Laytond3892292010-11-02 16:22:50 -04001672 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1673
Jeff Layton6508d902010-09-29 19:51:11 -04001674 /* only filter by fsuid on multiuser mounts */
1675 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1676 fsuid_only = false;
1677
Jeff Layton44772882010-10-15 15:34:03 -04001678 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001679refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001680 if (refind > MAX_REOPEN_ATT) {
1681 spin_unlock(&cifs_file_list_lock);
1682 return NULL;
1683 }
Steve French6148a742005-10-05 12:23:19 -07001684 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001685 if (!any_available && open_file->pid != current->tgid)
1686 continue;
1687 if (fsuid_only && open_file->uid != current_fsuid())
1688 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001689 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001690 if (!open_file->invalidHandle) {
1691 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001692 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001693 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001694 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001695 } else {
1696 if (!inv_file)
1697 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001698 }
Steve French6148a742005-10-05 12:23:19 -07001699 }
1700 }
Jeff Layton2846d382008-09-22 21:33:33 -04001701 /* couldn't find a usable FH with the same pid, try any available */
1702 if (!any_available) {
1703 any_available = true;
1704 goto refind_writable;
1705 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001706
1707 if (inv_file) {
1708 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001709 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001710 }
1711
Jeff Layton44772882010-10-15 15:34:03 -04001712 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001713
1714 if (inv_file) {
1715 rc = cifs_reopen_file(inv_file, false);
1716 if (!rc)
1717 return inv_file;
1718 else {
1719 spin_lock(&cifs_file_list_lock);
1720 list_move_tail(&inv_file->flist,
1721 &cifs_inode->openFileList);
1722 spin_unlock(&cifs_file_list_lock);
1723 cifsFileInfo_put(inv_file);
1724 spin_lock(&cifs_file_list_lock);
1725 ++refind;
1726 goto refind_writable;
1727 }
1728 }
1729
Steve French6148a742005-10-05 12:23:19 -07001730 return NULL;
1731}
1732
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1734{
1735 struct address_space *mapping = page->mapping;
1736 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1737 char *write_data;
1738 int rc = -EFAULT;
1739 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001741 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
1743 if (!mapping || !mapping->host)
1744 return -EFAULT;
1745
1746 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
1748 offset += (loff_t)from;
1749 write_data = kmap(page);
1750 write_data += from;
1751
1752 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1753 kunmap(page);
1754 return -EIO;
1755 }
1756
1757 /* racing with truncate? */
1758 if (offset > mapping->host->i_size) {
1759 kunmap(page);
1760 return 0; /* don't care */
1761 }
1762
1763 /* check to make sure that we are not extending the file */
1764 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001765 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Jeff Layton6508d902010-09-29 19:51:11 -04001767 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001768 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001769 bytes_written = cifs_write(open_file, open_file->pid,
1770 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001771 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001773 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001774 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001775 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001776 else if (bytes_written < 0)
1777 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001778 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001779 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 rc = -EIO;
1781 }
1782
1783 kunmap(page);
1784 return rc;
1785}
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001788 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001790 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1791 bool done = false, scanned = false, range_whole = false;
1792 pgoff_t end, index;
1793 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001794 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001795 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001796 int rc = 0;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001797 loff_t isize = i_size_read(mapping->host);
Steve French50c2f752007-07-13 00:33:32 +00001798
Steve French37c0eb42005-10-05 14:50:29 -07001799 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001800 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001801 * one page at a time via cifs_writepage
1802 */
1803 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1804 return generic_writepages(mapping, wbc);
1805
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001806 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001807 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001808 end = -1;
1809 } else {
1810 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1811 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1812 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001813 range_whole = true;
1814 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001815 }
1816retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001817 while (!done && index <= end) {
1818 unsigned int i, nr_pages, found_pages;
1819 pgoff_t next = 0, tofind;
1820 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001821
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001822 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1823 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001824
Jeff Laytonc2e87642012-03-23 14:40:55 -04001825 wdata = cifs_writedata_alloc((unsigned int)tofind,
1826 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001827 if (!wdata) {
1828 rc = -ENOMEM;
1829 break;
1830 }
1831
1832 /*
1833 * find_get_pages_tag seems to return a max of 256 on each
1834 * iteration, so we must call it several times in order to
1835 * fill the array or the wsize is effectively limited to
1836 * 256 * PAGE_CACHE_SIZE.
1837 */
1838 found_pages = 0;
1839 pages = wdata->pages;
1840 do {
1841 nr_pages = find_get_pages_tag(mapping, &index,
1842 PAGECACHE_TAG_DIRTY,
1843 tofind, pages);
1844 found_pages += nr_pages;
1845 tofind -= nr_pages;
1846 pages += nr_pages;
1847 } while (nr_pages && tofind && index <= end);
1848
1849 if (found_pages == 0) {
1850 kref_put(&wdata->refcount, cifs_writedata_release);
1851 break;
1852 }
1853
1854 nr_pages = 0;
1855 for (i = 0; i < found_pages; i++) {
1856 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001857 /*
1858 * At this point we hold neither mapping->tree_lock nor
1859 * lock on the page itself: the page may be truncated or
1860 * invalidated (changing page->mapping to NULL), or even
1861 * swizzled back from swapper_space to tmpfs file
1862 * mapping
1863 */
1864
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001865 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001866 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001867 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001868 break;
1869
1870 if (unlikely(page->mapping != mapping)) {
1871 unlock_page(page);
1872 break;
1873 }
1874
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001875 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001876 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001877 unlock_page(page);
1878 break;
1879 }
1880
1881 if (next && (page->index != next)) {
1882 /* Not next consecutive page */
1883 unlock_page(page);
1884 break;
1885 }
1886
1887 if (wbc->sync_mode != WB_SYNC_NONE)
1888 wait_on_page_writeback(page);
1889
1890 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001891 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001892 unlock_page(page);
1893 break;
1894 }
Steve French84d2f072005-10-12 15:32:05 -07001895
Linus Torvaldscb876f42006-12-23 16:19:07 -08001896 /*
1897 * This actually clears the dirty bit in the radix tree.
1898 * See cifs_writepage() for more commentary.
1899 */
1900 set_page_writeback(page);
1901
Jeff Laytoneddb0792012-09-18 16:20:35 -07001902 if (page_offset(page) >= isize) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001903 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001904 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001905 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001906 break;
1907 }
1908
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001909 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001910 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001911 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001912 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001913
1914 /* reset index to refind any pages skipped */
1915 if (nr_pages == 0)
1916 index = wdata->pages[0]->index + 1;
1917
1918 /* put any pages we aren't going to use */
1919 for (i = nr_pages; i < found_pages; i++) {
1920 page_cache_release(wdata->pages[i]);
1921 wdata->pages[i] = NULL;
1922 }
1923
1924 /* nothing to write? */
1925 if (nr_pages == 0) {
1926 kref_put(&wdata->refcount, cifs_writedata_release);
1927 continue;
1928 }
1929
1930 wdata->sync_mode = wbc->sync_mode;
1931 wdata->nr_pages = nr_pages;
1932 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001933 wdata->pagesz = PAGE_CACHE_SIZE;
1934 wdata->tailsz =
1935 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1936 (loff_t)PAGE_CACHE_SIZE);
1937 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1938 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001939
1940 do {
1941 if (wdata->cfile != NULL)
1942 cifsFileInfo_put(wdata->cfile);
1943 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1944 false);
1945 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001946 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001947 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001948 break;
Steve French37c0eb42005-10-05 14:50:29 -07001949 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001950 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001951 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1952 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001953 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001954
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001955 for (i = 0; i < nr_pages; ++i)
1956 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001957
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001958 /* send failure -- clean up the mess */
1959 if (rc != 0) {
1960 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001961 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001962 redirty_page_for_writepage(wbc,
1963 wdata->pages[i]);
1964 else
1965 SetPageError(wdata->pages[i]);
1966 end_page_writeback(wdata->pages[i]);
1967 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001968 }
Jeff Layton941b8532011-01-11 07:24:01 -05001969 if (rc != -EAGAIN)
1970 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001971 }
1972 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001973
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001974 wbc->nr_to_write -= nr_pages;
1975 if (wbc->nr_to_write <= 0)
1976 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001977
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001978 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001979 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001980
Steve French37c0eb42005-10-05 14:50:29 -07001981 if (!scanned && !done) {
1982 /*
1983 * We hit the last page and there is more work to be done: wrap
1984 * back to the start of the file
1985 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001986 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001987 index = 0;
1988 goto retry;
1989 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001990
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001991 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001992 mapping->writeback_index = index;
1993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 return rc;
1995}
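/*
 * Sketch (not part of the original source) of how wsize bounds the pages
 * gathered per async write request above; the values are assumptions:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long wsize = 65536;		/* hypothetical mount wsize */
	unsigned long page_size = 4096;		/* typical PAGE_CACHE_SIZE */
	unsigned long pages_per_req = wsize / page_size;

	printf("up to %lu pages (%lu bytes) per WRITE request\n",
	       pages_per_req, pages_per_req * page_size);
	return 0;
}
#endif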
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001997static int
1998cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002000 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002001 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002003 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004/* BB add check for wbc flags */
2005 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002006 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002007 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002008
2009 /*
2010 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2011 *
2012 * A writepage() implementation always needs to do either this,
2013 * or re-dirty the page with "redirty_page_for_writepage()" in
2014 * the case of a failure.
2015 *
2016 * Just unlocking the page will cause the radix tree tag-bits
2017 * to fail to update with the state of the page correctly.
2018 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002019 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002020retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002022 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2023 goto retry_write;
2024 else if (rc == -EAGAIN)
2025 redirty_page_for_writepage(wbc, page);
2026 else if (rc != 0)
2027 SetPageError(page);
2028 else
2029 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002030 end_page_writeback(page);
2031 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002032 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 return rc;
2034}
2035
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002036static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2037{
2038 int rc = cifs_writepage_locked(page, wbc);
2039 unlock_page(page);
2040 return rc;
2041}
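/*
 * Sketch (not part of the original source): dirtying a shared mapping
 * and calling msync(MS_SYNC) is one way the writepage path above gets
 * exercised for a CIFS-backed file.
 */
#if 0
#include <string.h>
#include <sys/mman.h>

static int dirty_and_sync(int fd, size_t len)
{
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		return -1;
	memset(p, 'x', len);		/* marks the pages dirty */
	msync(p, len, MS_SYNC);		/* flushes via writepage(s) */
	return munmap(p, len);
}
#endif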
2042
Nick Piggind9414772008-09-24 11:32:59 -04002043static int cifs_write_end(struct file *file, struct address_space *mapping,
2044 loff_t pos, unsigned len, unsigned copied,
2045 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046{
Nick Piggind9414772008-09-24 11:32:59 -04002047 int rc;
2048 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002049 struct cifsFileInfo *cfile = file->private_data;
2050 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2051 __u32 pid;
2052
2053 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2054 pid = cfile->pid;
2055 else
2056 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
Joe Perchesb6b38f72010-04-21 03:50:45 +00002058 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2059 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002060
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002061 if (PageChecked(page)) {
2062 if (copied == len)
2063 SetPageUptodate(page);
2064 ClearPageChecked(page);
2065 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002066 SetPageUptodate(page);
2067
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002069 char *page_data;
2070 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002071 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002072
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002073 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 /* this is probably better than calling
 2075 cifs_partialpagewrite directly, since in this function the
 2076 file handle is known and we might as well leverage it */
 2077 /* BB check if anything else is missing from ppw,
 2078 such as updating the last write time */
2079 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002080 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002081 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002083
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002084 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002085 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002086 rc = copied;
2087 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 set_page_dirty(page);
2089 }
2090
Nick Piggind9414772008-09-24 11:32:59 -04002091 if (rc > 0) {
2092 spin_lock(&inode->i_lock);
2093 if (pos > inode->i_size)
2094 i_size_write(inode, pos);
2095 spin_unlock(&inode->i_lock);
2096 }
2097
2098 unlock_page(page);
2099 page_cache_release(page);
2100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 return rc;
2102}
2103
Josef Bacik02c24a82011-07-16 20:44:56 -04002104int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2105 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002107 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002109 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002110 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002111 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002112 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002113 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Josef Bacik02c24a82011-07-16 20:44:56 -04002115 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2116 if (rc)
2117 return rc;
2118 mutex_lock(&inode->i_mutex);
2119
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002120 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
Joe Perchesb6b38f72010-04-21 03:50:45 +00002122 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002123 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002124
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002125 if (!CIFS_I(inode)->clientCanCacheRead) {
2126 rc = cifs_invalidate_mapping(inode);
2127 if (rc) {
2128 cFYI(1, "rc: %d during invalidate phase", rc);
2129 rc = 0; /* don't care about it in fsync */
2130 }
2131 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002132
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002133 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002134 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2135 server = tcon->ses->server;
2136 if (server->ops->flush)
2137 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2138 else
2139 rc = -ENOSYS;
2140 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002141
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002142 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002143 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002144 return rc;
2145}
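/*
 * Sketch (not part of the original source): fsync(2) on a CIFS file
 * lands here (or in cifs_fsync() below, depending on the cache mode),
 * first flushing dirty pages and then asking the server to flush via
 * ops->flush.
 */
#if 0
#include <unistd.h>

static int durable_write(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;
	return fsync(fd);	/* -> cifs_strict_fsync()/cifs_fsync() */
}
#endif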
2146
Josef Bacik02c24a82011-07-16 20:44:56 -04002147int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002148{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002149 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002150 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002151 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002152 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002153 struct cifsFileInfo *smbfile = file->private_data;
2154 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002155 struct inode *inode = file->f_mapping->host;
2156
2157 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2158 if (rc)
2159 return rc;
2160 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002161
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002162 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002163
2164 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2165 file->f_path.dentry->d_name.name, datasync);
2166
2167 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002168 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2169 server = tcon->ses->server;
2170 if (server->ops->flush)
2171 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2172 else
2173 rc = -ENOSYS;
2174 }
Steve Frenchb298f222009-02-21 21:17:43 +00002175
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002176 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002177 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 return rc;
2179}
2180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181/*
2182 * As file closes, flush all cached write data for this inode checking
2183 * for write behind errors.
2184 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002185int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002187 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 int rc = 0;
2189
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002190 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002191 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002192
Joe Perchesb6b38f72010-04-21 03:50:45 +00002193 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
2195 return rc;
2196}
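/*
 * Sketch (not part of the original source): because cifs_flush() runs
 * at close(2) time, write-behind errors can surface there - callers on
 * CIFS should check the return value of close().
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static int careful_close(int fd)
{
	if (close(fd) < 0) {
		perror("close (possible write-behind error)");
		return -1;
	}
	return 0;
}
#endif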
2197
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002198static int
2199cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2200{
2201 int rc = 0;
2202 unsigned long i;
2203
2204 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002205 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002206 if (!pages[i]) {
2207 /*
 2208 * save the number of pages we have already allocated and
 2209 * return with an ENOMEM error
2210 */
2211 num_pages = i;
2212 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002213 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002214 }
2215 }
2216
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002217 if (rc) {
2218 for (i = 0; i < num_pages; i++)
2219 put_page(pages[i]);
2220 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002221 return rc;
2222}
2223
2224static inline
2225size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2226{
2227 size_t num_pages;
2228 size_t clen;
2229
2230 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002231 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002232
2233 if (cur_len)
2234 *cur_len = clen;
2235
2236 return num_pages;
2237}
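/*
 * Worked example for get_numpages() above: with wsize = 65536 and
 * len = 70000, clen = min(70000, 65536) = 65536 and num_pages =
 * DIV_ROUND_UP(65536, 4096) = 16; the remaining 4464 bytes are left
 * for the next iteration of the caller's loop.
 */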
2238
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002239static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002240cifs_uncached_writev_complete(struct work_struct *work)
2241{
2242 int i;
2243 struct cifs_writedata *wdata = container_of(work,
2244 struct cifs_writedata, work);
2245 struct inode *inode = wdata->cfile->dentry->d_inode;
2246 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2247
2248 spin_lock(&inode->i_lock);
2249 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2250 if (cifsi->server_eof > inode->i_size)
2251 i_size_write(inode, cifsi->server_eof);
2252 spin_unlock(&inode->i_lock);
2253
2254 complete(&wdata->done);
2255
2256 if (wdata->result != -EAGAIN) {
2257 for (i = 0; i < wdata->nr_pages; i++)
2258 put_page(wdata->pages[i]);
2259 }
2260
2261 kref_put(&wdata->refcount, cifs_writedata_release);
2262}
2263
2264/* attempt to send write to server, retry on any -EAGAIN errors */
2265static int
2266cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2267{
2268 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002269 struct TCP_Server_Info *server;
2270
2271 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002272
2273 do {
2274 if (wdata->cfile->invalidHandle) {
2275 rc = cifs_reopen_file(wdata->cfile, false);
2276 if (rc != 0)
2277 continue;
2278 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002279 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002280 } while (rc == -EAGAIN);
2281
2282 return rc;
2283}
2284
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002285static ssize_t
2286cifs_iovec_write(struct file *file, const struct iovec *iov,
2287 unsigned long nr_segs, loff_t *poffset)
2288{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002289 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002290 size_t copied, len, cur_len;
2291 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002292 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002293 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002294 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002295 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002296 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002297 struct cifs_writedata *wdata, *tmp;
2298 struct list_head wdata_list;
2299 int rc;
2300 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002301
2302 len = iov_length(iov, nr_segs);
2303 if (!len)
2304 return 0;
2305
2306 rc = generic_write_checks(file, poffset, &len, 0);
2307 if (rc)
2308 return rc;
2309
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002310 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002311 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002312 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002313 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002314
2315 if (!tcon->ses->server->ops->async_writev)
2316 return -ENOSYS;
2317
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002318 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002319
2320 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2321 pid = open_file->pid;
2322 else
2323 pid = current->tgid;
2324
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002325 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002326 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002327 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002328
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002329 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2330 wdata = cifs_writedata_alloc(nr_pages,
2331 cifs_uncached_writev_complete);
2332 if (!wdata) {
2333 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002334 break;
2335 }
2336
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002337 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2338 if (rc) {
2339 kfree(wdata);
2340 break;
2341 }
2342
2343 save_len = cur_len;
2344 for (i = 0; i < nr_pages; i++) {
2345 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2346 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2347 0, copied);
2348 cur_len -= copied;
2349 iov_iter_advance(&it, copied);
2350 }
2351 cur_len = save_len - cur_len;
2352
2353 wdata->sync_mode = WB_SYNC_ALL;
2354 wdata->nr_pages = nr_pages;
2355 wdata->offset = (__u64)offset;
2356 wdata->cfile = cifsFileInfo_get(open_file);
2357 wdata->pid = pid;
2358 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002359 wdata->pagesz = PAGE_SIZE;
2360 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002361 rc = cifs_uncached_retry_writev(wdata);
2362 if (rc) {
2363 kref_put(&wdata->refcount, cifs_writedata_release);
2364 break;
2365 }
2366
2367 list_add_tail(&wdata->list, &wdata_list);
2368 offset += cur_len;
2369 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002370 } while (len > 0);
2371
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002372 /*
2373 * If at least one write was successfully sent, then discard any rc
2374 * value from the later writes. If a collected write succeeds, then
2375 * we'll end up returning whatever was written. If one fails, then
2376 * we'll get a new rc value from that.
2377 */
2378 if (!list_empty(&wdata_list))
2379 rc = 0;
2380
2381 /*
2382 * Wait for and collect replies for any successful sends in order of
2383 * increasing offset. Once an error is hit or we get a fatal signal
2384 * while waiting, then return without waiting for any more replies.
2385 */
2386restart_loop:
2387 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2388 if (!rc) {
2389 /* FIXME: freezable too? */
2390 rc = wait_for_completion_killable(&wdata->done);
2391 if (rc)
2392 rc = -EINTR;
2393 else if (wdata->result)
2394 rc = wdata->result;
2395 else
2396 total_written += wdata->bytes;
2397
2398 /* resend call if it's a retryable error */
2399 if (rc == -EAGAIN) {
2400 rc = cifs_uncached_retry_writev(wdata);
2401 goto restart_loop;
2402 }
2403 }
2404 list_del_init(&wdata->list);
2405 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002406 }
2407
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002408 if (total_written > 0)
2409 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002410
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002411 cifs_stats_bytes_written(tcon, total_written);
2412 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002413}
2414
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002415ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002416 unsigned long nr_segs, loff_t pos)
2417{
2418 ssize_t written;
2419 struct inode *inode;
2420
2421 inode = iocb->ki_filp->f_path.dentry->d_inode;
2422
2423 /*
2424 * BB - optimize the path where signing is disabled: we could drop this
2425 * extra memory-to-memory copying and use the iovec buffers directly to
2426 * construct the write request.
2427 */
2428
2429 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2430 if (written > 0) {
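		/*
		 * The write went directly to the server, bypassing the page
		 * cache, so any cached pages for this file are now stale;
		 * mark the mapping invalid so cached readers refetch the data.
		 */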
2431 CIFS_I(inode)->invalid_mapping = true;
2432 iocb->ki_pos = pos;
2433 }
2434
2435 return written;
2436}
2437
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002438static ssize_t
2439cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2440 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002441{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002442 struct file *file = iocb->ki_filp;
2443 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2444 struct inode *inode = file->f_mapping->host;
2445 struct cifsInodeInfo *cinode = CIFS_I(inode);
2446 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2447 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002448
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002449 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002450
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002451 sb_start_write(inode->i_sb);
2452
2453 /*
2454 * We need to hold the semaphore to be sure nobody modifies the lock
2455 * list with a brlock that prevents writing.
2456 */
2457 down_read(&cinode->lock_sem);
2458 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2459 server->vals->exclusive_lock_type, NULL,
2460 true)) {
2461 mutex_lock(&inode->i_mutex);
2462 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2463 &iocb->ki_pos);
2464 mutex_unlock(&inode->i_mutex);
2465 }
2466
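	/*
	 * If data was written (or an AIO request was queued), honor
	 * O_SYNC/O_DSYNC semantics by syncing the written range.
	 */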
2467 if (rc > 0 || rc == -EIOCBQUEUED) {
2468 ssize_t err;
2469
2470 err = generic_write_sync(file, pos, rc);
2471 if (err < 0 && rc > 0)
2472 rc = err;
2473 }
2474
2475 up_read(&cinode->lock_sem);
2476 sb_end_write(inode->i_sb);
2477 return rc;
2478}
2479
2480ssize_t
2481cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2482 unsigned long nr_segs, loff_t pos)
2483{
2484 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2485 struct cifsInodeInfo *cinode = CIFS_I(inode);
2486 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2487 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2488 iocb->ki_filp->private_data;
2489 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002490
Pavel Shilovsky25078102012-09-19 06:22:45 -07002491#ifdef CONFIG_CIFS_SMB2
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002492 /*
Pavel Shilovsky25078102012-09-19 06:22:45 -07002493 * If we have a read oplock and want to write data to the file, we
2494 * need to store it in the page cache and then push it to the server
2495 * to be sure the next read will get valid data.
2496 */
2497 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2498 ssize_t written;
2499 int rc;
2500
2501 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2502 rc = filemap_fdatawrite(inode->i_mapping);
2503 if (rc)
2504 return (ssize_t)rc;
2505
2506 return written;
2507 }
2508#endif
2509
2510 /*
2511 * For non-oplocked files in strict cache mode we need to write the data
2512 * to the server exactly from pos to pos+len-1 rather than flush all
2513 * affected pages, because flushing may cause an error with mandatory
2514 * locks on those pages but not on the region from pos to pos+len-1.
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002515 */
2516
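	/*
	 * Without an exclusive oplock, fall back to uncached writes. With
	 * POSIX byte-range lock support there are no mandatory-lock
	 * conflicts to check, so the generic cached path is safe; otherwise
	 * go through cifs_writev, which checks for conflicting brlocks
	 * before writing.
	 */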
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002517 if (!cinode->clientCanCacheAll)
2518 return cifs_user_writev(iocb, iov, nr_segs, pos);
2519
2520 if (cap_unix(tcon->ses) &&
2521 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2522 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2523 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2524
2525 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002526}
2527
Jeff Layton0471ca32012-05-16 07:13:16 -04002528static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002529cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002530{
2531 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002532
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002533 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2534 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002535 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002536 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002537 INIT_LIST_HEAD(&rdata->list);
2538 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002539 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002540 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002541
Jeff Layton0471ca32012-05-16 07:13:16 -04002542 return rdata;
2543}
2544
Jeff Layton6993f742012-05-16 07:13:17 -04002545void
2546cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002547{
Jeff Layton6993f742012-05-16 07:13:17 -04002548 struct cifs_readdata *rdata = container_of(refcount,
2549 struct cifs_readdata, refcount);
2550
2551 if (rdata->cfile)
2552 cifsFileInfo_put(rdata->cfile);
2553
Jeff Layton0471ca32012-05-16 07:13:16 -04002554 kfree(rdata);
2555}
2556
Jeff Layton2a1bb132012-05-16 07:13:17 -04002557static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002558cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002559{
2560 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002561 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002562 unsigned int i;
2563
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002564 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002565 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2566 if (!page) {
2567 rc = -ENOMEM;
2568 break;
2569 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002570 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002571 }
2572
2573 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002574 for (i = 0; i < nr_pages; i++) {
2575 put_page(rdata->pages[i]);
2576 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002577 }
2578 }
2579 return rc;
2580}
2581
2582static void
2583cifs_uncached_readdata_release(struct kref *refcount)
2584{
Jeff Layton1c892542012-05-16 07:13:17 -04002585 struct cifs_readdata *rdata = container_of(refcount,
2586 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002587 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002588
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002589 for (i = 0; i < rdata->nr_pages; i++) {
2590 put_page(rdata->pages[i]);
2591 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002592 }
2593 cifs_readdata_release(refcount);
2594}
2595
2596static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002597cifs_retry_async_readv(struct cifs_readdata *rdata)
2598{
2599 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002600 struct TCP_Server_Info *server;
2601
2602 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002603
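	/*
	 * Resend the async read until it stops failing with -EAGAIN,
	 * reopening the file handle first if it went stale on reconnect.
	 */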
2604 do {
2605 if (rdata->cfile->invalidHandle) {
2606 rc = cifs_reopen_file(rdata->cfile, true);
2607 if (rc != 0)
2608 continue;
2609 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002610 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002611 } while (rc == -EAGAIN);
2612
2613 return rc;
2614}
2615
Jeff Layton1c892542012-05-16 07:13:17 -04002616/**
2617 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2618 * @rdata: the readdata response with list of pages holding data
2619 * @iov: vector in which we should copy the data
2620 * @nr_segs: number of segments in vector
2621 * @offset: offset into file of the first iovec
2622 * @copied: used to return the amount of data copied to the iov
2623 *
2624 * This function copies data from a list of pages in a readdata response into
2625 * an array of iovecs. It will first calculate where the data should go
2626 * based on the info in the readdata and then copy the data into that spot.
2627 */
2628static ssize_t
2629cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2630 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2631{
2632 int rc = 0;
2633 struct iov_iter ii;
2634 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002635 ssize_t remaining = rdata->bytes;
2636 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002637 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002638
2639 /* set up iov_iter and advance to the correct offset */
2640 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2641 iov_iter_advance(&ii, pos);
2642
2643 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002644 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002645 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002646 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002647
2648 /* copy a whole page or whatever's left */
2649 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2650
2651 /* ...but limit it to whatever space is left in the iov */
2652 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2653
2654 /* go while there's data to be copied and no errors */
2655 if (copy && !rc) {
2656 pdata = kmap(page);
2657 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2658 (int)copy);
2659 kunmap(page);
2660 if (!rc) {
2661 *copied += copy;
2662 remaining -= copy;
2663 iov_iter_advance(&ii, copy);
2664 }
2665 }
Jeff Layton1c892542012-05-16 07:13:17 -04002666 }
2667
2668 return rc;
2669}
2670
2671static void
2672cifs_uncached_readv_complete(struct work_struct *work)
2673{
2674 struct cifs_readdata *rdata = container_of(work,
2675 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002676
2677 complete(&rdata->done);
2678 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2679}
2680
2681static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002682cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2683 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002684{
Jeff Layton8321fec2012-09-19 06:22:32 -07002685 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002686 unsigned int i;
2687 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002688 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002689
Jeff Layton8321fec2012-09-19 06:22:32 -07002690 rdata->tailsz = PAGE_SIZE;
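	/*
	 * Read the response body from the socket directly into the pages:
	 * full pages first, then a zero-padded partial tail page; pages
	 * beyond the received length are released.
	 */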
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002691 for (i = 0; i < nr_pages; i++) {
2692 struct page *page = rdata->pages[i];
2693
Jeff Layton8321fec2012-09-19 06:22:32 -07002694 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002695 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002696 iov.iov_base = kmap(page);
2697 iov.iov_len = PAGE_SIZE;
2698 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2699 i, iov.iov_base, iov.iov_len);
2700 len -= PAGE_SIZE;
2701 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002702 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002703 iov.iov_base = kmap(page);
2704 iov.iov_len = len;
2705 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2706 i, iov.iov_base, iov.iov_len);
2707 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2708 rdata->tailsz = len;
2709 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002710 } else {
2711 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002712 rdata->pages[i] = NULL;
2713 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002714 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002715 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002716 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002717
2718 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2719 kunmap(page);
2720 if (result < 0)
2721 break;
2722
2723 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002724 }
2725
Jeff Layton8321fec2012-09-19 06:22:32 -07002726 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002727}
2728
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002729static ssize_t
2730cifs_iovec_read(struct file *file, const struct iovec *iov,
2731 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732{
Jeff Layton1c892542012-05-16 07:13:17 -04002733 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002734 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002735 ssize_t total_read = 0;
2736 loff_t offset = *poffset;
2737 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002739 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002741 struct cifs_readdata *rdata, *tmp;
2742 struct list_head rdata_list;
2743 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002744
2745 if (!nr_segs)
2746 return 0;
2747
2748 len = iov_length(iov, nr_segs);
2749 if (!len)
2750 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
Jeff Layton1c892542012-05-16 07:13:17 -04002752 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002753 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002754 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002755 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002757 if (!tcon->ses->server->ops->async_readv)
2758 return -ENOSYS;
2759
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002760 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2761 pid = open_file->pid;
2762 else
2763 pid = current->tgid;
2764
Steve Frenchad7a2922008-02-07 23:25:02 +00002765 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002766 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002767
Jeff Layton1c892542012-05-16 07:13:17 -04002768 do {
2769 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2770 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
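		/*
		 * Illustrative sizing (numbers assumed, not from this file):
		 * with rsize = 16384, 4096-byte pages and 20000 bytes left,
		 * cur_len = 16384 and npages = DIV_ROUND_UP(16384, 4096) = 4.
		 */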
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002771
Jeff Layton1c892542012-05-16 07:13:17 -04002772 /* allocate a readdata struct */
2773 rdata = cifs_readdata_alloc(npages,
2774 cifs_uncached_readv_complete);
2775 if (!rdata) {
2776 rc = -ENOMEM;
2777 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002779
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002780 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002781 if (rc)
2782 goto error;
2783
2784 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002785 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002786 rdata->offset = offset;
2787 rdata->bytes = cur_len;
2788 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002789 rdata->pagesz = PAGE_SIZE;
2790 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002791
2792 rc = cifs_retry_async_readv(rdata);
2793error:
2794 if (rc) {
2795 kref_put(&rdata->refcount,
2796 cifs_uncached_readdata_release);
2797 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 }
Jeff Layton1c892542012-05-16 07:13:17 -04002799
2800 list_add_tail(&rdata->list, &rdata_list);
2801 offset += cur_len;
2802 len -= cur_len;
2803 } while (len > 0);
2804
2805 /* if at least one read request send succeeded, then reset rc */
2806 if (!list_empty(&rdata_list))
2807 rc = 0;
2808
2809 /* the loop below should proceed in the order of increasing offsets */
2810restart_loop:
2811 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2812 if (!rc) {
2813 ssize_t copied;
2814
2815 /* FIXME: freezable sleep too? */
2816 rc = wait_for_completion_killable(&rdata->done);
2817 if (rc)
2818 rc = -EINTR;
2819 else if (rdata->result)
2820 rc = rdata->result;
2821 else {
2822 rc = cifs_readdata_to_iov(rdata, iov,
2823 nr_segs, *poffset,
2824 &copied);
2825 total_read += copied;
2826 }
2827
2828 /* resend call if it's a retryable error */
2829 if (rc == -EAGAIN) {
2830 rc = cifs_retry_async_readv(rdata);
2831 goto restart_loop;
2832 }
2833 }
2834 list_del_init(&rdata->list);
2835 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002837
Jeff Layton1c892542012-05-16 07:13:17 -04002838 cifs_stats_bytes_read(tcon, total_read);
2839 *poffset += total_read;
2840
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002841	/* mask the nodata case: treat -ENODATA as a successful zero-byte read */
2842 if (rc == -ENODATA)
2843 rc = 0;
2844
Jeff Layton1c892542012-05-16 07:13:17 -04002845 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846}
2847
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002848ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002849 unsigned long nr_segs, loff_t pos)
2850{
2851 ssize_t read;
2852
2853 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2854 if (read > 0)
2855 iocb->ki_pos = pos;
2856
2857 return read;
2858}
2859
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002860ssize_t
2861cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2862 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002863{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002864 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2865 struct cifsInodeInfo *cinode = CIFS_I(inode);
2866 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2867 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2868 iocb->ki_filp->private_data;
2869 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2870 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002871
2872 /*
2873 * In strict cache mode we need to read from the server every time if
2874 * we don't have a level II oplock, because the server can delay the
2875 * mtime change - so we can't decide whether to invalidate the inode.
2876 * Reading pages can also fail if there are mandatory locks on pages
2877 * affected by this read but not on the region from pos to
2878 * pos+len-1.
2879 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002880 if (!cinode->clientCanCacheRead)
2881 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002882
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002883 if (cap_unix(tcon->ses) &&
2884 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2885 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2886 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2887
2888 /*
2889 * We need to hold the semaphore to be sure nobody modifies the lock
2890 * list with a brlock that prevents reading.
2891 */
2892 down_read(&cinode->lock_sem);
2893 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2894 tcon->ses->server->vals->shared_lock_type,
2895 NULL, true))
2896 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2897 up_read(&cinode->lock_sem);
2898 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002899}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002901static ssize_t
2902cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903{
2904 int rc = -EACCES;
2905 unsigned int bytes_read = 0;
2906 unsigned int total_read;
2907 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002908 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002910 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002911 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002912 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002913 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002915 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002916 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002917 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002919 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002920 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002922 /* FIXME: set up handlers for larger reads and/or convert to async */
2923 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2924
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302926 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002927 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302928 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002930 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002931 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002932 server = tcon->ses->server;
2933
2934 if (!server->ops->sync_read) {
2935 free_xid(xid);
2936 return -ENOSYS;
2937 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002939 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2940 pid = open_file->pid;
2941 else
2942 pid = current->tgid;
2943
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002945 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002947 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2948 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002949 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002950 /*
2951 * For Windows ME and 9x we do not want to request more than was
2952 * negotiated, since the server will refuse the read then.
2953 */
2954 if ((tcon->ses) && !(tcon->ses->capabilities &
2955 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002956 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002957 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002958 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 rc = -EAGAIN;
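		/*
		 * Retry the sync read until it stops returning -EAGAIN,
		 * reopening the file handle first if a reconnect invalidated it.
		 */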
2960 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002961 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002962 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 if (rc != 0)
2964 break;
2965 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002966 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002967 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002968 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002969 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002970 rc = server->ops->sync_read(xid, open_file, &io_parms,
2971 &bytes_read, &cur_offset,
2972 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 }
2974 if (rc || (bytes_read == 0)) {
2975 if (total_read) {
2976 break;
2977 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002978 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 return rc;
2980 }
2981 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002982 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002983 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 }
2985 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002986 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 return total_read;
2988}
2989
Jeff Laytonca83ce32011-04-12 09:13:44 -04002990/*
2991 * If the page is mmap'ed into a process' page tables, then we need to make
2992 * sure that it doesn't change while being written back.
2993 */
2994static int
2995cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2996{
2997 struct page *page = vmf->page;
2998
2999 lock_page(page);
3000 return VM_FAULT_LOCKED;
3001}
3002
3003static struct vm_operations_struct cifs_file_vm_ops = {
3004 .fault = filemap_fault,
3005 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003006 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003007};
3008
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003009int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3010{
3011 int rc, xid;
3012 struct inode *inode = file->f_path.dentry->d_inode;
3013
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003014 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003015
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003016 if (!CIFS_I(inode)->clientCanCacheRead) {
3017 rc = cifs_invalidate_mapping(inode);
3018 if (rc)
3019 return rc;
3020 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003021
3022 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003023 if (rc == 0)
3024 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003025 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003026 return rc;
3027}
3028
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3030{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 int rc, xid;
3032
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003033 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003034 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003036 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003037 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 return rc;
3039 }
3040 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003041 if (rc == 0)
3042 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003043 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 return rc;
3045}
3046
Jeff Layton0471ca32012-05-16 07:13:16 -04003047static void
3048cifs_readv_complete(struct work_struct *work)
3049{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003050 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003051 struct cifs_readdata *rdata = container_of(work,
3052 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003053
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003054 for (i = 0; i < rdata->nr_pages; i++) {
3055 struct page *page = rdata->pages[i];
3056
Jeff Layton0471ca32012-05-16 07:13:16 -04003057 lru_cache_add_file(page);
3058
3059 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003060 flush_dcache_page(page);
3061 SetPageUptodate(page);
3062 }
3063
3064 unlock_page(page);
3065
3066 if (rdata->result == 0)
3067 cifs_readpage_to_fscache(rdata->mapping->host, page);
3068
3069 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003070 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003071 }
Jeff Layton6993f742012-05-16 07:13:17 -04003072 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003073}
3074
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003075static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003076cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3077 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003078{
Jeff Layton8321fec2012-09-19 06:22:32 -07003079 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003080 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003081 u64 eof;
3082 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003083 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003084 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003085
3086 /* determine the eof that the server (probably) has */
3087 eof = CIFS_I(rdata->mapping->host)->server_eof;
3088 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3089 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
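	/* eof_index is the index of the last page the server should have */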
3090
Jeff Layton8321fec2012-09-19 06:22:32 -07003091 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003092 for (i = 0; i < nr_pages; i++) {
3093 struct page *page = rdata->pages[i];
3094
Jeff Layton8321fec2012-09-19 06:22:32 -07003095 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003096 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003097 iov.iov_base = kmap(page);
3098 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003099 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003100 i, page->index, iov.iov_base, iov.iov_len);
3101 len -= PAGE_CACHE_SIZE;
3102 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003103 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003104 iov.iov_base = kmap(page);
3105 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003106 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003107 i, page->index, iov.iov_base, iov.iov_len);
3108 memset(iov.iov_base + len,
3109 '\0', PAGE_CACHE_SIZE - len);
3110 rdata->tailsz = len;
3111 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003112 } else if (page->index > eof_index) {
3113 /*
3114 * The VFS will not try to do readahead past the
3115 * i_size, but it's possible that we have outstanding
3116 * writes with gaps in the middle and the i_size hasn't
3117 * caught up yet. Populate those with zeroed out pages
3118 * to prevent the VFS from repeatedly attempting to
3119 * fill them until the writes are flushed.
3120 */
3121 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003122 lru_cache_add_file(page);
3123 flush_dcache_page(page);
3124 SetPageUptodate(page);
3125 unlock_page(page);
3126 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003127 rdata->pages[i] = NULL;
3128 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003129 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003130 } else {
3131 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003132 lru_cache_add_file(page);
3133 unlock_page(page);
3134 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003135 rdata->pages[i] = NULL;
3136 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003137 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003138 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003139
3140 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3141 kunmap(page);
3142 if (result < 0)
3143 break;
3144
3145 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003146 }
3147
Jeff Layton8321fec2012-09-19 06:22:32 -07003148 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003149}
3150
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151static int cifs_readpages(struct file *file, struct address_space *mapping,
3152 struct list_head *page_list, unsigned num_pages)
3153{
Jeff Layton690c5e32011-10-19 15:30:16 -04003154 int rc;
3155 struct list_head tmplist;
3156 struct cifsFileInfo *open_file = file->private_data;
3157 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3158 unsigned int rsize = cifs_sb->rsize;
3159 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160
Jeff Layton690c5e32011-10-19 15:30:16 -04003161 /*
3162 * Give up immediately if rsize is too small to read an entire page.
3163 * The VFS will fall back to readpage. We should never reach this
3164 * point, however, since we set ra_pages to 0 when the rsize is smaller
3165 * than a cache page.
3166 */
3167 if (unlikely(rsize < PAGE_CACHE_SIZE))
3168 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003169
Suresh Jayaraman56698232010-07-05 18:13:25 +05303170 /*
3171 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3172 * immediately if the cookie is negative
3173 */
3174 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3175 &num_pages);
3176 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003177 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303178
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003179 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3180 pid = open_file->pid;
3181 else
3182 pid = current->tgid;
3183
Jeff Layton690c5e32011-10-19 15:30:16 -04003184 rc = 0;
3185 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186
Jeff Layton690c5e32011-10-19 15:30:16 -04003187 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3188 mapping, num_pages);
3189
3190 /*
3191 * Start with the page at end of list and move it to private
3192 * list. Do the same with any following pages until we hit
3193 * the rsize limit, hit an index discontinuity, or run out of
3194 * pages. Issue the async read and then start the loop again
3195 * until the list is empty.
3196 *
3197 * Note that list order is important. The page_list is in
3198 * the order of declining indexes. When we put the pages in
3199 * the rdata->pages, then we want them in increasing order.
3200 */
3201 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003202 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003203 unsigned int bytes = PAGE_CACHE_SIZE;
3204 unsigned int expected_index;
3205 unsigned int nr_pages = 1;
3206 loff_t offset;
3207 struct page *page, *tpage;
3208 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209
3210 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211
Jeff Layton690c5e32011-10-19 15:30:16 -04003212 /*
3213 * Lock the page and put it in the cache. Since no one else
3214 * should have access to this page, we're safe to simply set
3215 * PG_locked without checking it first.
3216 */
3217 __set_page_locked(page);
3218 rc = add_to_page_cache_locked(page, mapping,
3219 page->index, GFP_KERNEL);
3220
3221 /* give up if we can't stick it in the cache */
3222 if (rc) {
3223 __clear_page_locked(page);
3224 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226
Jeff Layton690c5e32011-10-19 15:30:16 -04003227 /* move first page to the tmplist */
3228 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3229 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230
Jeff Layton690c5e32011-10-19 15:30:16 -04003231 /* now try and add more pages onto the request */
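		/*
		 * Illustrative sizing (numbers assumed, not from this file):
		 * with rsize = 16384 and 4096-byte pages, at most four
		 * contiguous pages are batched into one async read before
		 * the rsize check below stops the scan.
		 */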
3232 expected_index = page->index + 1;
3233 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3234 /* discontinuity ? */
3235 if (page->index != expected_index)
3236 break;
3237
3238 /* would this page push the read over the rsize? */
3239 if (bytes + PAGE_CACHE_SIZE > rsize)
3240 break;
3241
3242 __set_page_locked(page);
3243 if (add_to_page_cache_locked(page, mapping,
3244 page->index, GFP_KERNEL)) {
3245 __clear_page_locked(page);
3246 break;
3247 }
3248 list_move_tail(&page->lru, &tmplist);
3249 bytes += PAGE_CACHE_SIZE;
3250 expected_index++;
3251 nr_pages++;
3252 }
3253
Jeff Layton0471ca32012-05-16 07:13:16 -04003254 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003255 if (!rdata) {
3256 /* best to give up if we're out of mem */
3257 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3258 list_del(&page->lru);
3259 lru_cache_add_file(page);
3260 unlock_page(page);
3261 page_cache_release(page);
3262 }
3263 rc = -ENOMEM;
3264 break;
3265 }
3266
Jeff Layton6993f742012-05-16 07:13:17 -04003267 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003268 rdata->mapping = mapping;
3269 rdata->offset = offset;
3270 rdata->bytes = bytes;
3271 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003272 rdata->pagesz = PAGE_CACHE_SIZE;
3273 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003274
3275 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3276 list_del(&page->lru);
3277 rdata->pages[rdata->nr_pages++] = page;
3278 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003279
Jeff Layton2a1bb132012-05-16 07:13:17 -04003280 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003281 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003282 for (i = 0; i < rdata->nr_pages; i++) {
3283 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003284 lru_cache_add_file(page);
3285 unlock_page(page);
3286 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 }
Jeff Layton6993f742012-05-16 07:13:17 -04003288 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 break;
3290 }
Jeff Layton6993f742012-05-16 07:13:17 -04003291
3292 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 }
3294
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 return rc;
3296}
3297
3298static int cifs_readpage_worker(struct file *file, struct page *page,
3299 loff_t *poffset)
3300{
3301 char *read_data;
3302 int rc;
3303
Suresh Jayaraman56698232010-07-05 18:13:25 +05303304 /* Is the page cached? */
3305 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3306 if (rc == 0)
3307 goto read_complete;
3308
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 page_cache_get(page);
3310 read_data = kmap(page);
3311	/* for reads over a certain size we could initiate async readahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003312
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 if (rc < 0)
3316 goto io_error;
3317 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003318 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003319
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003320 file->f_path.dentry->d_inode->i_atime =
3321 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003322
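	/* zero-fill the remainder of the page on a short read */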
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 if (PAGE_CACHE_SIZE > rc)
3324 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3325
3326 flush_dcache_page(page);
3327 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303328
3329 /* send this page to the cache */
3330 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003333
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003335 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303337
3338read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 return rc;
3340}
3341
3342static int cifs_readpage(struct file *file, struct page *page)
3343{
3344 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3345 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003346 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003348 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
3350 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303351 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003352 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303353 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 }
3355
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003356 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003357 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359 rc = cifs_readpage_worker(file, page, &offset);
3360
3361 unlock_page(page);
3362
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003363 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 return rc;
3365}
3366
Steve Frencha403a0a2007-07-26 15:54:16 +00003367static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3368{
3369 struct cifsFileInfo *open_file;
3370
Jeff Layton44772882010-10-15 15:34:03 -04003371 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003372 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003373 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003374 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003375 return 1;
3376 }
3377 }
Jeff Layton44772882010-10-15 15:34:03 -04003378 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003379 return 0;
3380}
3381
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382/* We do not want to update the file size from the server for inodes
3383   open for write - to avoid races with writepage extending the
3384   file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003385   refreshing of the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386   but this is tricky to do without racing with writebehind
3387   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003388bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389{
Steve Frencha403a0a2007-07-26 15:54:16 +00003390 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003391 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003392
Steve Frencha403a0a2007-07-26 15:54:16 +00003393 if (is_inode_writable(cifsInode)) {
3394 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003395 struct cifs_sb_info *cifs_sb;
3396
Steve Frenchc32a0b62006-01-12 14:41:28 -08003397 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003398 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003399			/* since there is no page cache to corrupt with
Steve Frenchc32a0b62006-01-12 14:41:28 -08003400			   direct I/O, we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003401 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003402 }
3403
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003404 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003405 return true;
Steve French7ba52632007-02-08 18:14:13 +00003406
Steve French4b18f2a2008-04-29 00:06:05 +00003407 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003408 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003409 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410}
3411
Nick Piggind9414772008-09-24 11:32:59 -04003412static int cifs_write_begin(struct file *file, struct address_space *mapping,
3413 loff_t pos, unsigned len, unsigned flags,
3414 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415{
Nick Piggind9414772008-09-24 11:32:59 -04003416 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3417 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003418 loff_t page_start = pos & PAGE_MASK;
3419 loff_t i_size;
3420 struct page *page;
3421 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422
Joe Perchesb6b38f72010-04-21 03:50:45 +00003423 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003424
Nick Piggin54566b22009-01-04 12:00:53 -08003425 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003426 if (!page) {
3427 rc = -ENOMEM;
3428 goto out;
3429 }
Nick Piggind9414772008-09-24 11:32:59 -04003430
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003431 if (PageUptodate(page))
3432 goto out;
Steve French8a236262007-03-06 00:31:00 +00003433
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003434 /*
3435 * If we write a full page it will be up to date, no need to read from
3436 * the server. If the write is short, we'll end up doing a sync write
3437 * instead.
3438 */
3439 if (len == PAGE_CACHE_SIZE)
3440 goto out;
3441
3442 /*
3443 * optimize away the read when we have an oplock, and we're not
3444 * expecting to use any of the data we'd be reading in. That
3445 * is, when the page lies beyond the EOF, or straddles the EOF
3446 * and the write will cover all of the existing data.
3447 */
3448 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3449 i_size = i_size_read(mapping->host);
3450 if (page_start >= i_size ||
3451 (offset == 0 && (pos + len) >= i_size)) {
3452 zero_user_segments(page, 0, offset,
3453 offset + len,
3454 PAGE_CACHE_SIZE);
3455 /*
3456 * PageChecked means that the parts of the page
3457 * to which we're not writing are considered up
3458 * to date. Once the data is copied to the
3459 * page, it can be set uptodate.
3460 */
3461 SetPageChecked(page);
3462 goto out;
3463 }
3464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465
Nick Piggind9414772008-09-24 11:32:59 -04003466 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003467 /*
3468 * might as well read a page, it is fast enough. If we get
3469 * an error, we don't need to return it. cifs_write_end will
3470 * do a sync write instead since PG_uptodate isn't set.
3471 */
3472 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003473 } else {
3474		/* we could try using another file handle if there is one -
3475		   but how would we lock it to prevent a close of that handle
3476		   racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003477		   this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003478 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003479out:
3480 *pagep = page;
3481 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303484static int cifs_release_page(struct page *page, gfp_t gfp)
3485{
3486 if (PagePrivate(page))
3487 return 0;
3488
3489 return cifs_fscache_release_page(page, gfp);
3490}
3491
3492static void cifs_invalidate_page(struct page *page, unsigned long offset)
3493{
3494 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3495
3496 if (offset == 0)
3497 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3498}
3499
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003500static int cifs_launder_page(struct page *page)
3501{
3502 int rc = 0;
3503 loff_t range_start = page_offset(page);
3504 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3505 struct writeback_control wbc = {
3506 .sync_mode = WB_SYNC_ALL,
3507 .nr_to_write = 0,
3508 .range_start = range_start,
3509 .range_end = range_end,
3510 };
3511
3512 cFYI(1, "Launder page: %p", page);
3513
3514 if (clear_page_dirty_for_io(page))
3515 rc = cifs_writepage_locked(page, &wbc);
3516
3517 cifs_fscache_invalidate_page(page, page->mapping->host);
3518 return rc;
3519}
3520
Tejun Heo9b646972010-07-20 22:09:02 +02003521void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003522{
3523 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3524 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003525 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003526 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003527 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003528 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003529
3530 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003531 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003532 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003533 else
Al Viro8737c932009-12-24 06:47:55 -05003534 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003535 rc = filemap_fdatawrite(inode->i_mapping);
3536 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003537 rc = filemap_fdatawait(inode->i_mapping);
3538 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003539 invalidate_remote_inode(inode);
3540 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003541 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003542 }
3543
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003544 rc = cifs_push_locks(cfile);
3545 if (rc)
3546 cERROR(1, "Push locks rc = %d", rc);
3547
Jeff Layton3bc303c2009-09-21 06:47:50 -04003548 /*
3549 * Releasing a stale oplock after a recent reconnect of the smb session
3550 * using a now incorrect file handle is not a data integrity issue, but
3551 * do not bother sending an oplock release if the session to the server
3552 * is still disconnected, since the oplock was already released by the
3553 * server.
3553 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003554 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003555 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3556 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003557 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003558 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003559}
3560
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003561const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 .readpage = cifs_readpage,
3563 .readpages = cifs_readpages,
3564 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003565 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003566 .write_begin = cifs_write_begin,
3567 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303569 .releasepage = cifs_release_page,
3570 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003571 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003573
3574/*
3575 * cifs_readpages requires the server to support a buffer large enough to
3576 * contain the header plus one complete page of data. Otherwise, we need
3577 * to leave cifs_readpages out of the address space operations.
3578 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003579const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003580 .readpage = cifs_readpage,
3581 .writepage = cifs_writepage,
3582 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003583 .write_begin = cifs_write_begin,
3584 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003585 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303586 .releasepage = cifs_release_page,
3587 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003588 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003589};