blob: bceffa8c034eb55dba9d0be7ec53086ea5c2a2a9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path: path of the file relative to the share root
 * @pinode:    in/out; if non-NULL and *pinode is NULL a new inode is
 *             created from the returned attributes, otherwise the
 *             existing inode's attributes are refreshed
 * @sb:        superblock of the mount
 * @mode:      create mode; the caller's umask is applied here
 * @f_flags:   VFS open flags, converted via cifs_posix_convert_flags()
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle on success
 * @xid:       transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference no longer needed once the request has been sent */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no attributes */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the attributes on the already-known inode */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open a file using the regular (non-POSIX-extension) server open op.
 * Converts the VFS open flags to an NT desired access + disposition,
 * issues the open through server->ops->open(), and refreshes the inode
 * metadata from the returned FILE_ALL_INFO buffer on success.
 *
 * Returns 0 on success, -ENOSYS if the server has no open op, -ENOMEM
 * on allocation failure, or the error from the open / inode-info calls.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* allow open to succeed on files the user can only reach via backup */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	/* refresh inode metadata now that the server-side open succeeded */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
240
Jeff Layton15ecb432010-10-15 15:34:02 -0400241struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700242cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400243 struct tcon_link *tlink, __u32 oplock)
244{
245 struct dentry *dentry = file->f_path.dentry;
246 struct inode *inode = dentry->d_inode;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700247 struct cifsInodeInfo *cinode = CIFS_I(inode);
248 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700249 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700250 struct cifs_tcon *tcon = tlink_tcon(tlink);
Jeff Layton15ecb432010-10-15 15:34:02 -0400251
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700252 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
253 if (cfile == NULL)
254 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400255
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700256 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
257 if (!fdlocks) {
258 kfree(cfile);
259 return NULL;
260 }
261
262 INIT_LIST_HEAD(&fdlocks->locks);
263 fdlocks->cfile = cfile;
264 cfile->llist = fdlocks;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700265 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700266 list_add(&fdlocks->llist, &cinode->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700267 up_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700268
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700269 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700270 cfile->pid = current->tgid;
271 cfile->uid = current_fsuid();
272 cfile->dentry = dget(dentry);
273 cfile->f_flags = file->f_flags;
274 cfile->invalidHandle = false;
275 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700276 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700277 mutex_init(&cfile->fh_mutex);
Jeff Layton15ecb432010-10-15 15:34:02 -0400278
Jeff Layton44772882010-10-15 15:34:03 -0400279 spin_lock(&cifs_file_list_lock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700280 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
281 oplock = fid->pending_open->oplock;
282 list_del(&fid->pending_open->olist);
283
284 tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
285
286 list_add(&cfile->tlist, &tcon->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400287 /* if readable file instance put first in list*/
288 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700289 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400290 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700291 list_add_tail(&cfile->flist, &cinode->openFileList);
Jeff Layton44772882010-10-15 15:34:03 -0400292 spin_unlock(&cifs_file_list_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400293
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700294 file->private_data = cfile;
295 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400296}
297
/*
 * Take an additional reference on a cifsFileInfo under
 * cifs_file_list_lock. Returns the same pointer for convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
306
Steve Frenchcdff08e2010-10-21 22:46:14 +0000307/*
308 * Release a reference on the file private data. This may involve closing
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400309 * the filehandle out on the server. Must be called without holding
310 * cifs_file_list_lock.
Steve Frenchcdff08e2010-10-21 22:46:14 +0000311 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400312void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
313{
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300314 struct inode *inode = cifs_file->dentry->d_inode;
Steve French96daf2b2011-05-27 04:34:02 +0000315 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700316 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300317 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300318 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000319 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700320 struct cifs_fid fid;
321 struct cifs_pending_open open;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000322
323 spin_lock(&cifs_file_list_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400324 if (--cifs_file->count > 0) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000325 spin_unlock(&cifs_file_list_lock);
326 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400327 }
Steve Frenchcdff08e2010-10-21 22:46:14 +0000328
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700329 if (server->ops->get_lease_key)
330 server->ops->get_lease_key(inode, &fid);
331
332 /* store open in pending opens to make sure we don't miss lease break */
333 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
334
Steve Frenchcdff08e2010-10-21 22:46:14 +0000335 /* remove it from the lists */
336 list_del(&cifs_file->flist);
337 list_del(&cifs_file->tlist);
338
339 if (list_empty(&cifsi->openFileList)) {
340 cFYI(1, "closing last open instance for inode %p",
341 cifs_file->dentry->d_inode);
Pavel Shilovsky25364132012-09-18 16:20:27 -0700342 /*
343 * In strict cache mode we need invalidate mapping on the last
344 * close because it may cause a error when we open this file
345 * again and get at least level II oplock.
346 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300347 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
348 CIFS_I(inode)->invalid_mapping = true;
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300349 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000350 }
351 spin_unlock(&cifs_file_list_lock);
352
Jeff Laytonad635942011-07-26 12:20:17 -0400353 cancel_work_sync(&cifs_file->oplock_break);
354
Steve Frenchcdff08e2010-10-21 22:46:14 +0000355 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700356 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400357 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700358
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400359 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700360 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400361 server->ops->close(xid, tcon, &cifs_file->fid);
362 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000363 }
364
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700365 cifs_del_pending_open(&open);
366
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700367 /*
368 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000369 * is closed anyway.
370 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700371 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700372 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000373 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400374 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000375 kfree(li);
376 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700377 list_del(&cifs_file->llist->llist);
378 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700379 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000380
381 cifs_put_tlink(cifs_file->tlink);
382 dput(cifs_file->dentry);
383 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400384}
385
/*
 * VFS ->open handler for regular files. Tries the POSIX-extension open
 * first when the server supports it, falling back to the NT-style open
 * otherwise. Registers a pending open around the server round trip so a
 * lease break arriving mid-open is not lost, then builds the per-open
 * cifsFileInfo and attaches it to the struct file.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extension open when the server advertises it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims the capability but rejects the call -
			   stop trying POSIX opens on this tcon */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the open so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open before bailing out */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
507
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400508static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
509
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost. Pushes cached locks to the server via either the
 * POSIX or mandatory-lock path depending on the tcon's capabilities
 * and mount options.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	/* POSIX brlocks only if the server supports them and the mount
	   did not disable them via CIFS_MOUNT_NOPOSIXBRL */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}
540
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700541static int
542cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543{
544 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400545 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400546 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000548 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700549 struct TCP_Server_Info *server;
550 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000551 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700553 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500555 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700556 struct cifs_fid fid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400558 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700559 mutex_lock(&cfile->fh_mutex);
560 if (!cfile->invalidHandle) {
561 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530562 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400563 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530564 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 }
566
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700567 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700569 tcon = tlink_tcon(cfile->tlink);
570 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000571
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700572 /*
573 * Can not grab rename sem here because various ops, including those
574 * that already have the rename sem can end up causing writepage to get
575 * called and if the server was down that means we end up here, and we
576 * can never tell if the caller already has the rename_sem.
577 */
578 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000580 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700581 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400582 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000583 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 }
585
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700586 cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
587 full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300589 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 oplock = REQ_OPLOCK;
591 else
Steve French4b18f2a2008-04-29 00:06:05 +0000592 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400594 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000595 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400596 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400597 /*
598 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
599 * original open. Must mask them off for a reopen.
600 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700601 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400602 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400603
Jeff Layton2422f672010-06-16 13:40:16 -0400604 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700605 cifs_sb->mnt_file_mode /* ignored */,
606 oflags, &oplock, &fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000607 if (rc == 0) {
Joe Perchesb6b38f72010-04-21 03:50:45 +0000608 cFYI(1, "posix reopen succeeded");
Steve French7fc8f4e2009-02-23 20:43:11 +0000609 goto reopen_success;
610 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700611 /*
612 * fallthrough to retry open the old way on errors, especially
613 * in the reconnect path it is important to retry hard
614 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000615 }
616
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700617 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000618
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500619 if (backup_cred(cifs_sb))
620 create_options |= CREATE_OPEN_BACKUP_INTENT;
621
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700622 if (server->ops->get_lease_key)
623 server->ops->get_lease_key(inode, &fid);
624
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700625 /*
626 * Can not refresh inode by passing in file_info buf to be returned by
627 * CIFSSMBOpen and then calling get_inode_info with returned buf since
628 * file might have write behind data that needs to be flushed and server
629 * version of file size can be stale. If we knew for sure that inode was
630 * not dirty locally we could do this.
631 */
632 rc = server->ops->open(xid, tcon, full_path, disposition,
633 desired_access, create_options, &fid, &oplock,
634 NULL, cifs_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700636 mutex_unlock(&cfile->fh_mutex);
637 cFYI(1, "cifs_reopen returned 0x%x", rc);
Joe Perchesb6b38f72010-04-21 03:50:45 +0000638 cFYI(1, "oplock: %d", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400639 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 }
Jeff Layton15886172010-10-15 15:33:59 -0400641
642reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700643 cfile->invalidHandle = false;
644 mutex_unlock(&cfile->fh_mutex);
645 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400646
647 if (can_flush) {
648 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400649 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400650
Jeff Layton15886172010-10-15 15:33:59 -0400651 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700652 rc = cifs_get_inode_info_unix(&inode, full_path,
653 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400654 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700655 rc = cifs_get_inode_info(&inode, full_path, NULL,
656 inode->i_sb, xid, NULL);
657 }
658 /*
659 * Else we are writing out data to server already and could deadlock if
660 * we tried to flush data, and since we do not know if we have data that
661 * would invalidate the current end of file on the server we can not go
662 * to the server to get the new inode info.
663 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300664
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700665 server->ops->set_fid(cfile, &fid, oplock);
666 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400667
668reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400670 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return rc;
672}
673
674int cifs_close(struct inode *inode, struct file *file)
675{
Jeff Layton77970692011-04-05 16:23:47 -0700676 if (file->private_data != NULL) {
677 cifsFileInfo_put(file->private_data);
678 file->private_data = NULL;
679 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680
Steve Frenchcdff08e2010-10-21 22:46:14 +0000681 /* return code from the ->release op is always ignored */
682 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683}
684
/*
 * ->release handler for directories. Tears down the private search state
 * attached to the file: invalidates and closes an in-progress directory
 * search on the server, releases the network buffer still referenced by
 * the search info, and drops the tlink reference and the cifsFileInfo
 * allocation.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	/* nothing to clean up if the open never attached private data */
	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/*
		 * Search still in flight: mark the handle invalid under the
		 * list lock, then issue the server close after dropping the
		 * spinlock.
		 */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	/* free the network buffer cached by the readdir search, if any */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
735
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400736static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300737cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000738{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400739 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000740 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400741 if (!lock)
742 return lock;
743 lock->offset = offset;
744 lock->length = length;
745 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400746 lock->pid = current->tgid;
747 INIT_LIST_HEAD(&lock->blist);
748 init_waitqueue_head(&lock->block_q);
749 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400750}
751
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700752void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400753cifs_del_lock_waiters(struct cifsLockInfo *lock)
754{
755 struct cifsLockInfo *li, *tmp;
756 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
757 list_del_init(&li->blist);
758 wake_up(&li->block_q);
759 }
760}
761
/*
 * Check the locks recorded for one open fid (@fdlocks) for a conflict
 * with the range [@offset, @offset + @length) requested on behalf of
 * @cfile.
 *
 * An overlapping recorded lock is NOT treated as a conflict when:
 *  - @rw_check is set and the lock belongs to the same fid and the same
 *    thread group (checking a read/write against our own lock); or
 *  - the requested type is shared and either the overlapping lock was
 *    taken by the same fid and thread group or it has the same type.
 *
 * On conflict, stores the offending lock in *@conf_lock (when non-NULL)
 * and returns true; returns false when nothing conflicts.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that do not overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
788
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700789bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300790cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700791 __u8 type, struct cifsLockInfo **conf_lock,
792 bool rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400793{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300794 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700795 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300796 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300797
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700798 list_for_each_entry(cur, &cinode->llist, llist) {
799 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700800 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300801 if (rc)
802 break;
803 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300804
805 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400806}
807
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300808/*
809 * Check if there is another lock that prevents us to set the lock (mandatory
810 * style). If such a lock exists, update the flock structure with its
811 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
812 * or leave it the same if we can't. Returns 0 if we don't need to request to
813 * the server or 1 otherwise.
814 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400815static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300816cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
817 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400818{
819 int rc = 0;
820 struct cifsLockInfo *conf_lock;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300821 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300822 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400823 bool exist;
824
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700825 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400826
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300827 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700828 &conf_lock, false);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400829 if (exist) {
830 flock->fl_start = conf_lock->offset;
831 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
832 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300833 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400834 flock->fl_type = F_RDLCK;
835 else
836 flock->fl_type = F_WRLCK;
837 } else if (!cinode->can_cache_brlcks)
838 rc = 1;
839 else
840 flock->fl_type = F_UNLCK;
841
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700842 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400843 return rc;
844}
845
/*
 * Record @lock on @cfile's per-fid lock list, taking the inode's
 * lock_sem for writing around the insertion.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
854
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cached - record it locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		/* no local conflict, but the server must be asked */
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep until cifs_del_lock_waiters() detaches us (our blist
		 * becomes a singleton again), then retry from the top. The
		 * rwsem is dropped while sleeping.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted by a signal - unhook from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
901
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300902/*
903 * Check if there is another lock that prevents us to set the lock (posix
904 * style). If such a lock exists, update the flock structure with its
905 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
906 * or leave it the same if we can't. Returns 0 if we don't need to request to
907 * the server or 1 otherwise.
908 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400909static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400910cifs_posix_lock_test(struct file *file, struct file_lock *flock)
911{
912 int rc = 0;
913 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
914 unsigned char saved_type = flock->fl_type;
915
Pavel Shilovsky50792762011-10-29 17:17:57 +0400916 if ((flock->fl_flags & FL_POSIX) == 0)
917 return 1;
918
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700919 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400920 posix_test_lock(file, flock);
921
922 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
923 flock->fl_type = saved_type;
924 rc = 1;
925 }
926
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700927 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400928 return rc;
929}
930
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	/* only posix-style requests are handled locally */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* brlocks not cached on this inode - ask the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A conflicting lock blocks us; wait interruptibly for it to
		 * be released, then retry from the top. On a signal, detach
		 * from the blocked-lock list before returning.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
963
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -0700964int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400965cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400966{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400967 unsigned int xid;
968 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400969 struct cifsLockInfo *li, *tmp;
970 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400971 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400972 LOCKING_ANDX_RANGE *buf, *cur;
973 int types[] = {LOCKING_ANDX_LARGE_FILES,
974 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
975 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400976
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400977 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400978 tcon = tlink_tcon(cfile->tlink);
979
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400980 /*
981 * Accessing maxBuf is racy with cifs_reconnect - need to store value
982 * and check it for zero before using.
983 */
984 max_buf = tcon->ses->server->maxBuf;
985 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400986 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +0400987 return -EINVAL;
988 }
989
990 max_num = (max_buf - sizeof(struct smb_hdr)) /
991 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400992 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
993 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400994 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +0400995 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400996 }
997
998 for (i = 0; i < 2; i++) {
999 cur = buf;
1000 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001001 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001002 if (li->type != types[i])
1003 continue;
1004 cur->Pid = cpu_to_le16(li->pid);
1005 cur->LengthLow = cpu_to_le32((u32)li->length);
1006 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1007 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1008 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1009 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001010 stored_rc = cifs_lockv(xid, tcon,
1011 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001012 (__u8)li->type, 0, num,
1013 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001014 if (stored_rc)
1015 rc = stored_rc;
1016 cur = buf;
1017 num = 0;
1018 } else
1019 cur++;
1020 }
1021
1022 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001023 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001024 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001025 if (stored_rc)
1026 rc = stored_rc;
1027 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001028 }
1029
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001030 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001031 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001032 return rc;
1033}
1034
/*
 * copied from fs/locks.c with a name change: iterate over the inode's
 * i_flock list; callers in this file wrap the traversal in
 * lock_flocks()/unlock_flocks().
 */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
1039
/*
 * Snapshot of one posix lock, filled in under lock_flocks() and sent to
 * the server after the spinlock is dropped (see cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* entry on the locks_to_send list */
	__u64 offset;		/* start of the byte range (fl_start) */
	__u64 length;		/* length of the byte range */
	__u32 pid;		/* owner pid reported to the server (fl_pid) */
	__u16 netfid;		/* file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1048
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001049static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001050cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001051{
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001052 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1053 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001054 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001055 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001056 struct list_head locks_to_send, *el;
1057 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001058 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001059
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001060 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001061
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001062 lock_flocks();
1063 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001064 if ((*before)->fl_flags & FL_POSIX)
1065 count++;
1066 }
1067 unlock_flocks();
1068
1069 INIT_LIST_HEAD(&locks_to_send);
1070
1071 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001072 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001073 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001074 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001075 */
1076 for (; i < count; i++) {
1077 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1078 if (!lck) {
1079 rc = -ENOMEM;
1080 goto err_out;
1081 }
1082 list_add_tail(&lck->llist, &locks_to_send);
1083 }
1084
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001085 el = locks_to_send.next;
1086 lock_flocks();
1087 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001088 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001089 if ((flock->fl_flags & FL_POSIX) == 0)
1090 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001091 if (el == &locks_to_send) {
1092 /*
1093 * The list ended. We don't have enough allocated
1094 * structures - something is really wrong.
1095 */
1096 cERROR(1, "Can't push all brlocks!");
1097 break;
1098 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 length = 1 + flock->fl_end - flock->fl_start;
1100 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1101 type = CIFS_RDLCK;
1102 else
1103 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001104 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001105 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001106 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001107 lck->length = length;
1108 lck->type = type;
1109 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001110 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001111 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001112 unlock_flocks();
1113
1114 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001115 int stored_rc;
1116
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001117 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001118 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001119 lck->type, 0);
1120 if (stored_rc)
1121 rc = stored_rc;
1122 list_del(&lck->llist);
1123 kfree(lck);
1124 }
1125
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001126out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001127 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001128 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001129err_out:
1130 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1131 list_del(&lck->llist);
1132 kfree(lck);
1133 }
1134 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001135}
1136
1137static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001138cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001139{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001140 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001141 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001142 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001143 int rc = 0;
1144
1145 /* we are going to update can_cache_brlcks here - need a write access */
1146 down_write(&cinode->lock_sem);
1147 if (!cinode->can_cache_brlcks) {
1148 up_write(&cinode->lock_sem);
1149 return rc;
1150 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001151
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001152 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001153 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1154 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001155 rc = cifs_push_posix_locks(cfile);
1156 else
1157 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001158
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001159 cinode->can_cache_brlcks = false;
1160 up_write(&cinode->lock_sem);
1161 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001162}
1163
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001165cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001166 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001168 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001169 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001170 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001171 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001172 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001173 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001174 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001176 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001177 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001178 "not implemented yet");
1179 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001180 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001181 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001182 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1183 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001184 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001186 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001187 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001188 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001189 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001190 *lock = 1;
1191 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001192 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001193 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001194 *unlock = 1;
1195 /* Check if unlock includes more than one lock range */
1196 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001197 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001198 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001199 *lock = 1;
1200 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001201 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001202 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001203 *lock = 1;
1204 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001205 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001206 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001207 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001209 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001210}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211
/*
 * Handle an F_GETLK-style request: determine whether the lock described
 * by @flock could be acquired, without leaving it held.
 *
 * For posix locking, try the local test first and fall back to asking
 * the server via CIFSSMBPosixLock. For mandatory-style locking, probe
 * by actually setting the lock on the server and immediately unlocking
 * it - first at the requested type, then (for an exclusive request)
 * downgraded to shared to distinguish a read conflict from a write
 * conflict. @flock->fl_type is updated with the result (F_UNLCK when
 * the range is free).
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0 means the local answer is authoritative */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* try to answer from the locally cached mandatory locks */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* probe lock succeeded: range is free - undo it */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* a shared probe failed: an exclusive lock is in the way */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* retry the probe as shared to classify the conflicting lock */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1279
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001280void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001281cifs_move_llist(struct list_head *source, struct list_head *dest)
1282{
1283 struct list_head *li, *tmp;
1284 list_for_each_safe(li, tmp, source)
1285 list_move(li, dest);
1286}
1287
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001288void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001289cifs_free_llist(struct list_head *llist)
1290{
1291 struct cifsLockInfo *li, *tmp;
1292 list_for_each_entry_safe(li, tmp, llist, llist) {
1293 cifs_del_lock_waiters(li);
1294 list_del(&li->llist);
1295 kfree(li);
1296 }
1297}
1298
/*
 * Unlock the byte range described by @flock on the server for @cfile.
 *
 * Cached locks owned by the current tgid that are fully contained in the
 * unlock range are collected, one pass per entry of types[] (exclusive
 * large-file locks first, then shared), and sent in LOCKING_ANDX batches of
 * at most max_num ranges so each request fits the server's maxBuf.  While
 * brlocks are still cacheable (can_cache_brlcks) matching locks are simply
 * dropped from the file's list without any network request.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on setup failure, or the last
 * non-zero status from cifs_lockv().
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* exclusive access to the file's lock list while we edit it */
	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully inside the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* skip locks owned by other thread groups */
			if (current->tgid != li->pid)
				continue;
			/* only handle the lock type of the current pass */
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* batch full - flush it to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* flush any partially filled final batch for this pass */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1405
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001407cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001408 bool wait_flag, bool posix_lck, int lock, int unlock,
1409 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001410{
1411 int rc = 0;
1412 __u64 length = 1 + flock->fl_end - flock->fl_start;
1413 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1414 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001415 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001416
1417 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001418 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001419
1420 rc = cifs_posix_lock_set(file, flock);
1421 if (!rc || rc < 0)
1422 return rc;
1423
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001424 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001425 posix_lock_type = CIFS_RDLCK;
1426 else
1427 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001428
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001430 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001431
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001432 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1433 current->tgid, flock->fl_start, length,
1434 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001436 }
1437
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001439 struct cifsLockInfo *lock;
1440
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001441 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001442 if (!lock)
1443 return -ENOMEM;
1444
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001445 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001446 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001447 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001448 return rc;
1449 }
1450 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001451 goto out;
1452
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001453 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1454 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001455 if (rc) {
1456 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001457 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001458 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001459
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001460 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001462 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001463
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001464out:
1465 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001466 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001467 return rc;
1468}
1469
1470int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1471{
1472 int rc, xid;
1473 int lock = 0, unlock = 0;
1474 bool wait_flag = false;
1475 bool posix_lck = false;
1476 struct cifs_sb_info *cifs_sb;
1477 struct cifs_tcon *tcon;
1478 struct cifsInodeInfo *cinode;
1479 struct cifsFileInfo *cfile;
1480 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001481 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001482
1483 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001484 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001485
1486 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1487 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1488 flock->fl_start, flock->fl_end);
1489
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001490 cfile = (struct cifsFileInfo *)file->private_data;
1491 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001492
1493 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1494 tcon->ses->server);
1495
1496 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001497 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001498 cinode = CIFS_I(file->f_path.dentry->d_inode);
1499
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001500 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001501 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1502 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1503 posix_lck = true;
1504 /*
1505 * BB add code here to normalize offset and length to account for
1506 * negative length which we can not accept over the wire.
1507 */
1508 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001509 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001510 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001511 return rc;
1512 }
1513
1514 if (!lock && !unlock) {
1515 /*
1516 * if no lock or unlock then nothing to do since we do not
1517 * know what it is
1518 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001519 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001520 return -EOPNOTSUPP;
1521 }
1522
1523 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1524 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001525 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 return rc;
1527}
1528
Jeff Layton597b0272012-03-23 14:40:56 -04001529/*
1530 * update the file size (if needed) after a write. Should be called with
1531 * the inode->i_lock held
1532 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001533void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001534cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1535 unsigned int bytes_written)
1536{
1537 loff_t end_of_write = offset + bytes_written;
1538
1539 if (end_of_write > cifsi->server_eof)
1540 cifsi->server_eof = end_of_write;
1541}
1542
/*
 * Write @write_size bytes from @write_data to the server at *@offset using
 * @open_file's handle, splitting the transfer into wsize-limited chunks.
 *
 * Each chunk is resent while the server op returns -EAGAIN, reopening an
 * invalidated handle first (without flushing, to avoid deadlock).  On
 * forward progress *@offset is advanced, and the cached server EOF and the
 * local inode size are pushed forward under inode->i_lock.
 *
 * Returns the number of bytes written, or a negative errno if nothing at
 * all could be written (-ENOSYS when the server has no sync_write op).
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		/* keep resending this chunk while the op returns -EAGAIN */
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* chunk size is capped by the negotiated wsize */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* error with partial progress: return bytes so far;
			   no progress at all: return the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* grow the locally cached file size if we extended the file */
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1627
/*
 * Find a valid open handle on @cifs_inode usable for reading.
 *
 * Walks the inode's open-file list under cifs_file_list_lock and returns
 * the first non-invalidated handle opened with FMODE_READ, with its
 * refcount raised via cifsFileInfo_get_locked() (caller must put it).
 * When @fsuid_only is set on a multiuser mount, only handles owned by the
 * current fsuid are considered.  Returns NULL if no usable handle exists.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001661
/*
 * Find an open handle on @cifs_inode usable for writing.
 *
 * Search order: a valid FMODE_WRITE handle owned by the current tgid; then
 * (any_available pass) a valid writable handle owned by anyone; finally an
 * invalidated writable handle (inv_file) which we try to reopen, retrying
 * up to MAX_REOPEN_ATT times with the handle demoted to the list tail on
 * each failure.  When @fsuid_only is set on a multiuser mount, only
 * handles owned by the current fsuid are considered.
 *
 * The returned handle has its refcount raised (cifsFileInfo_get_locked);
 * the caller is responsible for cifsFileInfo_put(), as cifs_partialpagewrite
 * does.  Returns NULL when nothing usable can be found or reopened.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	/* give up after MAX_REOPEN_ATT failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				/* remember first invalidated candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		/* reopen outside the spinlock; it may sleep */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* demote the failed handle and retry the search */
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
1741
/*
 * Write the byte range [@from, @to) of @page back to the server.
 *
 * Picks any writable handle for the inode via find_writable_file() and
 * pushes the data with cifs_write().  The range is clamped so the file is
 * never extended.  Returns 0 on success, a negative errno on failure, and
 * 0 (without writing) when the page lies entirely beyond i_size, i.e. we
 * raced with truncate.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		/* else: rc stays -EFAULT (zero-byte write) */
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1795
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001797 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001799 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1800 bool done = false, scanned = false, range_whole = false;
1801 pgoff_t end, index;
1802 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001803 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001804 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001805 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001806
Steve French37c0eb42005-10-05 14:50:29 -07001807 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001808 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001809 * one page at a time via cifs_writepage
1810 */
1811 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1812 return generic_writepages(mapping, wbc);
1813
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001814 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001815 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001816 end = -1;
1817 } else {
1818 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1819 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1820 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001821 range_whole = true;
1822 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001823 }
1824retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001825 while (!done && index <= end) {
1826 unsigned int i, nr_pages, found_pages;
1827 pgoff_t next = 0, tofind;
1828 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001829
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001830 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1831 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001832
Jeff Laytonc2e87642012-03-23 14:40:55 -04001833 wdata = cifs_writedata_alloc((unsigned int)tofind,
1834 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001835 if (!wdata) {
1836 rc = -ENOMEM;
1837 break;
1838 }
1839
1840 /*
1841 * find_get_pages_tag seems to return a max of 256 on each
1842 * iteration, so we must call it several times in order to
1843 * fill the array or the wsize is effectively limited to
1844 * 256 * PAGE_CACHE_SIZE.
1845 */
1846 found_pages = 0;
1847 pages = wdata->pages;
1848 do {
1849 nr_pages = find_get_pages_tag(mapping, &index,
1850 PAGECACHE_TAG_DIRTY,
1851 tofind, pages);
1852 found_pages += nr_pages;
1853 tofind -= nr_pages;
1854 pages += nr_pages;
1855 } while (nr_pages && tofind && index <= end);
1856
1857 if (found_pages == 0) {
1858 kref_put(&wdata->refcount, cifs_writedata_release);
1859 break;
1860 }
1861
1862 nr_pages = 0;
1863 for (i = 0; i < found_pages; i++) {
1864 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001865 /*
1866 * At this point we hold neither mapping->tree_lock nor
1867 * lock on the page itself: the page may be truncated or
1868 * invalidated (changing page->mapping to NULL), or even
1869 * swizzled back from swapper_space to tmpfs file
1870 * mapping
1871 */
1872
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001873 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001874 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001875 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001876 break;
1877
1878 if (unlikely(page->mapping != mapping)) {
1879 unlock_page(page);
1880 break;
1881 }
1882
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001883 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001884 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001885 unlock_page(page);
1886 break;
1887 }
1888
1889 if (next && (page->index != next)) {
1890 /* Not next consecutive page */
1891 unlock_page(page);
1892 break;
1893 }
1894
1895 if (wbc->sync_mode != WB_SYNC_NONE)
1896 wait_on_page_writeback(page);
1897
1898 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001899 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001900 unlock_page(page);
1901 break;
1902 }
Steve French84d2f072005-10-12 15:32:05 -07001903
Linus Torvaldscb876f42006-12-23 16:19:07 -08001904 /*
1905 * This actually clears the dirty bit in the radix tree.
1906 * See cifs_writepage() for more commentary.
1907 */
1908 set_page_writeback(page);
1909
Jeff Layton3a98b862012-11-26 09:48:41 -05001910 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001911 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001912 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001913 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001914 break;
1915 }
1916
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001917 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001918 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001919 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001920 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001921
1922 /* reset index to refind any pages skipped */
1923 if (nr_pages == 0)
1924 index = wdata->pages[0]->index + 1;
1925
1926 /* put any pages we aren't going to use */
1927 for (i = nr_pages; i < found_pages; i++) {
1928 page_cache_release(wdata->pages[i]);
1929 wdata->pages[i] = NULL;
1930 }
1931
1932 /* nothing to write? */
1933 if (nr_pages == 0) {
1934 kref_put(&wdata->refcount, cifs_writedata_release);
1935 continue;
1936 }
1937
1938 wdata->sync_mode = wbc->sync_mode;
1939 wdata->nr_pages = nr_pages;
1940 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001941 wdata->pagesz = PAGE_CACHE_SIZE;
1942 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05001943 min(i_size_read(mapping->host) -
1944 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07001945 (loff_t)PAGE_CACHE_SIZE);
1946 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1947 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001948
1949 do {
1950 if (wdata->cfile != NULL)
1951 cifsFileInfo_put(wdata->cfile);
1952 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1953 false);
1954 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001955 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001956 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001957 break;
Steve French37c0eb42005-10-05 14:50:29 -07001958 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001959 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001960 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1961 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001962 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001963
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001964 for (i = 0; i < nr_pages; ++i)
1965 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001966
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001967 /* send failure -- clean up the mess */
1968 if (rc != 0) {
1969 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001970 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001971 redirty_page_for_writepage(wbc,
1972 wdata->pages[i]);
1973 else
1974 SetPageError(wdata->pages[i]);
1975 end_page_writeback(wdata->pages[i]);
1976 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001977 }
Jeff Layton941b8532011-01-11 07:24:01 -05001978 if (rc != -EAGAIN)
1979 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001980 }
1981 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001982
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001983 wbc->nr_to_write -= nr_pages;
1984 if (wbc->nr_to_write <= 0)
1985 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001986
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001987 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001988 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001989
Steve French37c0eb42005-10-05 14:50:29 -07001990 if (!scanned && !done) {
1991 /*
1992 * We hit the last page and there is more work to be done: wrap
1993 * back to the start of the file
1994 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001995 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001996 index = 0;
1997 goto retry;
1998 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001999
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002000 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002001 mapping->writeback_index = index;
2002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 return rc;
2004}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
/*
 * Write out a single dirty page, retrying on -EAGAIN when doing
 * data-integrity (WB_SYNC_ALL) writeback. The caller holds the page lock;
 * this function does NOT unlock the page -- see cifs_writepage() for the
 * unlocking wrapper.
 *
 * Returns 0 on success or a negative error from cifs_partialpagewrite().
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	/* take an extra reference so the page can't vanish under us */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;	/* integrity sync: must not skip page */
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);	/* try again later */
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
2044
/* address_space ->writepage: write the locked page, then release the lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return retval;
}
2051
/*
 * address_space ->write_end: commit @copied bytes that write_begin/copy_user
 * placed in @page at @pos. If the page is not up to date and the copy was
 * partial, the data is written synchronously through the file handle instead
 * of dirtying the page. Extends in-core i_size if the write went past EOF.
 * Consumes the page lock and reference taken by write_begin.
 *
 * Returns the number of bytes committed, or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with rwpidforward, the pid of the opener is sent on the wire */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	/*
	 * PageChecked marks a page that write_begin left !uptodate on
	 * purpose; it becomes uptodate only if the whole requested range
	 * was copied.
	 */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		/* cifs_write() advances pos by the amount written */
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		/* page cache holds the data; just dirty it for writeback */
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* i_lock protects i_size updates against concurrent writers */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2112
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], invalidate
 * the page cache when we hold no read oplock (so later reads refetch from
 * the server), then ask the server to flush unless the mount disabled
 * server-side sync (CIFS_MOUNT_NOSSYNC).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	/* no read oplock: cached pages may be stale, drop them */
	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2155
/*
 * Non-strict fsync: flush dirty pages in [start, end], then ask the server
 * to flush its copy unless the mount disabled server-side sync
 * (CIFS_MOUNT_NOSSYNC). Unlike cifs_strict_fsync(), the page cache is not
 * invalidated here.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2189
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190/*
2191 * As file closes, flush all cached write data for this inode checking
2192 * for write behind errors.
2193 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002194int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002196 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 int rc = 0;
2198
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002199 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002200 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002201
Joe Perchesb6b38f72010-04-21 03:50:45 +00002202 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
2204 return rc;
2205}
2206
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002207static int
2208cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2209{
2210 int rc = 0;
2211 unsigned long i;
2212
2213 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002214 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002215 if (!pages[i]) {
2216 /*
2217 * save number of pages we have already allocated and
2218 * return with ENOMEM error
2219 */
2220 num_pages = i;
2221 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002222 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002223 }
2224 }
2225
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002226 if (rc) {
2227 for (i = 0; i < num_pages; i++)
2228 put_page(pages[i]);
2229 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002230 return rc;
2231}
2232
2233static inline
2234size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2235{
2236 size_t num_pages;
2237 size_t clen;
2238
2239 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002240 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002241
2242 if (cur_len)
2243 *cur_len = clen;
2244
2245 return num_pages;
2246}
2247
/*
 * Completion work for an uncached async write: push the cached server EOF
 * and in-core i_size forward over the written range, wake the waiter in
 * cifs_iovec_write(), release the data pages (unless the send must be
 * retried with -EAGAIN, in which case the retrier still needs them) and
 * drop this work item's reference on the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock serializes i_size updates with other writers */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* on -EAGAIN the caller resends, so keep the page data around */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2272
2273/* attempt to send write to server, retry on any -EAGAIN errors */
2274static int
2275cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2276{
2277 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002278 struct TCP_Server_Info *server;
2279
2280 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002281
2282 do {
2283 if (wdata->cfile->invalidHandle) {
2284 rc = cifs_reopen_file(wdata->cfile, false);
2285 if (rc != 0)
2286 continue;
2287 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002288 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002289 } while (rc == -EAGAIN);
2290
2291 return rc;
2292}
2293
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002294static ssize_t
2295cifs_iovec_write(struct file *file, const struct iovec *iov,
2296 unsigned long nr_segs, loff_t *poffset)
2297{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002298 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002299 size_t copied, len, cur_len;
2300 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002301 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002302 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002303 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002304 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002305 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002306 struct cifs_writedata *wdata, *tmp;
2307 struct list_head wdata_list;
2308 int rc;
2309 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002310
2311 len = iov_length(iov, nr_segs);
2312 if (!len)
2313 return 0;
2314
2315 rc = generic_write_checks(file, poffset, &len, 0);
2316 if (rc)
2317 return rc;
2318
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002319 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002320 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002321 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002322 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002323
2324 if (!tcon->ses->server->ops->async_writev)
2325 return -ENOSYS;
2326
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002327 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002328
2329 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2330 pid = open_file->pid;
2331 else
2332 pid = current->tgid;
2333
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002334 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002335 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002336 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002337
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002338 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2339 wdata = cifs_writedata_alloc(nr_pages,
2340 cifs_uncached_writev_complete);
2341 if (!wdata) {
2342 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002343 break;
2344 }
2345
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002346 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2347 if (rc) {
2348 kfree(wdata);
2349 break;
2350 }
2351
2352 save_len = cur_len;
2353 for (i = 0; i < nr_pages; i++) {
2354 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2355 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2356 0, copied);
2357 cur_len -= copied;
2358 iov_iter_advance(&it, copied);
2359 }
2360 cur_len = save_len - cur_len;
2361
2362 wdata->sync_mode = WB_SYNC_ALL;
2363 wdata->nr_pages = nr_pages;
2364 wdata->offset = (__u64)offset;
2365 wdata->cfile = cifsFileInfo_get(open_file);
2366 wdata->pid = pid;
2367 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002368 wdata->pagesz = PAGE_SIZE;
2369 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002370 rc = cifs_uncached_retry_writev(wdata);
2371 if (rc) {
2372 kref_put(&wdata->refcount, cifs_writedata_release);
2373 break;
2374 }
2375
2376 list_add_tail(&wdata->list, &wdata_list);
2377 offset += cur_len;
2378 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002379 } while (len > 0);
2380
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002381 /*
2382 * If at least one write was successfully sent, then discard any rc
2383 * value from the later writes. If the other write succeeds, then
2384 * we'll end up returning whatever was written. If it fails, then
2385 * we'll get a new rc value from that.
2386 */
2387 if (!list_empty(&wdata_list))
2388 rc = 0;
2389
2390 /*
2391 * Wait for and collect replies for any successful sends in order of
2392 * increasing offset. Once an error is hit or we get a fatal signal
2393 * while waiting, then return without waiting for any more replies.
2394 */
2395restart_loop:
2396 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2397 if (!rc) {
2398 /* FIXME: freezable too? */
2399 rc = wait_for_completion_killable(&wdata->done);
2400 if (rc)
2401 rc = -EINTR;
2402 else if (wdata->result)
2403 rc = wdata->result;
2404 else
2405 total_written += wdata->bytes;
2406
2407 /* resend call if it's a retryable error */
2408 if (rc == -EAGAIN) {
2409 rc = cifs_uncached_retry_writev(wdata);
2410 goto restart_loop;
2411 }
2412 }
2413 list_del_init(&wdata->list);
2414 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002415 }
2416
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002417 if (total_written > 0)
2418 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002419
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002420 cifs_stats_bytes_written(tcon, total_written);
2421 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002422}
2423
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002424ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002425 unsigned long nr_segs, loff_t pos)
2426{
2427 ssize_t written;
2428 struct inode *inode;
2429
2430 inode = iocb->ki_filp->f_path.dentry->d_inode;
2431
2432 /*
2433 * BB - optimize the way when signing is disabled. We can drop this
2434 * extra memory-to-memory copying and use iovec buffers for constructing
2435 * write request.
2436 */
2437
2438 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2439 if (written > 0) {
2440 CIFS_I(inode)->invalid_mapping = true;
2441 iocb->ki_pos = pos;
2442 }
2443
2444 return written;
2445}
2446
/*
 * Cached write honoring mandatory byte-range locks: take lock_sem shared to
 * keep the brlock list stable, refuse the write with -EACCES when a
 * conflicting lock covers the range, otherwise do a generic buffered write
 * under i_mutex and sync afterwards if O_SYNC semantics require it.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     true)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		/* honor O_SYNC/O_DSYNC; done outside i_mutex */
		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}
2488
/*
 * Strict cache mode write dispatcher: choose between a cached write, an
 * uncached server write, or a write-through depending on which oplocks we
 * hold and whether POSIX byte-range locks are available.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * If we have an oplock for read and want to write a data to the file
	 * we need to store it in the page cache and then push it to the server
	 * to be sure the next read will get a valid data.
	 */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
		ssize_t written;
		int rc;

		written = generic_file_aio_write(iocb, iov, nr_segs, pos);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			return (ssize_t)rc;

		return written;
	}
#endif

	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */

	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

	/*
	 * Write oplock held: safe to go through the page cache. With POSIX
	 * lock semantics (unix extensions, no "noposixbrl" mount flag) the
	 * generic path suffices; otherwise check mandatory brlocks first.
	 */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	return cifs_writev(iocb, iov, nr_segs, pos);
}
2536
Jeff Layton0471ca32012-05-16 07:13:16 -04002537static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002538cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002539{
2540 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002541
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002542 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2543 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002544 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002545 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002546 INIT_LIST_HEAD(&rdata->list);
2547 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002548 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002549 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002550
Jeff Layton0471ca32012-05-16 07:13:16 -04002551 return rdata;
2552}
2553
Jeff Layton6993f742012-05-16 07:13:17 -04002554void
2555cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002556{
Jeff Layton6993f742012-05-16 07:13:17 -04002557 struct cifs_readdata *rdata = container_of(refcount,
2558 struct cifs_readdata, refcount);
2559
2560 if (rdata->cfile)
2561 cifsFileInfo_put(rdata->cfile);
2562
Jeff Layton0471ca32012-05-16 07:13:16 -04002563 kfree(rdata);
2564}
2565
Jeff Layton2a1bb132012-05-16 07:13:17 -04002566static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002567cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002568{
2569 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002570 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002571 unsigned int i;
2572
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002573 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002574 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2575 if (!page) {
2576 rc = -ENOMEM;
2577 break;
2578 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002579 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002580 }
2581
2582 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002583 for (i = 0; i < nr_pages; i++) {
2584 put_page(rdata->pages[i]);
2585 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002586 }
2587 }
2588 return rc;
2589}
2590
2591static void
2592cifs_uncached_readdata_release(struct kref *refcount)
2593{
Jeff Layton1c892542012-05-16 07:13:17 -04002594 struct cifs_readdata *rdata = container_of(refcount,
2595 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002596 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002597
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002598 for (i = 0; i < rdata->nr_pages; i++) {
2599 put_page(rdata->pages[i]);
2600 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002601 }
2602 cifs_readdata_release(refcount);
2603}
2604
2605static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002606cifs_retry_async_readv(struct cifs_readdata *rdata)
2607{
2608 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002609 struct TCP_Server_Info *server;
2610
2611 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002612
2613 do {
2614 if (rdata->cfile->invalidHandle) {
2615 rc = cifs_reopen_file(rdata->cfile, true);
2616 if (rc != 0)
2617 continue;
2618 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002619 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002620 } while (rc == -EAGAIN);
2621
2622 return rc;
2623}
2624
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 on success or the error from memcpy_toiovecend() (a fault while
 * copying to userspace). NOTE(review): assumes rdata->offset >= @offset so
 * "pos" below is non-negative -- holds for the caller's ordered list.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}
2679
/*
 * Completion work for an uncached async read: signal any waiter blocked on
 * rdata->done, then drop this work item's reference (the pages are released
 * by cifs_uncached_readdata_release() when the last reference goes away).
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2689
/*
 * Receive "len" bytes from the server socket into the pages of an uncached
 * read request. Whole pages are filled first; a final partial page is
 * zero-padded past "len" and its valid length recorded in rdata->tailsz.
 * Pages beyond the received length are released and removed from
 * rdata->nr_pages.
 *
 * Returns the number of bytes read into pages or, if nothing was read, the
 * (negative) result of the failing socket read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		/* pull this page's worth of data off the socket */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2737
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002738static ssize_t
2739cifs_iovec_read(struct file *file, const struct iovec *iov,
2740 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741{
Jeff Layton1c892542012-05-16 07:13:17 -04002742 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002743 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002744 ssize_t total_read = 0;
2745 loff_t offset = *poffset;
2746 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002748 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002750 struct cifs_readdata *rdata, *tmp;
2751 struct list_head rdata_list;
2752 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002753
2754 if (!nr_segs)
2755 return 0;
2756
2757 len = iov_length(iov, nr_segs);
2758 if (!len)
2759 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
Jeff Layton1c892542012-05-16 07:13:17 -04002761 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002762 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002763 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002764 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002766 if (!tcon->ses->server->ops->async_readv)
2767 return -ENOSYS;
2768
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002769 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2770 pid = open_file->pid;
2771 else
2772 pid = current->tgid;
2773
Steve Frenchad7a2922008-02-07 23:25:02 +00002774 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002775 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002776
Jeff Layton1c892542012-05-16 07:13:17 -04002777 do {
2778 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2779 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002780
Jeff Layton1c892542012-05-16 07:13:17 -04002781 /* allocate a readdata struct */
2782 rdata = cifs_readdata_alloc(npages,
2783 cifs_uncached_readv_complete);
2784 if (!rdata) {
2785 rc = -ENOMEM;
2786 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002788
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002789 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002790 if (rc)
2791 goto error;
2792
2793 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002794 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002795 rdata->offset = offset;
2796 rdata->bytes = cur_len;
2797 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002798 rdata->pagesz = PAGE_SIZE;
2799 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002800
2801 rc = cifs_retry_async_readv(rdata);
2802error:
2803 if (rc) {
2804 kref_put(&rdata->refcount,
2805 cifs_uncached_readdata_release);
2806 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807 }
Jeff Layton1c892542012-05-16 07:13:17 -04002808
2809 list_add_tail(&rdata->list, &rdata_list);
2810 offset += cur_len;
2811 len -= cur_len;
2812 } while (len > 0);
2813
2814 /* if at least one read request send succeeded, then reset rc */
2815 if (!list_empty(&rdata_list))
2816 rc = 0;
2817
2818 /* the loop below should proceed in the order of increasing offsets */
2819restart_loop:
2820 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2821 if (!rc) {
2822 ssize_t copied;
2823
2824 /* FIXME: freezable sleep too? */
2825 rc = wait_for_completion_killable(&rdata->done);
2826 if (rc)
2827 rc = -EINTR;
2828 else if (rdata->result)
2829 rc = rdata->result;
2830 else {
2831 rc = cifs_readdata_to_iov(rdata, iov,
2832 nr_segs, *poffset,
2833 &copied);
2834 total_read += copied;
2835 }
2836
2837 /* resend call if it's a retryable error */
2838 if (rc == -EAGAIN) {
2839 rc = cifs_retry_async_readv(rdata);
2840 goto restart_loop;
2841 }
2842 }
2843 list_del_init(&rdata->list);
2844 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002846
Jeff Layton1c892542012-05-16 07:13:17 -04002847 cifs_stats_bytes_read(tcon, total_read);
2848 *poffset += total_read;
2849
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002850 /* mask nodata case */
2851 if (rc == -ENODATA)
2852 rc = 0;
2853
Jeff Layton1c892542012-05-16 07:13:17 -04002854 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855}
2856
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002857ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002858 unsigned long nr_segs, loff_t pos)
2859{
2860 ssize_t read;
2861
2862 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2863 if (read > 0)
2864 iocb->ki_pos = pos;
2865
2866 return read;
2867}
2868
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002869ssize_t
2870cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2871 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002872{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002873 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2874 struct cifsInodeInfo *cinode = CIFS_I(inode);
2875 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2876 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2877 iocb->ki_filp->private_data;
2878 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2879 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002880
2881 /*
2882 * In strict cache mode we need to read from the server all the time
2883 * if we don't have level II oplock because the server can delay mtime
2884 * change - so we can't make a decision about inode invalidating.
2885 * And we can also fail with pagereading if there are mandatory locks
2886 * on pages affected by this read but not on the region from pos to
2887 * pos+len-1.
2888 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002889 if (!cinode->clientCanCacheRead)
2890 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002891
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002892 if (cap_unix(tcon->ses) &&
2893 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2894 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2895 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2896
2897 /*
2898 * We need to hold the sem to be sure nobody modifies lock list
2899 * with a brlock that prevents reading.
2900 */
2901 down_read(&cinode->lock_sem);
2902 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2903 tcon->ses->server->vals->shared_lock_type,
2904 NULL, true))
2905 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2906 up_read(&cinode->lock_sem);
2907 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002908}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002910static ssize_t
2911cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912{
2913 int rc = -EACCES;
2914 unsigned int bytes_read = 0;
2915 unsigned int total_read;
2916 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002917 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002919 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002920 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002921 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002922 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002924 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002925 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002926 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002928 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002929 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002931 /* FIXME: set up handlers for larger reads and/or convert to async */
2932 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2933
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302935 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002936 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302937 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002939 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002940 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002941 server = tcon->ses->server;
2942
2943 if (!server->ops->sync_read) {
2944 free_xid(xid);
2945 return -ENOSYS;
2946 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002948 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2949 pid = open_file->pid;
2950 else
2951 pid = current->tgid;
2952
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002954 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002956 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2957 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002958 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002959 /*
2960 * For windows me and 9x we do not want to request more than it
2961 * negotiated since it will refuse the read then.
2962 */
2963 if ((tcon->ses) && !(tcon->ses->capabilities &
2964 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002965 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002966 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002967 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 rc = -EAGAIN;
2969 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002970 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002971 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 if (rc != 0)
2973 break;
2974 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002975 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002976 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002977 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002978 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002979 rc = server->ops->sync_read(xid, open_file, &io_parms,
2980 &bytes_read, &cur_offset,
2981 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 }
2983 if (rc || (bytes_read == 0)) {
2984 if (total_read) {
2985 break;
2986 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002987 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 return rc;
2989 }
2990 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002991 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002992 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 }
2994 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002995 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 return total_read;
2997}
2998
Jeff Laytonca83ce32011-04-12 09:13:44 -04002999/*
3000 * If the page is mmap'ed into a process' page tables, then we need to make
3001 * sure that it doesn't change while being written back.
3002 */
3003static int
3004cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3005{
3006 struct page *page = vmf->page;
3007
3008 lock_page(page);
3009 return VM_FAULT_LOCKED;
3010}
3011
3012static struct vm_operations_struct cifs_file_vm_ops = {
3013 .fault = filemap_fault,
3014 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003015 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003016};
3017
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003018int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3019{
3020 int rc, xid;
3021 struct inode *inode = file->f_path.dentry->d_inode;
3022
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003023 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003024
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003025 if (!CIFS_I(inode)->clientCanCacheRead) {
3026 rc = cifs_invalidate_mapping(inode);
3027 if (rc)
3028 return rc;
3029 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003030
3031 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003032 if (rc == 0)
3033 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003034 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003035 return rc;
3036}
3037
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3039{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 int rc, xid;
3041
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003042 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003043 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003045 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003046 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 return rc;
3048 }
3049 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003050 if (rc == 0)
3051 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003052 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 return rc;
3054}
3055
Jeff Layton0471ca32012-05-16 07:13:16 -04003056static void
3057cifs_readv_complete(struct work_struct *work)
3058{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003059 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003060 struct cifs_readdata *rdata = container_of(work,
3061 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003062
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003063 for (i = 0; i < rdata->nr_pages; i++) {
3064 struct page *page = rdata->pages[i];
3065
Jeff Layton0471ca32012-05-16 07:13:16 -04003066 lru_cache_add_file(page);
3067
3068 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003069 flush_dcache_page(page);
3070 SetPageUptodate(page);
3071 }
3072
3073 unlock_page(page);
3074
3075 if (rdata->result == 0)
3076 cifs_readpage_to_fscache(rdata->mapping->host, page);
3077
3078 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003079 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003080 }
Jeff Layton6993f742012-05-16 07:13:17 -04003081 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003082}
3083
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003084static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003085cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3086 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003087{
Jeff Layton8321fec2012-09-19 06:22:32 -07003088 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003089 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003090 u64 eof;
3091 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003092 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003093 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003094
3095 /* determine the eof that the server (probably) has */
3096 eof = CIFS_I(rdata->mapping->host)->server_eof;
3097 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3098 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3099
Jeff Layton8321fec2012-09-19 06:22:32 -07003100 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003101 for (i = 0; i < nr_pages; i++) {
3102 struct page *page = rdata->pages[i];
3103
Jeff Layton8321fec2012-09-19 06:22:32 -07003104 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003105 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003106 iov.iov_base = kmap(page);
3107 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003108 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003109 i, page->index, iov.iov_base, iov.iov_len);
3110 len -= PAGE_CACHE_SIZE;
3111 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003112 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003113 iov.iov_base = kmap(page);
3114 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003115 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003116 i, page->index, iov.iov_base, iov.iov_len);
3117 memset(iov.iov_base + len,
3118 '\0', PAGE_CACHE_SIZE - len);
3119 rdata->tailsz = len;
3120 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003121 } else if (page->index > eof_index) {
3122 /*
3123 * The VFS will not try to do readahead past the
3124 * i_size, but it's possible that we have outstanding
3125 * writes with gaps in the middle and the i_size hasn't
3126 * caught up yet. Populate those with zeroed out pages
3127 * to prevent the VFS from repeatedly attempting to
3128 * fill them until the writes are flushed.
3129 */
3130 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003131 lru_cache_add_file(page);
3132 flush_dcache_page(page);
3133 SetPageUptodate(page);
3134 unlock_page(page);
3135 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003136 rdata->pages[i] = NULL;
3137 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003138 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003139 } else {
3140 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003141 lru_cache_add_file(page);
3142 unlock_page(page);
3143 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003144 rdata->pages[i] = NULL;
3145 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003146 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003147 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003148
3149 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3150 kunmap(page);
3151 if (result < 0)
3152 break;
3153
3154 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003155 }
3156
Jeff Layton8321fec2012-09-19 06:22:32 -07003157 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003158}
3159
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160static int cifs_readpages(struct file *file, struct address_space *mapping,
3161 struct list_head *page_list, unsigned num_pages)
3162{
Jeff Layton690c5e32011-10-19 15:30:16 -04003163 int rc;
3164 struct list_head tmplist;
3165 struct cifsFileInfo *open_file = file->private_data;
3166 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3167 unsigned int rsize = cifs_sb->rsize;
3168 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169
Jeff Layton690c5e32011-10-19 15:30:16 -04003170 /*
3171 * Give up immediately if rsize is too small to read an entire page.
3172 * The VFS will fall back to readpage. We should never reach this
3173 * point however since we set ra_pages to 0 when the rsize is smaller
3174 * than a cache page.
3175 */
3176 if (unlikely(rsize < PAGE_CACHE_SIZE))
3177 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003178
Suresh Jayaraman56698232010-07-05 18:13:25 +05303179 /*
3180 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3181 * immediately if the cookie is negative
3182 */
3183 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3184 &num_pages);
3185 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003186 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303187
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003188 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3189 pid = open_file->pid;
3190 else
3191 pid = current->tgid;
3192
Jeff Layton690c5e32011-10-19 15:30:16 -04003193 rc = 0;
3194 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195
Jeff Layton690c5e32011-10-19 15:30:16 -04003196 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3197 mapping, num_pages);
3198
3199 /*
3200 * Start with the page at end of list and move it to private
3201 * list. Do the same with any following pages until we hit
3202 * the rsize limit, hit an index discontinuity, or run out of
3203 * pages. Issue the async read and then start the loop again
3204 * until the list is empty.
3205 *
3206 * Note that list order is important. The page_list is in
3207 * the order of declining indexes. When we put the pages in
3208 * the rdata->pages, then we want them in increasing order.
3209 */
3210 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003211 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003212 unsigned int bytes = PAGE_CACHE_SIZE;
3213 unsigned int expected_index;
3214 unsigned int nr_pages = 1;
3215 loff_t offset;
3216 struct page *page, *tpage;
3217 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220
Jeff Layton690c5e32011-10-19 15:30:16 -04003221 /*
3222 * Lock the page and put it in the cache. Since no one else
3223 * should have access to this page, we're safe to simply set
3224 * PG_locked without checking it first.
3225 */
3226 __set_page_locked(page);
3227 rc = add_to_page_cache_locked(page, mapping,
3228 page->index, GFP_KERNEL);
3229
3230 /* give up if we can't stick it in the cache */
3231 if (rc) {
3232 __clear_page_locked(page);
3233 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235
Jeff Layton690c5e32011-10-19 15:30:16 -04003236 /* move first page to the tmplist */
3237 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3238 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239
Jeff Layton690c5e32011-10-19 15:30:16 -04003240 /* now try and add more pages onto the request */
3241 expected_index = page->index + 1;
3242 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3243 /* discontinuity ? */
3244 if (page->index != expected_index)
3245 break;
3246
3247 /* would this page push the read over the rsize? */
3248 if (bytes + PAGE_CACHE_SIZE > rsize)
3249 break;
3250
3251 __set_page_locked(page);
3252 if (add_to_page_cache_locked(page, mapping,
3253 page->index, GFP_KERNEL)) {
3254 __clear_page_locked(page);
3255 break;
3256 }
3257 list_move_tail(&page->lru, &tmplist);
3258 bytes += PAGE_CACHE_SIZE;
3259 expected_index++;
3260 nr_pages++;
3261 }
3262
Jeff Layton0471ca32012-05-16 07:13:16 -04003263 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003264 if (!rdata) {
3265 /* best to give up if we're out of mem */
3266 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3267 list_del(&page->lru);
3268 lru_cache_add_file(page);
3269 unlock_page(page);
3270 page_cache_release(page);
3271 }
3272 rc = -ENOMEM;
3273 break;
3274 }
3275
Jeff Layton6993f742012-05-16 07:13:17 -04003276 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003277 rdata->mapping = mapping;
3278 rdata->offset = offset;
3279 rdata->bytes = bytes;
3280 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003281 rdata->pagesz = PAGE_CACHE_SIZE;
3282 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003283
3284 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3285 list_del(&page->lru);
3286 rdata->pages[rdata->nr_pages++] = page;
3287 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003288
Jeff Layton2a1bb132012-05-16 07:13:17 -04003289 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003290 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003291 for (i = 0; i < rdata->nr_pages; i++) {
3292 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003293 lru_cache_add_file(page);
3294 unlock_page(page);
3295 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 }
Jeff Layton6993f742012-05-16 07:13:17 -04003297 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298 break;
3299 }
Jeff Layton6993f742012-05-16 07:13:17 -04003300
3301 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 }
3303
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 return rc;
3305}
3306
/*
 * Fill a single pagecache page, from fscache when possible, otherwise
 * with a synchronous cifs_read() from the server. A short read is
 * zero-padded; on success the page is marked uptodate and pushed to
 * fscache. Note that the io_error label is reached on the success path
 * too — it only unmaps the page and drops the reference taken here.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero-fill the remainder of a short read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	/* reached on success as well; just undo the kmap/page_cache_get */
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3350
3351static int cifs_readpage(struct file *file, struct page *page)
3352{
3353 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3354 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003355 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003357 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303360 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003361 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303362 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 }
3364
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003365 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003366 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367
3368 rc = cifs_readpage_worker(file, page, &offset);
3369
3370 unlock_page(page);
3371
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003372 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 return rc;
3374}
3375
Steve Frencha403a0a2007-07-26 15:54:16 +00003376static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3377{
3378 struct cifsFileInfo *open_file;
3379
Jeff Layton44772882010-10-15 15:34:03 -04003380 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003381 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003382 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003383 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003384 return 1;
3385 }
3386 }
Jeff Layton44772882010-10-15 15:34:03 -04003387 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003388 return 0;
3389}
3390
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391/* We do not want to update the file size from server for inodes
3392 open for write - to avoid races with writepage extending
3393 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003394 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 but this is tricky to do without racing with writebehind
3396 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003397bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398{
Steve Frencha403a0a2007-07-26 15:54:16 +00003399 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003400 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003401
Steve Frencha403a0a2007-07-26 15:54:16 +00003402 if (is_inode_writable(cifsInode)) {
3403 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003404 struct cifs_sb_info *cifs_sb;
3405
Steve Frenchc32a0b62006-01-12 14:41:28 -08003406 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003407 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003408 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003409 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003410 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003411 }
3412
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003413 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003414 return true;
Steve French7ba52632007-02-08 18:14:13 +00003415
Steve French4b18f2a2008-04-29 00:06:05 +00003416 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003417 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003418 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419}
3420
/*
 * Prepare a pagecache page for a write of @len bytes at @pos; the locked
 * page is returned in *pagep.  We try to avoid a synchronous read from the
 * server: a full-page write needs no read, and when we hold a read oplock a
 * write at/past EOF (or one that covers all existing data in the page) can
 * simply zero the regions we are not writing.  Returns 0 or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	/* already cached and current: nothing more to prepare */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3492
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303493static int cifs_release_page(struct page *page, gfp_t gfp)
3494{
3495 if (PagePrivate(page))
3496 return 0;
3497
3498 return cifs_fscache_release_page(page, gfp);
3499}
3500
3501static void cifs_invalidate_page(struct page *page, unsigned long offset)
3502{
3503 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3504
3505 if (offset == 0)
3506 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3507}
3508
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003509static int cifs_launder_page(struct page *page)
3510{
3511 int rc = 0;
3512 loff_t range_start = page_offset(page);
3513 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3514 struct writeback_control wbc = {
3515 .sync_mode = WB_SYNC_ALL,
3516 .nr_to_write = 0,
3517 .range_start = range_start,
3518 .range_end = range_end,
3519 };
3520
3521 cFYI(1, "Launder page: %p", page);
3522
3523 if (clear_page_dirty_for_io(page))
3524 rc = cifs_writepage_locked(page, &wbc);
3525
3526 cifs_fscache_invalidate_page(page, page->mapping->host);
3527 return rc;
3528}
3529
/*
 * Work handler run when the server breaks an oplock held on @cfile.
 * For regular files: propagate the break to the VFS lease layer, flush
 * dirty pages, and - if the read cache is also being lost - wait for
 * writeback and invalidate cached pages.  Then push cached byte-range
 * locks to the server and acknowledge the break (unless cancelled).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* mirror the oplock downgrade into the VFS lease state:
		   keeping read caching -> break to read-only lease */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read caching too: wait for the flush to
			   finish and drop the now-stale cached pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* send any cached byte-range locks to the server before we ack */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3569
/*
 * Default address space operations, used when the server supports buffers
 * large enough for multi-page reads (includes ->readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003582
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 * This variant is identical to cifs_addr_ops except that ->readpages is
 * omitted.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};