/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

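/*
 * Map POSIX open flags (O_RDONLY/O_WRONLY/O_RDWR) to the NT desired-access
 * bits sent in an SMB open request.
 */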
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request;
		   it can cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

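/*
 * Map POSIX open flags to the SMB_O_* flags used by the POSIX open
 * protocol extension.
 */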
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

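/*
 * Map O_CREAT/O_EXCL/O_TRUNC combinations to the CIFS create disposition
 * (see the mapping table in cifs_nt_open() below).
 */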
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

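/*
 * Open a file via the POSIX open protocol extension. On success the
 * returned FILE_UNIX_BASIC_INFO is used to instantiate or refresh the
 * inode, unless the server reports an unknown type (the caller then does
 * a qpathinfo) or the caller passed a NULL pinode.
 */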
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

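/*
 * Allocate and initialize a cifsFileInfo for an open file handle, link it
 * into the per-inode and per-tcon open-file lists, and apply any oplock
 * that arrived while the open was pending.
 */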
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

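/* Take an extra reference on the file private data under the list lock. */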
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

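/*
 * Reopen a file handle that was invalidated, e.g. by a reconnect after the
 * session to the server was lost, optionally flushing dirty data and
 * refreshing the inode info first.
 */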
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

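/*
 * Drop this file's reference to its private data; the server handle is
 * closed when the last reference is put in cifsFileInfo_put().
 */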
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

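/*
 * Close a directory opened for readdir: cancel any uncompleted search on
 * the server and free the search buffer.
 */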
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

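/*
 * Allocate and initialize a byte-range lock record for the current task
 * (pid taken from current->tgid).
 */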
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

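/* Wake up every task that blocked waiting on the given lock. */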
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

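/*
 * Scan one file handle's lock list for a lock that overlaps the given
 * range and conflicts with the requested type; with rw_check set, locks
 * held by the same handle and task are not treated as conflicts.
 */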
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

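/*
 * Check every open handle's lock list on this inode for a conflict with
 * the given range and type.
 */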
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			bool rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

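/* Add a lock to this file handle's cached lock list. */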
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

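/*
 * Push all cached mandatory byte-range locks for this file to the server,
 * batching as many LOCKING_ANDX_RANGE entries per request as the server's
 * maximum buffer size allows.
 */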
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

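/*
 * Translate a VFS file_lock into protocol terms: the lock type bits for
 * this server's dialect, whether the request is a lock or an unlock,
 * and whether the caller is willing to wait for a blocking lock.
 */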
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

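/*
 * Handle F_GETLK: check whether the requested range could be locked.
 * POSIX-capable servers offer a real lock-test call; for mandatory-style
 * locking we probe by taking and immediately releasing the lock
 * (exclusive first, then shared) and report the result back through
 * flock->fl_type.
 */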
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				  "range during lock test", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type,
					    0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				  "range during lock test", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

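/*
 * Unlock a range against a server that uses mandatory-style locking.
 * A single unlock request may cover several cached lock ranges, so we
 * walk the file's lock list once per lock type, batch matching ranges
 * into one LOCKING_ANDX buffer (up to max_num entries, bounded by the
 * server's maxBuf), and re-add any ranges whose unlock the server
 * rejects.
 */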
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}

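/*
 * Handle F_SETLK/F_SETLKW: apply or remove a lock. POSIX-capable
 * servers take the request as-is. Otherwise a new lock is first checked
 * and recorded locally (cifs_lock_add_if) and only sent to the server
 * when it does not conflict, while unlocks go through the server's
 * mand_unlock_range op. For FL_POSIX requests the result is also
 * registered with the local VFS via posix_lock_file_wait().
 */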
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

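/*
 * Entry point for the VFS ->lock() operation: classify the request with
 * cifs_read_flock(), then dispatch to cifs_getlk() for F_GETLK or
 * cifs_setlk() for F_SETLK/F_SETLKW. Roughly, a userspace call such as
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * on a cifs mount arrives here as a file_lock with FL_POSIX|FL_SLEEP
 * set (the example values are illustrative only).
 */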
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * Update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held.
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

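/*
 * Write write_size bytes from write_data to the server through the
 * given open handle, in chunks capped at the mount's wsize, retrying on
 * -EAGAIN (reopening an invalidated handle first). *offset and the
 * cached server EOF are advanced as bytes are confirmed written.
 */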
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}

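/*
 * Find an open handle on this inode that is usable for reading,
 * optionally restricted to the current fsuid on multiuser mounts. The
 * returned cifsFileInfo carries an extra reference that the caller must
 * drop with cifsFileInfo_put().
 */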
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/*
	 * We could simply get the first list entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

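/*
 * Like find_readable_file(), but for writable handles. Preference
 * order: a valid handle owned by the current task, then any valid
 * handle, then an invalidated handle that we try to reopen (up to
 * MAX_REOPEN_ATT attempts) before giving up.
 */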
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */
	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

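/*
 * Write back the byte range [from, to) of a single cached page using
 * any writable handle on the inode. Returns 0 without writing when the
 * range lies entirely beyond EOF (we raced with truncate), and clamps
 * the range so the file is never extended from here.
 */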
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

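/*
 * ->writepages() for cifs: gather runs of contiguous dirty pages (up to
 * wsize worth) into a cifs_writedata and send each run as one async
 * write. Falls back to generic_writepages() when wsize is smaller than
 * a page. On -EAGAIN the pages are redirtied for a later pass; on other
 * errors they are marked with a page error.
 */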
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
			     end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
			       wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

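/*
 * Write one locked page back to the server via cifs_partialpagewrite().
 * For data-integrity writeback (WB_SYNC_ALL) an -EAGAIN result is
 * retried here; otherwise the page is redirtied and left for a later
 * pass.
 */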
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

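/*
 * ->write_end() for cifs: once generic code has copied the user data
 * into the page, either mark the page dirty for later writeback (the
 * common, uptodate case) or, if the page never became uptodate, push
 * the copied bytes synchronously through cifs_write().
 */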
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is probably better than directly calling
		 * partialpage_write since in this function the file handle
		 * is known, which we might as well leverage.
		 */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

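/*
 * fsync/fdatasync implementations. The "strict" variant, used when the
 * client cannot trust its read cache, additionally invalidates the
 * mapping before syncing; both then ask the server to flush the handle
 * unless the mount disabled server-side syncs (CIFS_MOUNT_NOSSYNC).
 */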
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
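
/*
 * Worked example for get_numpages() (illustrative numbers only): with a
 * 64KB wsize, 200KB of remaining length and 4KB pages, clen becomes
 * min(200KB, 64KB) = 64KB and num_pages = DIV_ROUND_UP(64KB, 4KB) = 16,
 * so each uncached write chunk below carries at most 16 pages.
 */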
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

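/*
 * Core of the uncached write path: copy the user iovec into freshly
 * allocated pages in wsize-sized chunks, send an async write per chunk,
 * then collect the completions in increasing offset order, resending
 * any chunk that comes back with -EAGAIN. Returns the number of bytes
 * written, or the first error if nothing was sent.
 */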
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002292static ssize_t
2293cifs_iovec_write(struct file *file, const struct iovec *iov,
2294 unsigned long nr_segs, loff_t *poffset)
2295{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002296 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002297 size_t copied, len, cur_len;
2298 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002299 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002300 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002301 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002302 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002303 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002304 struct cifs_writedata *wdata, *tmp;
2305 struct list_head wdata_list;
2306 int rc;
2307 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002308
2309 len = iov_length(iov, nr_segs);
2310 if (!len)
2311 return 0;
2312
2313 rc = generic_write_checks(file, poffset, &len, 0);
2314 if (rc)
2315 return rc;
2316
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002317 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002318 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002319 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002320 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002321
2322 if (!tcon->ses->server->ops->async_writev)
2323 return -ENOSYS;
2324
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002325 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002326
2327 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2328 pid = open_file->pid;
2329 else
2330 pid = current->tgid;
2331
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002332 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002333 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002334 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002335
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002336 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2337 wdata = cifs_writedata_alloc(nr_pages,
2338 cifs_uncached_writev_complete);
2339 if (!wdata) {
2340 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002341 break;
2342 }
2343
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002344 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2345 if (rc) {
2346 kfree(wdata);
2347 break;
2348 }
2349
2350 save_len = cur_len;
2351 for (i = 0; i < nr_pages; i++) {
2352 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2353 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2354 0, copied);
2355 cur_len -= copied;
2356 iov_iter_advance(&it, copied);
2357 }
2358 cur_len = save_len - cur_len;
2359
2360 wdata->sync_mode = WB_SYNC_ALL;
2361 wdata->nr_pages = nr_pages;
2362 wdata->offset = (__u64)offset;
2363 wdata->cfile = cifsFileInfo_get(open_file);
2364 wdata->pid = pid;
2365 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002366 wdata->pagesz = PAGE_SIZE;
2367 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002368 rc = cifs_uncached_retry_writev(wdata);
2369 if (rc) {
2370 kref_put(&wdata->refcount, cifs_writedata_release);
2371 break;
2372 }
2373
2374 list_add_tail(&wdata->list, &wdata_list);
2375 offset += cur_len;
2376 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002377 } while (len > 0);
2378
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002379 /*
2380 	 * If at least one write was successfully sent, then discard any rc
2381 	 * value from the later failed sends. If the writes we wait for below
2382 	 * all succeed, then we'll end up returning the total amount written.
2383 	 * If one of them fails, then we'll pick up a new rc value from it.
2384 */
2385 if (!list_empty(&wdata_list))
2386 rc = 0;
2387
2388 /*
2389 * Wait for and collect replies for any successful sends in order of
2390 * increasing offset. Once an error is hit or we get a fatal signal
2391 * while waiting, then return without waiting for any more replies.
2392 */
2393restart_loop:
2394 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2395 if (!rc) {
2396 /* FIXME: freezable too? */
2397 rc = wait_for_completion_killable(&wdata->done);
2398 if (rc)
2399 rc = -EINTR;
2400 else if (wdata->result)
2401 rc = wdata->result;
2402 else
2403 total_written += wdata->bytes;
2404
2405 /* resend call if it's a retryable error */
2406 if (rc == -EAGAIN) {
2407 rc = cifs_uncached_retry_writev(wdata);
2408 goto restart_loop;
2409 }
2410 }
2411 list_del_init(&wdata->list);
2412 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002413 }
2414
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002415 if (total_written > 0)
2416 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002417
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002418 cifs_stats_bytes_written(tcon, total_written);
2419 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002420}
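
/*
 * Worked example for the wdata sizing above (illustrative, assuming
 * PAGE_SIZE == 4096 and that get_numpages() caps cur_len at wsize and
 * returns DIV_ROUND_UP(cur_len, PAGE_SIZE)): for cur_len = 10000,
 * nr_pages = 3, the first two pages carry 4096 bytes each (pagesz) and
 * tailsz = 10000 - 2 * 4096 = 1808 bytes land in the last page.
 */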
2421
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002422ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002423 unsigned long nr_segs, loff_t pos)
2424{
2425 ssize_t written;
2426 struct inode *inode;
2427
2428 inode = iocb->ki_filp->f_path.dentry->d_inode;
2429
2430 /*
2431 	 * BB - optimize the case when signing is disabled. We could drop the
2432 	 * extra memory-to-memory copying and use the iovec buffers directly
2433 	 * for constructing the write request.
2434 */
2435
2436 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2437 if (written > 0) {
2438 CIFS_I(inode)->invalid_mapping = true;
2439 iocb->ki_pos = pos;
2440 }
2441
2442 return written;
2443}
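
/*
 * Note on the invalidation above (descriptive sketch): the uncached
 * write bypasses the page cache entirely, so any cached pages may now
 * be stale. Setting invalid_mapping forces a later cached read to
 * revalidate the mapping against the server before trusting it.
 */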
2444
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002445static ssize_t
2446cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2447 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002448{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002449 struct file *file = iocb->ki_filp;
2450 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2451 struct inode *inode = file->f_mapping->host;
2452 struct cifsInodeInfo *cinode = CIFS_I(inode);
2453 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2454 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002455
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002456 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002457
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002458 sb_start_write(inode->i_sb);
2459
2460 /*
2461 	 * We need to hold the semaphore to be sure nobody modifies the lock
2462 	 * list with a brlock that prevents writing.
2463 */
2464 down_read(&cinode->lock_sem);
2465 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2466 server->vals->exclusive_lock_type, NULL,
2467 true)) {
2468 mutex_lock(&inode->i_mutex);
2469 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2470 &iocb->ki_pos);
2471 mutex_unlock(&inode->i_mutex);
2472 }
2473
2474 if (rc > 0 || rc == -EIOCBQUEUED) {
2475 ssize_t err;
2476
2477 err = generic_write_sync(file, pos, rc);
2478 if (err < 0 && rc > 0)
2479 rc = err;
2480 }
2481
2482 up_read(&cinode->lock_sem);
2483 sb_end_write(inode->i_sb);
2484 return rc;
2485}
2486
2487ssize_t
2488cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2489 unsigned long nr_segs, loff_t pos)
2490{
2491 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2492 struct cifsInodeInfo *cinode = CIFS_I(inode);
2493 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2494 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2495 iocb->ki_filp->private_data;
2496 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002497
Pavel Shilovsky25078102012-09-19 06:22:45 -07002498#ifdef CONFIG_CIFS_SMB2
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002499 /*
Pavel Shilovsky25078102012-09-19 06:22:45 -07002500 	 * If we have a read oplock and want to write data to the file, we
2501 	 * need to store it in the page cache and then push it to the server
2502 	 * to be sure the next read will get valid data.
2503 */
2504 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2505 ssize_t written;
2506 int rc;
2507
2508 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2509 rc = filemap_fdatawrite(inode->i_mapping);
2510 if (rc)
2511 return (ssize_t)rc;
2512
2513 return written;
2514 }
2515#endif
2516
2517 /*
2518 	 * For non-oplocked files in strict cache mode we need to write the data
2519 	 * to the server exactly from pos to pos+len-1 rather than flush all
2520 	 * affected pages, because flushing may cause an error with mandatory
2521 	 * locks on these pages but not on the region from pos to pos+len-1.
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002522 */
2523
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002524 if (!cinode->clientCanCacheAll)
2525 return cifs_user_writev(iocb, iov, nr_segs, pos);
2526
2527 if (cap_unix(tcon->ses) &&
2528 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2529 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2530 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2531
2532 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002533}
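
/*
 * Dispatch summary for the function above (illustrative, not
 * authoritative): without a full oplock (clientCanCacheAll) the data
 * goes uncached via cifs_user_writev(). With the oplock, mounts with
 * POSIX fcntl lock support and mandatory brlocks disabled can use
 * generic_file_aio_write() directly; everything else goes through
 * cifs_writev(), which checks for brlock conflicts under lock_sem.
 */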
2534
Jeff Layton0471ca32012-05-16 07:13:16 -04002535static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002536cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002537{
2538 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002539
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002540 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2541 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002542 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002543 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002544 INIT_LIST_HEAD(&rdata->list);
2545 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002546 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002547 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002548
Jeff Layton0471ca32012-05-16 07:13:16 -04002549 return rdata;
2550}
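
/*
 * Layout sketch (assuming rdata->pages is a flexible array member at
 * the end of struct cifs_readdata): the kzalloc above makes a single
 * allocation for the struct plus nr_pages page pointers, e.g. for
 * nr_pages = 4:
 *
 *	kzalloc(sizeof(*rdata) + 4 * sizeof(struct page *), GFP_KERNEL);
 *
 * so the pointer array is freed together with the struct by the
 * kfree() in cifs_readdata_release().
 */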
2551
Jeff Layton6993f742012-05-16 07:13:17 -04002552void
2553cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002554{
Jeff Layton6993f742012-05-16 07:13:17 -04002555 struct cifs_readdata *rdata = container_of(refcount,
2556 struct cifs_readdata, refcount);
2557
2558 if (rdata->cfile)
2559 cifsFileInfo_put(rdata->cfile);
2560
Jeff Layton0471ca32012-05-16 07:13:16 -04002561 kfree(rdata);
2562}
2563
Jeff Layton2a1bb132012-05-16 07:13:17 -04002564static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002565cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002566{
2567 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002568 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002569 unsigned int i;
2570
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002571 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002572 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2573 if (!page) {
2574 rc = -ENOMEM;
2575 break;
2576 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002577 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002578 }
2579
2580 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002581 		for (i = 0; i < nr_pages && rdata->pages[i]; i++) {
2582 put_page(rdata->pages[i]);
2583 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002584 }
2585 }
2586 return rc;
2587}
2588
2589static void
2590cifs_uncached_readdata_release(struct kref *refcount)
2591{
Jeff Layton1c892542012-05-16 07:13:17 -04002592 struct cifs_readdata *rdata = container_of(refcount,
2593 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002594 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002595
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002596 for (i = 0; i < rdata->nr_pages; i++) {
2597 put_page(rdata->pages[i]);
2598 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002599 }
2600 cifs_readdata_release(refcount);
2601}
2602
2603static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002604cifs_retry_async_readv(struct cifs_readdata *rdata)
2605{
2606 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002607 struct TCP_Server_Info *server;
2608
2609 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002610
2611 do {
2612 if (rdata->cfile->invalidHandle) {
2613 rc = cifs_reopen_file(rdata->cfile, true);
2614 if (rc != 0)
2615 continue;
2616 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002617 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002618 } while (rc == -EAGAIN);
2619
2620 return rc;
2621}
2622
Jeff Layton1c892542012-05-16 07:13:17 -04002623/**
2624 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2625 * @rdata: the readdata response with list of pages holding data
2626 * @iov: vector in which we should copy the data
2627 * @nr_segs: number of segments in vector
2628 * @offset: offset into file of the first iovec
2629 * @copied: used to return the amount of data copied to the iov
2630 *
2631 * This function copies data from a list of pages in a readdata response into
2632 * an array of iovecs. It will first calculate where the data should go
2633 * based on the info in the readdata and then copy the data into that spot.
2634 */
2635static ssize_t
2636cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2637 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2638{
2639 int rc = 0;
2640 struct iov_iter ii;
2641 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002642 ssize_t remaining = rdata->bytes;
2643 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002644 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002645
2646 /* set up iov_iter and advance to the correct offset */
2647 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2648 iov_iter_advance(&ii, pos);
2649
2650 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002651 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002652 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002653 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002654
2655 /* copy a whole page or whatever's left */
2656 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2657
2658 /* ...but limit it to whatever space is left in the iov */
2659 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2660
2661 /* go while there's data to be copied and no errors */
2662 if (copy && !rc) {
2663 pdata = kmap(page);
2664 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2665 (int)copy);
2666 kunmap(page);
2667 if (!rc) {
2668 *copied += copy;
2669 remaining -= copy;
2670 iov_iter_advance(&ii, copy);
2671 }
2672 }
Jeff Layton1c892542012-05-16 07:13:17 -04002673 }
2674
2675 return rc;
2676}
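
/*
 * Worked example (illustrative): for a read that began at file offset
 * 8192, a response with rdata->offset = 12288 gives pos = 4096, so the
 * iov_iter is advanced 4096 bytes into the caller's buffers before the
 * response pages are copied out. This is what lets replies that
 * complete out of order still land at the right spot in the iovecs.
 */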
2677
2678static void
2679cifs_uncached_readv_complete(struct work_struct *work)
2680{
2681 struct cifs_readdata *rdata = container_of(work,
2682 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002683
2684 complete(&rdata->done);
2685 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2686}
2687
2688static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002689cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2690 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002691{
Jeff Layton8321fec2012-09-19 06:22:32 -07002692 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002693 unsigned int i;
2694 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002695 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002696
Jeff Layton8321fec2012-09-19 06:22:32 -07002697 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002698 for (i = 0; i < nr_pages; i++) {
2699 struct page *page = rdata->pages[i];
2700
Jeff Layton8321fec2012-09-19 06:22:32 -07002701 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002702 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002703 iov.iov_base = kmap(page);
2704 iov.iov_len = PAGE_SIZE;
2705 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2706 i, iov.iov_base, iov.iov_len);
2707 len -= PAGE_SIZE;
2708 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002709 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002710 iov.iov_base = kmap(page);
2711 iov.iov_len = len;
2712 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2713 i, iov.iov_base, iov.iov_len);
2714 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2715 rdata->tailsz = len;
2716 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002717 } else {
2718 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002719 rdata->pages[i] = NULL;
2720 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002721 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002722 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002723 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002724
2725 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2726 kunmap(page);
2727 if (result < 0)
2728 break;
2729
2730 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002731 }
2732
Jeff Layton8321fec2012-09-19 06:22:32 -07002733 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002734}
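
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): if the
 * server returns len = 5000 bytes into a 2-page rdata, the first page
 * is filled with 4096 bytes, the second receives the remaining 904
 * bytes with the rest of the page zeroed and rdata->tailsz set to 904,
 * and any further pages would be released since no data arrived for
 * them.
 */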
2735
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002736static ssize_t
2737cifs_iovec_read(struct file *file, const struct iovec *iov,
2738 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739{
Jeff Layton1c892542012-05-16 07:13:17 -04002740 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002741 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002742 ssize_t total_read = 0;
2743 loff_t offset = *poffset;
2744 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002746 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002748 struct cifs_readdata *rdata, *tmp;
2749 struct list_head rdata_list;
2750 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002751
2752 if (!nr_segs)
2753 return 0;
2754
2755 len = iov_length(iov, nr_segs);
2756 if (!len)
2757 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758
Jeff Layton1c892542012-05-16 07:13:17 -04002759 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002760 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002761 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002762 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002764 if (!tcon->ses->server->ops->async_readv)
2765 return -ENOSYS;
2766
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002767 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2768 pid = open_file->pid;
2769 else
2770 pid = current->tgid;
2771
Steve Frenchad7a2922008-02-07 23:25:02 +00002772 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002773 		cFYI(1, "attempting read on write-only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002774
Jeff Layton1c892542012-05-16 07:13:17 -04002775 do {
2776 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2777 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002778
Jeff Layton1c892542012-05-16 07:13:17 -04002779 /* allocate a readdata struct */
2780 rdata = cifs_readdata_alloc(npages,
2781 cifs_uncached_readv_complete);
2782 if (!rdata) {
2783 rc = -ENOMEM;
2784 			break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002786
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002787 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002788 if (rc)
2789 goto error;
2790
2791 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002792 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002793 rdata->offset = offset;
2794 rdata->bytes = cur_len;
2795 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002796 rdata->pagesz = PAGE_SIZE;
2797 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002798
2799 rc = cifs_retry_async_readv(rdata);
2800error:
2801 if (rc) {
2802 kref_put(&rdata->refcount,
2803 cifs_uncached_readdata_release);
2804 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 }
Jeff Layton1c892542012-05-16 07:13:17 -04002806
2807 list_add_tail(&rdata->list, &rdata_list);
2808 offset += cur_len;
2809 len -= cur_len;
2810 } while (len > 0);
2811
2812 	/* if at least one read request was sent successfully, then reset rc */
2813 if (!list_empty(&rdata_list))
2814 rc = 0;
2815
2816 /* the loop below should proceed in the order of increasing offsets */
2817restart_loop:
2818 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2819 if (!rc) {
2820 ssize_t copied;
2821
2822 /* FIXME: freezable sleep too? */
2823 rc = wait_for_completion_killable(&rdata->done);
2824 if (rc)
2825 rc = -EINTR;
2826 else if (rdata->result)
2827 rc = rdata->result;
2828 else {
2829 rc = cifs_readdata_to_iov(rdata, iov,
2830 nr_segs, *poffset,
2831 &copied);
2832 total_read += copied;
2833 }
2834
2835 /* resend call if it's a retryable error */
2836 if (rc == -EAGAIN) {
2837 rc = cifs_retry_async_readv(rdata);
2838 goto restart_loop;
2839 }
2840 }
2841 list_del_init(&rdata->list);
2842 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002844
Jeff Layton1c892542012-05-16 07:13:17 -04002845 cifs_stats_bytes_read(tcon, total_read);
2846 *poffset += total_read;
2847
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002848 /* mask nodata case */
2849 if (rc == -ENODATA)
2850 rc = 0;
2851
Jeff Layton1c892542012-05-16 07:13:17 -04002852 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853}
2854
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002855ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002856 unsigned long nr_segs, loff_t pos)
2857{
2858 ssize_t read;
2859
2860 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2861 if (read > 0)
2862 iocb->ki_pos = pos;
2863
2864 return read;
2865}
2866
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002867ssize_t
2868cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2869 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002870{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002871 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2872 struct cifsInodeInfo *cinode = CIFS_I(inode);
2873 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2874 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2875 iocb->ki_filp->private_data;
2876 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2877 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002878
2879 /*
2880 	 * In strict cache mode we need to read from the server every time if
2881 	 * we don't have a level II oplock, because the server can delay the
2882 	 * mtime change, so we can't make a decision about invalidating the
2883 	 * inode. We can also fail with page reading if there are mandatory
2884 	 * locks on pages affected by this read but not on the region from
2885 	 * pos to pos+len-1.
2886 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002887 if (!cinode->clientCanCacheRead)
2888 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002889
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002890 if (cap_unix(tcon->ses) &&
2891 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2892 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2893 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2894
2895 /*
2896 * We need to hold the sem to be sure nobody modifies lock list
2897 * with a brlock that prevents reading.
2898 */
2899 down_read(&cinode->lock_sem);
2900 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2901 tcon->ses->server->vals->shared_lock_type,
2902 NULL, true))
2903 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2904 up_read(&cinode->lock_sem);
2905 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002906}
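
/*
 * Dispatch summary for the function above (illustrative, not
 * authoritative): with no read oplock the data is read uncached from
 * the server; with one, mounts with POSIX fcntl lock support and
 * mandatory brlocks disabled use the page cache directly, while the
 * rest take the generic path only after the brlock conflict check
 * under lock_sem succeeds.
 */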
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002908static ssize_t
2909cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910{
2911 int rc = -EACCES;
2912 unsigned int bytes_read = 0;
2913 unsigned int total_read;
2914 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002915 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002917 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002918 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002919 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002920 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002922 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002923 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002924 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002926 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002927 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002929 /* FIXME: set up handlers for larger reads and/or convert to async */
2930 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2931
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302933 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002934 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302935 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002937 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002938 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002939 server = tcon->ses->server;
2940
2941 if (!server->ops->sync_read) {
2942 free_xid(xid);
2943 return -ENOSYS;
2944 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002946 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2947 pid = open_file->pid;
2948 else
2949 pid = current->tgid;
2950
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002952 		cFYI(1, "attempting read on write-only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002954 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2955 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002956 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002957 /*
2958 		 * For Windows ME and 9x we do not want to request more than it
2959 		 * negotiated, since it will refuse the read otherwise.
2960 */
2961 if ((tcon->ses) && !(tcon->ses->capabilities &
2962 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002963 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002964 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002965 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 rc = -EAGAIN;
2967 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002968 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002969 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 if (rc != 0)
2971 break;
2972 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002973 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002974 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002975 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002976 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002977 rc = server->ops->sync_read(xid, open_file, &io_parms,
2978 &bytes_read, &cur_offset,
2979 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 }
2981 if (rc || (bytes_read == 0)) {
2982 if (total_read) {
2983 break;
2984 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002985 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 return rc;
2987 }
2988 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002989 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002990 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 }
2992 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002993 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 return total_read;
2995}
2996
Jeff Laytonca83ce32011-04-12 09:13:44 -04002997/*
2998 * If the page is mmap'ed into a process' page tables, then we need to make
2999 * sure that it doesn't change while being written back.
3000 */
3001static int
3002cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3003{
3004 struct page *page = vmf->page;
3005
3006 lock_page(page);
3007 return VM_FAULT_LOCKED;
3008}
3009
3010static struct vm_operations_struct cifs_file_vm_ops = {
3011 .fault = filemap_fault,
3012 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003013 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003014};
3015
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003016int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3017{
3018 int rc, xid;
3019 struct inode *inode = file->f_path.dentry->d_inode;
3020
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003021 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003022
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003023 if (!CIFS_I(inode)->clientCanCacheRead) {
3024 rc = cifs_invalidate_mapping(inode);
3025 		if (rc) {
			free_xid(xid);
3026 			return rc;
		}
3027 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003028
3029 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003030 if (rc == 0)
3031 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003032 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003033 return rc;
3034}
3035
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3037{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 int rc, xid;
3039
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003040 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003041 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003043 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003044 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 return rc;
3046 }
3047 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003048 if (rc == 0)
3049 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003050 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 return rc;
3052}
3053
Jeff Layton0471ca32012-05-16 07:13:16 -04003054static void
3055cifs_readv_complete(struct work_struct *work)
3056{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003057 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003058 struct cifs_readdata *rdata = container_of(work,
3059 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003060
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003061 for (i = 0; i < rdata->nr_pages; i++) {
3062 struct page *page = rdata->pages[i];
3063
Jeff Layton0471ca32012-05-16 07:13:16 -04003064 lru_cache_add_file(page);
3065
3066 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003067 flush_dcache_page(page);
3068 SetPageUptodate(page);
3069 }
3070
3071 unlock_page(page);
3072
3073 if (rdata->result == 0)
3074 cifs_readpage_to_fscache(rdata->mapping->host, page);
3075
3076 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003077 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003078 }
Jeff Layton6993f742012-05-16 07:13:17 -04003079 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003080}
3081
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003082static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003083cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3084 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003085{
Jeff Layton8321fec2012-09-19 06:22:32 -07003086 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003087 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003088 u64 eof;
3089 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003090 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003091 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003092
3093 /* determine the eof that the server (probably) has */
3094 eof = CIFS_I(rdata->mapping->host)->server_eof;
3095 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3096 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3097
Jeff Layton8321fec2012-09-19 06:22:32 -07003098 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003099 for (i = 0; i < nr_pages; i++) {
3100 struct page *page = rdata->pages[i];
3101
Jeff Layton8321fec2012-09-19 06:22:32 -07003102 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003103 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003104 iov.iov_base = kmap(page);
3105 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003106 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003107 i, page->index, iov.iov_base, iov.iov_len);
3108 len -= PAGE_CACHE_SIZE;
3109 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003110 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003111 iov.iov_base = kmap(page);
3112 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003113 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003114 i, page->index, iov.iov_base, iov.iov_len);
3115 memset(iov.iov_base + len,
3116 '\0', PAGE_CACHE_SIZE - len);
3117 rdata->tailsz = len;
3118 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003119 } else if (page->index > eof_index) {
3120 /*
3121 * The VFS will not try to do readahead past the
3122 * i_size, but it's possible that we have outstanding
3123 * writes with gaps in the middle and the i_size hasn't
3124 * caught up yet. Populate those with zeroed out pages
3125 * to prevent the VFS from repeatedly attempting to
3126 * fill them until the writes are flushed.
3127 */
3128 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003129 lru_cache_add_file(page);
3130 flush_dcache_page(page);
3131 SetPageUptodate(page);
3132 unlock_page(page);
3133 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003134 rdata->pages[i] = NULL;
3135 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003136 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003137 } else {
3138 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003139 lru_cache_add_file(page);
3140 unlock_page(page);
3141 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003142 rdata->pages[i] = NULL;
3143 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003144 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003145 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003146
3147 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3148 kunmap(page);
3149 if (result < 0)
3150 break;
3151
3152 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003153 }
3154
Jeff Layton8321fec2012-09-19 06:22:32 -07003155 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003156}
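
/*
 * Worked example for the eof handling above (illustrative, assuming
 * PAGE_CACHE_SHIFT == 12): if the server's eof is believed to be
 * 10000, then eof_index = 9999 >> 12 = 2, so readahead pages with
 * index > 2 are zero-filled and marked uptodate rather than waiting
 * for data the server (probably) does not have yet.
 */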
3157
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158static int cifs_readpages(struct file *file, struct address_space *mapping,
3159 struct list_head *page_list, unsigned num_pages)
3160{
Jeff Layton690c5e32011-10-19 15:30:16 -04003161 int rc;
3162 struct list_head tmplist;
3163 struct cifsFileInfo *open_file = file->private_data;
3164 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3165 unsigned int rsize = cifs_sb->rsize;
3166 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
Jeff Layton690c5e32011-10-19 15:30:16 -04003168 /*
3169 * Give up immediately if rsize is too small to read an entire page.
3170 * The VFS will fall back to readpage. We should never reach this
3171 	 * point, however, since we set ra_pages to 0 when the rsize is smaller
3172 * than a cache page.
3173 */
3174 if (unlikely(rsize < PAGE_CACHE_SIZE))
3175 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003176
Suresh Jayaraman56698232010-07-05 18:13:25 +05303177 /*
3178 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3179 	 * immediately if the cookie is negative.
3180 */
3181 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3182 &num_pages);
3183 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003184 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303185
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003186 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3187 pid = open_file->pid;
3188 else
3189 pid = current->tgid;
3190
Jeff Layton690c5e32011-10-19 15:30:16 -04003191 rc = 0;
3192 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193
Jeff Layton690c5e32011-10-19 15:30:16 -04003194 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3195 mapping, num_pages);
3196
3197 /*
3198 * Start with the page at end of list and move it to private
3199 * list. Do the same with any following pages until we hit
3200 * the rsize limit, hit an index discontinuity, or run out of
3201 * pages. Issue the async read and then start the loop again
3202 * until the list is empty.
3203 *
3204 * Note that list order is important. The page_list is in
3205 * the order of declining indexes. When we put the pages in
3206 * the rdata->pages, then we want them in increasing order.
3207 */
3208 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003209 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003210 unsigned int bytes = PAGE_CACHE_SIZE;
3211 unsigned int expected_index;
3212 unsigned int nr_pages = 1;
3213 loff_t offset;
3214 struct page *page, *tpage;
3215 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216
3217 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
Jeff Layton690c5e32011-10-19 15:30:16 -04003219 /*
3220 * Lock the page and put it in the cache. Since no one else
3221 * should have access to this page, we're safe to simply set
3222 * PG_locked without checking it first.
3223 */
3224 __set_page_locked(page);
3225 rc = add_to_page_cache_locked(page, mapping,
3226 page->index, GFP_KERNEL);
3227
3228 /* give up if we can't stick it in the cache */
3229 if (rc) {
3230 __clear_page_locked(page);
3231 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
Jeff Layton690c5e32011-10-19 15:30:16 -04003234 /* move first page to the tmplist */
3235 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3236 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237
Jeff Layton690c5e32011-10-19 15:30:16 -04003238 /* now try and add more pages onto the request */
3239 expected_index = page->index + 1;
3240 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3241 /* discontinuity ? */
3242 if (page->index != expected_index)
3243 break;
3244
3245 /* would this page push the read over the rsize? */
3246 if (bytes + PAGE_CACHE_SIZE > rsize)
3247 break;
3248
3249 __set_page_locked(page);
3250 if (add_to_page_cache_locked(page, mapping,
3251 page->index, GFP_KERNEL)) {
3252 __clear_page_locked(page);
3253 break;
3254 }
3255 list_move_tail(&page->lru, &tmplist);
3256 bytes += PAGE_CACHE_SIZE;
3257 expected_index++;
3258 nr_pages++;
3259 }
3260
Jeff Layton0471ca32012-05-16 07:13:16 -04003261 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003262 if (!rdata) {
3263 /* best to give up if we're out of mem */
3264 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3265 list_del(&page->lru);
3266 lru_cache_add_file(page);
3267 unlock_page(page);
3268 page_cache_release(page);
3269 }
3270 rc = -ENOMEM;
3271 break;
3272 }
3273
Jeff Layton6993f742012-05-16 07:13:17 -04003274 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003275 rdata->mapping = mapping;
3276 rdata->offset = offset;
3277 rdata->bytes = bytes;
3278 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003279 rdata->pagesz = PAGE_CACHE_SIZE;
3280 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003281
3282 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3283 list_del(&page->lru);
3284 rdata->pages[rdata->nr_pages++] = page;
3285 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003286
Jeff Layton2a1bb132012-05-16 07:13:17 -04003287 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003288 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003289 for (i = 0; i < rdata->nr_pages; i++) {
3290 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003291 lru_cache_add_file(page);
3292 unlock_page(page);
3293 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294 }
Jeff Layton6993f742012-05-16 07:13:17 -04003295 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 break;
3297 }
Jeff Layton6993f742012-05-16 07:13:17 -04003298
3299 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 }
3301
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 return rc;
3303}
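
/*
 * Worked example for the batching above (illustrative, assuming
 * PAGE_CACHE_SIZE == 4096): with rsize = 16384, at most four
 * contiguous pages fit in one rdata, so a 10-page readahead with no
 * index gaps is issued as three async reads covering 4, 4 and 2 pages.
 */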
3304
3305static int cifs_readpage_worker(struct file *file, struct page *page,
3306 loff_t *poffset)
3307{
3308 char *read_data;
3309 int rc;
3310
Suresh Jayaraman56698232010-07-05 18:13:25 +05303311 /* Is the page cached? */
3312 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3313 if (rc == 0)
3314 goto read_complete;
3315
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 page_cache_get(page);
3317 read_data = kmap(page);
3318 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003319
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003321
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322 if (rc < 0)
3323 goto io_error;
3324 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003325 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003326
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003327 file->f_path.dentry->d_inode->i_atime =
3328 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003329
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 if (PAGE_CACHE_SIZE > rc)
3331 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3332
3333 flush_dcache_page(page);
3334 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303335
3336 /* send this page to the cache */
3337 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3338
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003340
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003342 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303344
3345read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 return rc;
3347}
3348
3349static int cifs_readpage(struct file *file, struct page *page)
3350{
3351 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3352 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003353 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003355 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
3357 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303358 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003359 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303360 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361 }
3362
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003363 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003364 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365
3366 rc = cifs_readpage_worker(file, page, &offset);
3367
3368 unlock_page(page);
3369
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003370 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 return rc;
3372}
3373
Steve Frencha403a0a2007-07-26 15:54:16 +00003374static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3375{
3376 struct cifsFileInfo *open_file;
3377
Jeff Layton44772882010-10-15 15:34:03 -04003378 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003379 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003380 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003381 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003382 return 1;
3383 }
3384 }
Jeff Layton44772882010-10-15 15:34:03 -04003385 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003386 return 0;
3387}
3388
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389/* We do not want to update the file size from the server for inodes
3390   open for write, to avoid races with writepage extending
3391   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003392   a refresh of the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393   but this is tricky to do without racing with writebehind
3394   page caching in the current Linux kernel design. */
Steve French4b18f2a2008-04-29 00:06:05 +00003395bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396{
Steve Frencha403a0a2007-07-26 15:54:16 +00003397 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003398 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003399
Steve Frencha403a0a2007-07-26 15:54:16 +00003400 if (is_inode_writable(cifsInode)) {
3401 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003402 struct cifs_sb_info *cifs_sb;
3403
Steve Frenchc32a0b62006-01-12 14:41:28 -08003404 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003405 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003406 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003407 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003408 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003409 }
3410
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003411 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003412 return true;
Steve French7ba52632007-02-08 18:14:13 +00003413
Steve French4b18f2a2008-04-29 00:06:05 +00003414 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003415 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003416 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417}
3418
Nick Piggind9414772008-09-24 11:32:59 -04003419static int cifs_write_begin(struct file *file, struct address_space *mapping,
3420 loff_t pos, unsigned len, unsigned flags,
3421 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422{
Nick Piggind9414772008-09-24 11:32:59 -04003423 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3424 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003425 loff_t page_start = pos & PAGE_MASK;
3426 loff_t i_size;
3427 struct page *page;
3428 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
Joe Perchesb6b38f72010-04-21 03:50:45 +00003430 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003431
Nick Piggin54566b22009-01-04 12:00:53 -08003432 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003433 if (!page) {
3434 rc = -ENOMEM;
3435 goto out;
3436 }
Nick Piggind9414772008-09-24 11:32:59 -04003437
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003438 if (PageUptodate(page))
3439 goto out;
Steve French8a236262007-03-06 00:31:00 +00003440
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003441 /*
3442 * If we write a full page it will be up to date, no need to read from
3443 * the server. If the write is short, we'll end up doing a sync write
3444 * instead.
3445 */
3446 if (len == PAGE_CACHE_SIZE)
3447 goto out;
3448
3449 /*
3450 * optimize away the read when we have an oplock, and we're not
3451 * expecting to use any of the data we'd be reading in. That
3452 * is, when the page lies beyond the EOF, or straddles the EOF
3453 * and the write will cover all of the existing data.
3454 */
3455 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3456 i_size = i_size_read(mapping->host);
3457 if (page_start >= i_size ||
3458 (offset == 0 && (pos + len) >= i_size)) {
3459 zero_user_segments(page, 0, offset,
3460 offset + len,
3461 PAGE_CACHE_SIZE);
3462 /*
3463 * PageChecked means that the parts of the page
3464 * to which we're not writing are considered up
3465 * to date. Once the data is copied to the
3466 * page, it can be set uptodate.
3467 */
3468 SetPageChecked(page);
3469 goto out;
3470 }
3471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472
Nick Piggind9414772008-09-24 11:32:59 -04003473 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003474 /*
3475 * might as well read a page, it is fast enough. If we get
3476 * an error, we don't need to return it. cifs_write_end will
3477 * do a sync write instead since PG_uptodate isn't set.
3478 */
3479 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003480 } else {
3481 		/* We could try using another file handle if there is one,
3482 		   but how would we lock it to prevent a close of that handle
3483 		   racing with this read? In any case, this will be written
Nick Piggind9414772008-09-24 11:32:59 -04003484 		   out by write_end, so it is fine. */
Steve French8a236262007-03-06 00:31:00 +00003485 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003486out:
3487 *pagep = page;
3488 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489}
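
/*
 * Worked example for the read-avoidance above (illustrative): with
 * i_size = 6000 and a cached read oplock, a write_begin of len = 100
 * at pos = 8192 starts beyond the EOF, so the page is zeroed and
 * marked PageChecked instead of being read from the server first.
 */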
3490
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303491static int cifs_release_page(struct page *page, gfp_t gfp)
3492{
3493 if (PagePrivate(page))
3494 return 0;
3495
3496 return cifs_fscache_release_page(page, gfp);
3497}
3498
3499static void cifs_invalidate_page(struct page *page, unsigned long offset)
3500{
3501 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3502
3503 if (offset == 0)
3504 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3505}
3506
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003507static int cifs_launder_page(struct page *page)
3508{
3509 int rc = 0;
3510 loff_t range_start = page_offset(page);
3511 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3512 struct writeback_control wbc = {
3513 .sync_mode = WB_SYNC_ALL,
3514 .nr_to_write = 0,
3515 .range_start = range_start,
3516 .range_end = range_end,
3517 };
3518
3519 cFYI(1, "Launder page: %p", page);
3520
3521 if (clear_page_dirty_for_io(page))
3522 rc = cifs_writepage_locked(page, &wbc);
3523
3524 cifs_fscache_invalidate_page(page, page->mapping->host);
3525 return rc;
3526}
3527
Tejun Heo9b646972010-07-20 22:09:02 +02003528void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003529{
3530 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3531 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003532 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003533 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003534 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003535 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003536
3537 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003538 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003539 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003540 else
Al Viro8737c932009-12-24 06:47:55 -05003541 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003542 rc = filemap_fdatawrite(inode->i_mapping);
3543 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003544 rc = filemap_fdatawait(inode->i_mapping);
3545 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003546 invalidate_remote_inode(inode);
3547 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003548 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003549 }
3550
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003551 rc = cifs_push_locks(cfile);
3552 if (rc)
3553 cERROR(1, "Push locks rc = %d", rc);
3554
Jeff Layton3bc303c2009-09-21 06:47:50 -04003555 /*
3556 	 * Releasing a stale oplock after a recent reconnect of the SMB session
3557 	 * using a now incorrect file handle is not a data integrity issue, but
3558 	 * do not bother sending an oplock release if the session to the server
3559 	 * is still disconnected, since the server has already released the oplock.
3560 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003561 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003562 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3563 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003564 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003565 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003566}
3567
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003568const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 .readpage = cifs_readpage,
3570 .readpages = cifs_readpages,
3571 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003572 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003573 .write_begin = cifs_write_begin,
3574 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303576 .releasepage = cifs_release_page,
3577 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003578 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003580
3581/*
3582 * cifs_readpages requires the server to support a buffer large enough to
3583 * contain the header plus one complete page of data. Otherwise, we need
3584 * to leave cifs_readpages out of the address space operations.
3585 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003586const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003587 .readpage = cifs_readpage,
3588 .writepage = cifs_writepage,
3589 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003590 .write_begin = cifs_write_begin,
3591 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003592 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303593 .releasepage = cifs_release_page,
3594 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003595 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003596};