/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

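/*
 * Convert the O_ACCMODE bits of the POSIX open flags to the NT generic
 * access bits requested in an SMB open request.
 */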
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

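/*
 * Convert POSIX open flags to their SMB_O_* equivalents for a POSIX
 * open/create request on a server with unix extensions.
 */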
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

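/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags to the
 * corresponding SMB create disposition.
 */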
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

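/*
 * Open a file with the POSIX create operation offered by servers that
 * support the unix extensions. On success, optionally fills in *pinode
 * with an inode for the opened file.
 */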
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

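/*
 * Open a file via a regular NT create request: derive the desired access,
 * disposition and create options from the POSIX open flags, call the
 * protocol-specific open operation and refresh the inode info.
 */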
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read write flags match reasonably. O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

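/*
 * Allocate and initialize the per-open cifsFileInfo structure, attach it
 * to the file, and link it into the per-tcon and per-inode open file
 * lists.
 */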
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

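/*
 * Take an additional reference on the file private data. The caller must
 * not hold cifs_file_list_lock, which is taken here.
 */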
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

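/*
 * Open handler for regular files: try a POSIX open when the server
 * supports the unix extensions, otherwise fall back to a regular NT open,
 * then allocate the cifsFileInfo for the new handle.
 */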
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

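/*
 * Reopen a file handle that has been invalidated, e.g. by a reconnect
 * after the session to the server was lost. Optionally flushes dirty
 * pages and refreshes the inode info when it is safe to do so.
 */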
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry open the old way on errors; especially
		 * in the reconnect path it is important to retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

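/*
 * Close a directory: if a search is still in progress on the server,
 * close the search handle, then free any cached search response buffer.
 */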
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

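/*
 * Allocate and initialize a byte-range lock record for the current task.
 */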
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

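/*
 * Return true if a lock cached for this fid conflicts with the requested
 * range and type, handing the conflicting lock back via @conf_lock.
 */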
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, bool rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check && server->ops->compare_fids(cfile, cur_cfile) &&
		    current->tgid == li->pid)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			bool rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, false);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, false);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

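/*
 * Push all cached byte-range locks for this file to the server as
 * mandatory (LOCKING_ANDX) locks, batching as many ranges per request as
 * the server's buffer size allows.
 */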
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

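/*
 * Push all cached POSIX byte-range locks for this file to the server.
 * The lock_to_push records are preallocated because the flock list is
 * walked under lock_flocks(), where we cannot sleep to allocate.
 */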
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

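/*
 * Push cached locks to the server, using POSIX locks when the server
 * supports them and mandatory locks otherwise.
 */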
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return tcon->ses->server->ops->push_mand_locks(cfile);
}

Pavel Shilovsky03776f42010-08-17 11:26:00 +04001158static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001159cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001160 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001162 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001163 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001165 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001166 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001167 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001168 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001170 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001171 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001172 "not implemented yet");
1173 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001174 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001175 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001176 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1177 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001178 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001180 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001181 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001182 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001183 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001184 *lock = 1;
1185 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001186 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001187 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001188 *unlock = 1;
1189 /* Check if unlock includes more than one lock range */
1190 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001191 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001192 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001193 *lock = 1;
1194 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001195 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001196 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001197 *lock = 1;
1198 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001199 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001200 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001201 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001203 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001204}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001206static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001207cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001208 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001209{
1210 int rc = 0;
1211 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001212 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1213 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001214 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001215 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001217 if (posix_lck) {
1218 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001219
1220 rc = cifs_posix_lock_test(file, flock);
1221 if (!rc)
1222 return rc;
1223
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001224 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001225 posix_lock_type = CIFS_RDLCK;
1226 else
1227 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001228 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001229 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001230 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 return rc;
1232 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001233
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001234 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001235 if (!rc)
1236 return rc;
1237
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001238 /* BB we could chain these into one lock request BB */
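	/*
	 * Probe for conflicts by trying to set the lock: if the server
	 * grants it, no conflicting lock exists, so release it right away
	 * and report the range as unlocked.
	 */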
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001239 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1240 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001241 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001242 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1243 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244 flock->fl_type = F_UNLCK;
1245 if (rc != 0)
 1246 cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001247 "range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001248 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001249 }
1250
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001251 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001252 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001253 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001254 }
1255
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001256 type &= ~server->vals->exclusive_lock_type;
1257
1258 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1259 type | server->vals->shared_lock_type,
1260 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001261 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001262 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1263 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001264 flock->fl_type = F_RDLCK;
1265 if (rc != 0)
 1266 cERROR(1, "Error %d unlocking previously locked "
 1267 "range during test of lock", rc);
1268 } else
1269 flock->fl_type = F_WRLCK;
1270
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001271 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001272}
1273
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001274void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001275cifs_move_llist(struct list_head *source, struct list_head *dest)
1276{
1277 struct list_head *li, *tmp;
1278 list_for_each_safe(li, tmp, source)
1279 list_move(li, dest);
1280}
1281
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001282void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001283cifs_free_llist(struct list_head *llist)
1284{
1285 struct cifsLockInfo *li, *tmp;
1286 list_for_each_entry_safe(li, tmp, llist, llist) {
1287 cifs_del_lock_waiters(li);
1288 list_del(&li->llist);
1289 kfree(li);
1290 }
1291}
1292
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001293int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001294cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1295 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001296{
1297 int rc = 0, stored_rc;
1298 int types[] = {LOCKING_ANDX_LARGE_FILES,
1299 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1300 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001301 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001302 LOCKING_ANDX_RANGE *buf, *cur;
1303 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1304 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1305 struct cifsLockInfo *li, *tmp;
1306 __u64 length = 1 + flock->fl_end - flock->fl_start;
1307 struct list_head tmp_llist;
1308
1309 INIT_LIST_HEAD(&tmp_llist);
1310
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001311 /*
1312 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1313 * and check it for zero before using.
1314 */
1315 max_buf = tcon->ses->server->maxBuf;
1316 if (!max_buf)
1317 return -EINVAL;
1318
1319 max_num = (max_buf - sizeof(struct smb_hdr)) /
1320 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001321 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1322 if (!buf)
1323 return -ENOMEM;
1324
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001325 down_write(&cinode->lock_sem);
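	/*
	 * A LOCKING_ANDX request carries a single lock type, so make two
	 * passes - one for exclusive and one for shared locks - batching up
	 * to max_num ranges into each request.
	 */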
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001326 for (i = 0; i < 2; i++) {
1327 cur = buf;
1328 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001329 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001330 if (flock->fl_start > li->offset ||
1331 (flock->fl_start + length) <
1332 (li->offset + li->length))
1333 continue;
1334 if (current->tgid != li->pid)
1335 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001336 if (types[i] != li->type)
1337 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001338 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001339 /*
1340 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001341 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001342 */
1343 list_del(&li->llist);
1344 cifs_del_lock_waiters(li);
1345 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001346 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001347 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001348 cur->Pid = cpu_to_le16(li->pid);
1349 cur->LengthLow = cpu_to_le32((u32)li->length);
1350 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1351 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1352 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1353 /*
1354 * We need to save a lock here to let us add it again to
1355 * the file's list if the unlock range request fails on
1356 * the server.
1357 */
1358 list_move(&li->llist, &tmp_llist);
1359 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001360 stored_rc = cifs_lockv(xid, tcon,
1361 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001362 li->type, num, 0, buf);
1363 if (stored_rc) {
1364 /*
1365 * We failed on the unlock range
1366 * request - add all locks from the tmp
1367 * list to the head of the file's list.
1368 */
1369 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001370 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001371 rc = stored_rc;
1372 } else
1373 /*
 1374 * The unlock range request succeeded -
1375 * free the tmp list.
1376 */
1377 cifs_free_llist(&tmp_llist);
1378 cur = buf;
1379 num = 0;
1380 } else
1381 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001382 }
1383 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001384 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001385 types[i], num, 0, buf);
1386 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001387 cifs_move_llist(&tmp_llist,
1388 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001389 rc = stored_rc;
1390 } else
1391 cifs_free_llist(&tmp_llist);
1392 }
1393 }
1394
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001395 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001396 kfree(buf);
1397 return rc;
1398}
1399
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001400static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001401cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001402 bool wait_flag, bool posix_lck, int lock, int unlock,
1403 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001404{
1405 int rc = 0;
1406 __u64 length = 1 + flock->fl_end - flock->fl_start;
1407 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1408 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001409 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001410
1411 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001412 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001413
1414 rc = cifs_posix_lock_set(file, flock);
1415 if (!rc || rc < 0)
1416 return rc;
1417
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001418 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001419 posix_lock_type = CIFS_RDLCK;
1420 else
1421 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001422
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001423 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001424 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001425
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001426 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1427 current->tgid, flock->fl_start, length,
1428 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001430 }
1431
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001432 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001433 struct cifsLockInfo *lock;
1434
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001435 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001436 if (!lock)
1437 return -ENOMEM;
1438
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001439 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001440 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001441 kfree(lock);
1442 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001443 goto out;
1444
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001445 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1446 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001447 if (rc) {
1448 kfree(lock);
1449 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001450 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001451
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001452 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001453 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001454 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001455
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001456out:
1457 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001458 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001459 return rc;
1460}
1461
1462int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1463{
1464 int rc, xid;
1465 int lock = 0, unlock = 0;
1466 bool wait_flag = false;
1467 bool posix_lck = false;
1468 struct cifs_sb_info *cifs_sb;
1469 struct cifs_tcon *tcon;
1470 struct cifsInodeInfo *cinode;
1471 struct cifsFileInfo *cfile;
1472 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001473 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001474
1475 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001476 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001477
1478 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1479 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1480 flock->fl_start, flock->fl_end);
1481
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001482 cfile = (struct cifsFileInfo *)file->private_data;
1483 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001484
1485 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1486 tcon->ses->server);
1487
1488 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001489 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001490 cinode = CIFS_I(file->f_path.dentry->d_inode);
1491
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001492 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001493 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1494 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1495 posix_lck = true;
1496 /*
1497 * BB add code here to normalize offset and length to account for
 1498 * negative length, which we cannot accept over the wire.
1499 */
1500 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001501 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001502 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001503 return rc;
1504 }
1505
1506 if (!lock && !unlock) {
1507 /*
 1508 * if this is neither a lock nor an unlock request then there is
 1509 * nothing to do since we do not know what it is
1510 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001511 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001512 return -EOPNOTSUPP;
1513 }
1514
1515 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1516 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001517 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 return rc;
1519}
1520
Jeff Layton597b0272012-03-23 14:40:56 -04001521/*
1522 * update the file size (if needed) after a write. Should be called with
1523 * the inode->i_lock held
1524 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001525void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001526cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1527 unsigned int bytes_written)
1528{
1529 loff_t end_of_write = offset + bytes_written;
1530
1531 if (end_of_write > cifsi->server_eof)
1532 cifsi->server_eof = end_of_write;
1533}
1534
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001535static ssize_t
1536cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1537 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538{
1539 int rc = 0;
1540 unsigned int bytes_written = 0;
1541 unsigned int total_written;
1542 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001543 struct cifs_tcon *tcon;
1544 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001545 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001546 struct dentry *dentry = open_file->dentry;
1547 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001548 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Jeff Layton7da4b492010-10-15 15:34:00 -04001550 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Joe Perchesb6b38f72010-04-21 03:50:45 +00001552 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001553 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001555 tcon = tlink_tcon(open_file->tlink);
1556 server = tcon->ses->server;
1557
1558 if (!server->ops->sync_write)
1559 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001560
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001561 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 for (total_written = 0; write_size > total_written;
1564 total_written += bytes_written) {
1565 rc = -EAGAIN;
1566 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001567 struct kvec iov[2];
1568 unsigned int len;
1569
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 /* we could deadlock if we called
1572 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001573 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001575 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 if (rc != 0)
1577 break;
1578 }
Steve French3e844692005-10-03 13:37:24 -07001579
Jeff Laytonca83ce32011-04-12 09:13:44 -04001580 len = min((size_t)cifs_sb->wsize,
1581 write_size - total_written);
1582 /* iov[0] is reserved for smb header */
1583 iov[1].iov_base = (char *)write_data + total_written;
1584 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001585 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001586 io_parms.tcon = tcon;
1587 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001588 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001589 rc = server->ops->sync_write(xid, open_file, &io_parms,
1590 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 }
1592 if (rc || (bytes_written == 0)) {
1593 if (total_written)
1594 break;
1595 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001596 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 return rc;
1598 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001599 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001600 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001601 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001602 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001603 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001604 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 }
1606
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001607 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
Jeff Layton7da4b492010-10-15 15:34:00 -04001609 if (total_written > 0) {
1610 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001611 if (*offset > dentry->d_inode->i_size)
1612 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001613 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001615 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001616 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 return total_written;
1618}
1619
Jeff Layton6508d902010-09-29 19:51:11 -04001620struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1621 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001622{
1623 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001624 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1625
1626 /* only filter by fsuid on multiuser mounts */
1627 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1628 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001629
Jeff Layton44772882010-10-15 15:34:03 -04001630 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001631 /* we could simply get the first_list_entry since write-only entries
 1632 are always at the end of the list, but since the first entry might
1633 have a close pending, we go through the whole list */
1634 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001635 if (fsuid_only && open_file->uid != current_fsuid())
1636 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001637 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001638 if (!open_file->invalidHandle) {
1639 /* found a good file */
1640 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001641 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001642 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001643 return open_file;
1644 } /* else might as well continue, and look for
1645 another, or simply have the caller reopen it
1646 again rather than trying to fix this handle */
1647 } else /* write only file */
1648 break; /* write only files are last so must be done */
1649 }
Jeff Layton44772882010-10-15 15:34:03 -04001650 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001651 return NULL;
1652}
Steve French630f3f0c2007-10-25 21:17:17 +00001653
Jeff Layton6508d902010-09-29 19:51:11 -04001654struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1655 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001656{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001657 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001658 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001659 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001660 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001661 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001662
Steve French60808232006-04-22 15:53:05 +00001663 /* Having a null inode here (because mapping->host was set to zero by
 1664 the VFS or MM) should not happen, but we had reports of an oops (due to
1665 it being zero) during stress testcases so we need to check for it */
1666
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001667 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001668 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001669 dump_stack();
1670 return NULL;
1671 }
1672
Jeff Laytond3892292010-11-02 16:22:50 -04001673 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1674
Jeff Layton6508d902010-09-29 19:51:11 -04001675 /* only filter by fsuid on multiuser mounts */
1676 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1677 fsuid_only = false;
1678
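	/*
	 * Prefer a writable handle opened by the caller's thread group;
	 * fall back to any writable handle, and as a last resort reopen an
	 * invalidated one (outside the spinlock), retrying up to
	 * MAX_REOPEN_ATT times.
	 */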
Jeff Layton44772882010-10-15 15:34:03 -04001679 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001680refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001681 if (refind > MAX_REOPEN_ATT) {
1682 spin_unlock(&cifs_file_list_lock);
1683 return NULL;
1684 }
Steve French6148a742005-10-05 12:23:19 -07001685 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001686 if (!any_available && open_file->pid != current->tgid)
1687 continue;
1688 if (fsuid_only && open_file->uid != current_fsuid())
1689 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001690 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001691 if (!open_file->invalidHandle) {
1692 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001693 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001694 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001695 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001696 } else {
1697 if (!inv_file)
1698 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001699 }
Steve French6148a742005-10-05 12:23:19 -07001700 }
1701 }
Jeff Layton2846d382008-09-22 21:33:33 -04001702 /* couldn't find a usable FH with the same pid, try any available */
1703 if (!any_available) {
1704 any_available = true;
1705 goto refind_writable;
1706 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001707
1708 if (inv_file) {
1709 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001710 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001711 }
1712
Jeff Layton44772882010-10-15 15:34:03 -04001713 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001714
1715 if (inv_file) {
1716 rc = cifs_reopen_file(inv_file, false);
1717 if (!rc)
1718 return inv_file;
1719 else {
1720 spin_lock(&cifs_file_list_lock);
1721 list_move_tail(&inv_file->flist,
1722 &cifs_inode->openFileList);
1723 spin_unlock(&cifs_file_list_lock);
1724 cifsFileInfo_put(inv_file);
1725 spin_lock(&cifs_file_list_lock);
1726 ++refind;
1727 goto refind_writable;
1728 }
1729 }
1730
Steve French6148a742005-10-05 12:23:19 -07001731 return NULL;
1732}
1733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1735{
1736 struct address_space *mapping = page->mapping;
1737 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1738 char *write_data;
1739 int rc = -EFAULT;
1740 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001742 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
1744 if (!mapping || !mapping->host)
1745 return -EFAULT;
1746
1747 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
1749 offset += (loff_t)from;
1750 write_data = kmap(page);
1751 write_data += from;
1752
1753 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1754 kunmap(page);
1755 return -EIO;
1756 }
1757
1758 /* racing with truncate? */
1759 if (offset > mapping->host->i_size) {
1760 kunmap(page);
1761 return 0; /* don't care */
1762 }
1763
1764 /* check to make sure that we are not extending the file */
1765 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001766 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
Jeff Layton6508d902010-09-29 19:51:11 -04001768 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001769 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001770 bytes_written = cifs_write(open_file, open_file->pid,
1771 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001772 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001774 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001775 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001776 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001777 else if (bytes_written < 0)
1778 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001779 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001780 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 rc = -EIO;
1782 }
1783
1784 kunmap(page);
1785 return rc;
1786}
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001789 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001791 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1792 bool done = false, scanned = false, range_whole = false;
1793 pgoff_t end, index;
1794 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001795 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001796 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001797 int rc = 0;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001798 loff_t isize = i_size_read(mapping->host);
Steve French50c2f752007-07-13 00:33:32 +00001799
Steve French37c0eb42005-10-05 14:50:29 -07001800 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001801 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001802 * one page at a time via cifs_writepage
1803 */
1804 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1805 return generic_writepages(mapping, wbc);
1806
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001807 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001808 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001809 end = -1;
1810 } else {
1811 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1812 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1813 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001814 range_whole = true;
1815 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001816 }
1817retry:
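	/*
	 * Each pass below gathers up to wsize worth of contiguous dirty
	 * pages into a single cifs_writedata and submits it as one async
	 * write request.
	 */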
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001818 while (!done && index <= end) {
1819 unsigned int i, nr_pages, found_pages;
1820 pgoff_t next = 0, tofind;
1821 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001822
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001823 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1824 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001825
Jeff Laytonc2e87642012-03-23 14:40:55 -04001826 wdata = cifs_writedata_alloc((unsigned int)tofind,
1827 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001828 if (!wdata) {
1829 rc = -ENOMEM;
1830 break;
1831 }
1832
1833 /*
1834 * find_get_pages_tag seems to return a max of 256 on each
1835 * iteration, so we must call it several times in order to
1836 * fill the array or the wsize is effectively limited to
1837 * 256 * PAGE_CACHE_SIZE.
1838 */
1839 found_pages = 0;
1840 pages = wdata->pages;
1841 do {
1842 nr_pages = find_get_pages_tag(mapping, &index,
1843 PAGECACHE_TAG_DIRTY,
1844 tofind, pages);
1845 found_pages += nr_pages;
1846 tofind -= nr_pages;
1847 pages += nr_pages;
1848 } while (nr_pages && tofind && index <= end);
1849
1850 if (found_pages == 0) {
1851 kref_put(&wdata->refcount, cifs_writedata_release);
1852 break;
1853 }
1854
1855 nr_pages = 0;
1856 for (i = 0; i < found_pages; i++) {
1857 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001858 /*
1859 * At this point we hold neither mapping->tree_lock nor
1860 * lock on the page itself: the page may be truncated or
1861 * invalidated (changing page->mapping to NULL), or even
1862 * swizzled back from swapper_space to tmpfs file
1863 * mapping
1864 */
1865
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001866 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001867 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001868 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001869 break;
1870
1871 if (unlikely(page->mapping != mapping)) {
1872 unlock_page(page);
1873 break;
1874 }
1875
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001876 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001877 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001878 unlock_page(page);
1879 break;
1880 }
1881
1882 if (next && (page->index != next)) {
1883 /* Not next consecutive page */
1884 unlock_page(page);
1885 break;
1886 }
1887
1888 if (wbc->sync_mode != WB_SYNC_NONE)
1889 wait_on_page_writeback(page);
1890
1891 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001892 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001893 unlock_page(page);
1894 break;
1895 }
Steve French84d2f072005-10-12 15:32:05 -07001896
Linus Torvaldscb876f42006-12-23 16:19:07 -08001897 /*
1898 * This actually clears the dirty bit in the radix tree.
1899 * See cifs_writepage() for more commentary.
1900 */
1901 set_page_writeback(page);
1902
Jeff Laytoneddb0792012-09-18 16:20:35 -07001903 if (page_offset(page) >= isize) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001904 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001905 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001906 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001907 break;
1908 }
1909
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001910 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001911 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001912 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001913 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001914
1915 /* reset index to refind any pages skipped */
1916 if (nr_pages == 0)
1917 index = wdata->pages[0]->index + 1;
1918
1919 /* put any pages we aren't going to use */
1920 for (i = nr_pages; i < found_pages; i++) {
1921 page_cache_release(wdata->pages[i]);
1922 wdata->pages[i] = NULL;
1923 }
1924
1925 /* nothing to write? */
1926 if (nr_pages == 0) {
1927 kref_put(&wdata->refcount, cifs_writedata_release);
1928 continue;
1929 }
1930
1931 wdata->sync_mode = wbc->sync_mode;
1932 wdata->nr_pages = nr_pages;
1933 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001934 wdata->pagesz = PAGE_CACHE_SIZE;
1935 wdata->tailsz =
1936 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1937 (loff_t)PAGE_CACHE_SIZE);
1938 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1939 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001940
1941 do {
1942 if (wdata->cfile != NULL)
1943 cifsFileInfo_put(wdata->cfile);
1944 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1945 false);
1946 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001947 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001948 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001949 break;
Steve French37c0eb42005-10-05 14:50:29 -07001950 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001951 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001952 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1953 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001954 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001955
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001956 for (i = 0; i < nr_pages; ++i)
1957 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001958
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001959 /* send failure -- clean up the mess */
1960 if (rc != 0) {
1961 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001962 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001963 redirty_page_for_writepage(wbc,
1964 wdata->pages[i]);
1965 else
1966 SetPageError(wdata->pages[i]);
1967 end_page_writeback(wdata->pages[i]);
1968 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001969 }
Jeff Layton941b8532011-01-11 07:24:01 -05001970 if (rc != -EAGAIN)
1971 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001972 }
1973 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001974
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001975 wbc->nr_to_write -= nr_pages;
1976 if (wbc->nr_to_write <= 0)
1977 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001978
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001979 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001980 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001981
Steve French37c0eb42005-10-05 14:50:29 -07001982 if (!scanned && !done) {
1983 /*
1984 * We hit the last page and there is more work to be done: wrap
1985 * back to the start of the file
1986 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001987 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001988 index = 0;
1989 goto retry;
1990 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001991
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001992 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001993 mapping->writeback_index = index;
1994
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 return rc;
1996}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001998static int
1999cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002001 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002002 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002004 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005/* BB add check for wbc flags */
2006 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002007 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002008 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002009
2010 /*
2011 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2012 *
2013 * A writepage() implementation always needs to do either this,
2014 * or re-dirty the page with "redirty_page_for_writepage()" in
2015 * the case of a failure.
2016 *
2017 * Just unlocking the page will cause the radix tree tag-bits
2018 * to fail to update with the state of the page correctly.
2019 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002020 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002021retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002023 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2024 goto retry_write;
2025 else if (rc == -EAGAIN)
2026 redirty_page_for_writepage(wbc, page);
2027 else if (rc != 0)
2028 SetPageError(page);
2029 else
2030 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002031 end_page_writeback(page);
2032 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002033 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 return rc;
2035}
2036
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002037static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2038{
2039 int rc = cifs_writepage_locked(page, wbc);
2040 unlock_page(page);
2041 return rc;
2042}
2043
Nick Piggind9414772008-09-24 11:32:59 -04002044static int cifs_write_end(struct file *file, struct address_space *mapping,
2045 loff_t pos, unsigned len, unsigned copied,
2046 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Nick Piggind9414772008-09-24 11:32:59 -04002048 int rc;
2049 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002050 struct cifsFileInfo *cfile = file->private_data;
2051 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2052 __u32 pid;
2053
2054 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2055 pid = cfile->pid;
2056 else
2057 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
Joe Perchesb6b38f72010-04-21 03:50:45 +00002059 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2060 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002061
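	/*
	 * PageChecked is set by cifs_write_begin when it did not read the
	 * page in from the server; a full-page copy makes it up to date.
	 */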
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002062 if (PageChecked(page)) {
2063 if (copied == len)
2064 SetPageUptodate(page);
2065 ClearPageChecked(page);
2066 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002067 SetPageUptodate(page);
2068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002070 char *page_data;
2071 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002072 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002073
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002074 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 /* this is probably better than directly calling
2076 partialpage_write since in this function the file handle is
 2077 known, which we might as well leverage */
2078 /* BB check if anything else missing out of ppw
2079 such as updating last write time */
2080 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002081 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002082 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002084
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002085 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002086 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002087 rc = copied;
2088 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 set_page_dirty(page);
2090 }
2091
Nick Piggind9414772008-09-24 11:32:59 -04002092 if (rc > 0) {
2093 spin_lock(&inode->i_lock);
2094 if (pos > inode->i_size)
2095 i_size_write(inode, pos);
2096 spin_unlock(&inode->i_lock);
2097 }
2098
2099 unlock_page(page);
2100 page_cache_release(page);
2101
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 return rc;
2103}
2104
Josef Bacik02c24a82011-07-16 20:44:56 -04002105int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2106 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002108 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002110 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002111 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002112 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002113 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002114 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Josef Bacik02c24a82011-07-16 20:44:56 -04002116 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2117 if (rc)
2118 return rc;
2119 mutex_lock(&inode->i_mutex);
2120
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002121 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Joe Perchesb6b38f72010-04-21 03:50:45 +00002123 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002124 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002125
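	/*
	 * Without a read oplock the pagecache may be stale, so drop the
	 * mapping and let subsequent reads go to the server.
	 */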
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002126 if (!CIFS_I(inode)->clientCanCacheRead) {
2127 rc = cifs_invalidate_mapping(inode);
2128 if (rc) {
2129 cFYI(1, "rc: %d during invalidate phase", rc);
2130 rc = 0; /* don't care about it in fsync */
2131 }
2132 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002133
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002134 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002135 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2136 server = tcon->ses->server;
2137 if (server->ops->flush)
2138 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2139 else
2140 rc = -ENOSYS;
2141 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002142
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002143 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002144 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002145 return rc;
2146}
2147
Josef Bacik02c24a82011-07-16 20:44:56 -04002148int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002149{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002150 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002151 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002152 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002153 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002154 struct cifsFileInfo *smbfile = file->private_data;
2155 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002156 struct inode *inode = file->f_mapping->host;
2157
2158 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2159 if (rc)
2160 return rc;
2161 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002162
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002163 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002164
2165 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2166 file->f_path.dentry->d_name.name, datasync);
2167
2168 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002169 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2170 server = tcon->ses->server;
2171 if (server->ops->flush)
2172 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2173 else
2174 rc = -ENOSYS;
2175 }
Steve Frenchb298f222009-02-21 21:17:43 +00002176
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002177 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002178 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 return rc;
2180}
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182/*
2183 * As file closes, flush all cached write data for this inode checking
2184 * for write behind errors.
2185 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002186int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002188 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 int rc = 0;
2190
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002191 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002192 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002193
Joe Perchesb6b38f72010-04-21 03:50:45 +00002194 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
2196 return rc;
2197}
2198
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002199static int
2200cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2201{
2202 int rc = 0;
2203 unsigned long i;
2204
2205 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002206 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002207 if (!pages[i]) {
2208 /*
2209 * save number of pages we have already allocated and
2210 * return with ENOMEM error
2211 */
2212 num_pages = i;
2213 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002214 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002215 }
2216 }
2217
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002218 if (rc) {
2219 for (i = 0; i < num_pages; i++)
2220 put_page(pages[i]);
2221 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002222 return rc;
2223}
2224
2225static inline
2226size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2227{
2228 size_t num_pages;
2229 size_t clen;
2230
2231 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002232 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002233
2234 if (cur_len)
2235 *cur_len = clen;
2236
2237 return num_pages;
2238}
2239
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002240static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002241cifs_uncached_writev_complete(struct work_struct *work)
2242{
2243 int i;
2244 struct cifs_writedata *wdata = container_of(work,
2245 struct cifs_writedata, work);
2246 struct inode *inode = wdata->cfile->dentry->d_inode;
2247 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2248
2249 spin_lock(&inode->i_lock);
2250 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2251 if (cifsi->server_eof > inode->i_size)
2252 i_size_write(inode, cifsi->server_eof);
2253 spin_unlock(&inode->i_lock);
2254
2255 complete(&wdata->done);
2256
2257 if (wdata->result != -EAGAIN) {
2258 for (i = 0; i < wdata->nr_pages; i++)
2259 put_page(wdata->pages[i]);
2260 }
2261
2262 kref_put(&wdata->refcount, cifs_writedata_release);
2263}
2264
2265/* attempt to send write to server, retry on any -EAGAIN errors */
2266static int
2267cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2268{
2269 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002270 struct TCP_Server_Info *server;
2271
2272 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002273
2274 do {
2275 if (wdata->cfile->invalidHandle) {
2276 rc = cifs_reopen_file(wdata->cfile, false);
2277 if (rc != 0)
2278 continue;
2279 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002280 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002281 } while (rc == -EAGAIN);
2282
2283 return rc;
2284}
2285
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002286static ssize_t
2287cifs_iovec_write(struct file *file, const struct iovec *iov,
2288 unsigned long nr_segs, loff_t *poffset)
2289{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002290 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002291 size_t copied, len, cur_len;
2292 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002293 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002294 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002295 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002296 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002297 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002298 struct cifs_writedata *wdata, *tmp;
2299 struct list_head wdata_list;
2300 int rc;
2301 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002302
2303 len = iov_length(iov, nr_segs);
2304 if (!len)
2305 return 0;
2306
2307 rc = generic_write_checks(file, poffset, &len, 0);
2308 if (rc)
2309 return rc;
2310
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002311 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002312 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002313 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002314 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002315
2316 if (!tcon->ses->server->ops->async_writev)
2317 return -ENOSYS;
2318
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002319 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002320
2321 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2322 pid = open_file->pid;
2323 else
2324 pid = current->tgid;
2325
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002326 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002327 do {
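		/*
		 * Each iteration copies up to wsize bytes from the iovec
		 * into freshly allocated pages, queues them as one async
		 * write, and tracks the request on wdata_list so the
		 * replies can be collected below.
		 */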
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002328 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002329
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002330 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2331 wdata = cifs_writedata_alloc(nr_pages,
2332 cifs_uncached_writev_complete);
2333 if (!wdata) {
2334 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002335 break;
2336 }
2337
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002338 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2339 if (rc) {
2340 kfree(wdata);
2341 break;
2342 }
2343
2344 save_len = cur_len;
2345 for (i = 0; i < nr_pages; i++) {
2346 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2347 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2348 0, copied);
2349 cur_len -= copied;
2350 iov_iter_advance(&it, copied);
2351 }
2352 cur_len = save_len - cur_len;
2353
2354 wdata->sync_mode = WB_SYNC_ALL;
2355 wdata->nr_pages = nr_pages;
2356 wdata->offset = (__u64)offset;
2357 wdata->cfile = cifsFileInfo_get(open_file);
2358 wdata->pid = pid;
2359 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002360 wdata->pagesz = PAGE_SIZE;
2361 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002362 rc = cifs_uncached_retry_writev(wdata);
2363 if (rc) {
2364 kref_put(&wdata->refcount, cifs_writedata_release);
2365 break;
2366 }
2367
2368 list_add_tail(&wdata->list, &wdata_list);
2369 offset += cur_len;
2370 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002371 } while (len > 0);
2372
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002373 /*
2374 * If at least one write was successfully sent, then discard any rc
2375 * value from the later writes. If the other writes succeed, then
2376 * we'll end up returning whatever was written. If one fails, then
2377 * we'll get a new rc value from that.
2378 */
2379 if (!list_empty(&wdata_list))
2380 rc = 0;
2381
2382 /*
2383 * Wait for and collect replies for any successful sends in order of
2384 * increasing offset. Once an error is hit or we get a fatal signal
2385 * while waiting, then return without waiting for any more replies.
2386 */
2387restart_loop:
2388 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2389 if (!rc) {
2390 /* FIXME: freezable too? */
2391 rc = wait_for_completion_killable(&wdata->done);
2392 if (rc)
2393 rc = -EINTR;
2394 else if (wdata->result)
2395 rc = wdata->result;
2396 else
2397 total_written += wdata->bytes;
2398
2399 /* resend call if it's a retryable error */
2400 if (rc == -EAGAIN) {
2401 rc = cifs_uncached_retry_writev(wdata);
2402 goto restart_loop;
2403 }
2404 }
2405 list_del_init(&wdata->list);
2406 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002407 }
2408
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002409 if (total_written > 0)
2410 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002411
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002412 cifs_stats_bytes_written(tcon, total_written);
2413 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002414}
2415
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002416ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002417 unsigned long nr_segs, loff_t pos)
2418{
2419 ssize_t written;
2420 struct inode *inode;
2421
2422 inode = iocb->ki_filp->f_path.dentry->d_inode;
2423
2424 /*
2425 * BB - optimize for the case when signing is disabled: we could drop
2426 * this extra memory-to-memory copying and use the iovec buffers to
2427 * construct the write request directly.
2428 */
2429
2430 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2431 if (written > 0) {
2432 CIFS_I(inode)->invalid_mapping = true;
2433 iocb->ki_pos = pos;
2434 }
2435
2436 return written;
2437}
2438
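/*
 * Buffered write used in strict cache mode: hold lock_sem for reading so
 * that no mandatory byte-range lock that would forbid writing can be added
 * while the generic buffered write proceeds under i_mutex, then sync the
 * result if the open requires it.
 */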
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002439static ssize_t
2440cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2441 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002442{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002443 struct file *file = iocb->ki_filp;
2444 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2445 struct inode *inode = file->f_mapping->host;
2446 struct cifsInodeInfo *cinode = CIFS_I(inode);
2447 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2448 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002449
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002450 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002451
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002452 sb_start_write(inode->i_sb);
2453
2454 /*
2455 * We need to hold the sem to be sure nobody modifies the lock list
2456 * with a brlock that prevents writing.
2457 */
2458 down_read(&cinode->lock_sem);
2459 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2460 server->vals->exclusive_lock_type, NULL,
2461 true)) {
2462 mutex_lock(&inode->i_mutex);
2463 rc = __generic_file_aio_write(iocb, iov, nr_segs,
2464 &iocb->ki_pos);
2465 mutex_unlock(&inode->i_mutex);
2466 }
2467
2468 if (rc > 0 || rc == -EIOCBQUEUED) {
2469 ssize_t err;
2470
2471 err = generic_write_sync(file, pos, rc);
2472 if (err < 0 && rc > 0)
2473 rc = err;
2474 }
2475
2476 up_read(&cinode->lock_sem);
2477 sb_end_write(inode->i_sb);
2478 return rc;
2479}
2480
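/*
 * Strict cache mode write entry point: write through the page cache only
 * when we hold an exclusive oplock; otherwise fall back to the uncached
 * path, or to the generic path when POSIX byte-range lock semantics are
 * available from the server.
 */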
2481ssize_t
2482cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2483 unsigned long nr_segs, loff_t pos)
2484{
2485 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2486 struct cifsInodeInfo *cinode = CIFS_I(inode);
2487 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2488 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2489 iocb->ki_filp->private_data;
2490 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002491
Pavel Shilovsky25078102012-09-19 06:22:45 -07002492#ifdef CONFIG_CIFS_SMB2
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002493 /*
Pavel Shilovsky25078102012-09-19 06:22:45 -07002494 * If we have a read oplock and want to write data to the file, we
2495 * need to store it in the page cache and then push it to the server
2496 * to be sure the next read will get valid data.
2497 */
2498 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
2499 ssize_t written;
2500 int rc;
2501
2502 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
2503 rc = filemap_fdatawrite(inode->i_mapping);
2504 if (rc)
2505 return (ssize_t)rc;
2506
2507 return written;
2508 }
2509#endif
2510
2511 /*
2512 * For non-oplocked files in strict cache mode we need to write the data
2513 * to the server exactly from pos to pos+len-1 rather than flush all
2514 * affected pages, because flushing may cause an error with mandatory
2515 * locks on these pages but not on the region from pos to pos+len-1.
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002516 */
2517
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002518 if (!cinode->clientCanCacheAll)
2519 return cifs_user_writev(iocb, iov, nr_segs, pos);
2520
2521 if (cap_unix(tcon->ses) &&
2522 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2523 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2524 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2525
2526 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002527}
2528
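/*
 * Allocate a readdata structure with room for nr_pages page pointers and
 * arrange for @complete to run once the read response has been received.
 */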
Jeff Layton0471ca32012-05-16 07:13:16 -04002529static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002530cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002531{
2532 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002533
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002534 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2535 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002536 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002537 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002538 INIT_LIST_HEAD(&rdata->list);
2539 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002540 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002541 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002542
Jeff Layton0471ca32012-05-16 07:13:16 -04002543 return rdata;
2544}
2545
Jeff Layton6993f742012-05-16 07:13:17 -04002546void
2547cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002548{
Jeff Layton6993f742012-05-16 07:13:17 -04002549 struct cifs_readdata *rdata = container_of(refcount,
2550 struct cifs_readdata, refcount);
2551
2552 if (rdata->cfile)
2553 cifsFileInfo_put(rdata->cfile);
2554
Jeff Layton0471ca32012-05-16 07:13:16 -04002555 kfree(rdata);
2556}
2557
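/*
 * Fill the readdata page array with freshly allocated pages, releasing
 * the ones already allocated if any allocation fails.
 */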
Jeff Layton2a1bb132012-05-16 07:13:17 -04002558static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002559cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002560{
2561 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002562 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002563 unsigned int i;
2564
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002565 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002566 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2567 if (!page) {
2568 rc = -ENOMEM;
2569 break;
2570 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002571 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002572 }
2573
2574 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002575 for (i = 0; i < nr_pages && rdata->pages[i]; i++) { /* only allocated pages */
2576 put_page(rdata->pages[i]);
2577 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002578 }
2579 }
2580 return rc;
2581}
2582
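/* Drop the page references held by an uncached read, then free it. */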
2583static void
2584cifs_uncached_readdata_release(struct kref *refcount)
2585{
Jeff Layton1c892542012-05-16 07:13:17 -04002586 struct cifs_readdata *rdata = container_of(refcount,
2587 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002588 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002589
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002590 for (i = 0; i < rdata->nr_pages; i++) {
2591 put_page(rdata->pages[i]);
2592 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002593 }
2594 cifs_readdata_release(refcount);
2595}
2596
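/*
 * Send an async read request, reopening the file handle and retrying for
 * as long as the result is -EAGAIN.
 */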
2597static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002598cifs_retry_async_readv(struct cifs_readdata *rdata)
2599{
2600 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002601 struct TCP_Server_Info *server;
2602
2603 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002604
2605 do {
2606 if (rdata->cfile->invalidHandle) {
2607 rc = cifs_reopen_file(rdata->cfile, true);
2608 if (rc != 0)
2609 continue;
2610 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002611 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002612 } while (rc == -EAGAIN);
2613
2614 return rc;
2615}
2616
Jeff Layton1c892542012-05-16 07:13:17 -04002617/**
2618 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2619 * @rdata: the readdata response with list of pages holding data
2620 * @iov: vector in which we should copy the data
2621 * @nr_segs: number of segments in vector
2622 * @offset: offset into file of the first iovec
2623 * @copied: used to return the amount of data copied to the iov
2624 *
2625 * This function copies data from a list of pages in a readdata response into
2626 * an array of iovecs. It will first calculate where the data should go
2627 * based on the info in the readdata and then copy the data into that spot.
2628 */
2629static ssize_t
2630cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2631 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2632{
2633 int rc = 0;
2634 struct iov_iter ii;
2635 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002636 ssize_t remaining = rdata->bytes;
2637 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002638 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002639
2640 /* set up iov_iter and advance to the correct offset */
2641 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2642 iov_iter_advance(&ii, pos);
2643
2644 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002645 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002646 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002647 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002648
2649 /* copy a whole page or whatever's left */
2650 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2651
2652 /* ...but limit it to whatever space is left in the iov */
2653 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2654
2655 /* go while there's data to be copied and no errors */
2656 if (copy && !rc) {
2657 pdata = kmap(page);
2658 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2659 (int)copy);
2660 kunmap(page);
2661 if (!rc) {
2662 *copied += copy;
2663 remaining -= copy;
2664 iov_iter_advance(&ii, copy);
2665 }
2666 }
Jeff Layton1c892542012-05-16 07:13:17 -04002667 }
2668
2669 return rc;
2670}
2671
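/* Completion work for uncached reads: wake the waiter and drop our ref. */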
2672static void
2673cifs_uncached_readv_complete(struct work_struct *work)
2674{
2675 struct cifs_readdata *rdata = container_of(work,
2676 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002677
2678 complete(&rdata->done);
2679 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2680}
2681
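/*
 * Receive an uncached read response directly into the readdata pages, one
 * kvec per page: full pages first, then a zero-padded tail page. Pages for
 * which no data arrived are released immediately.
 */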
2682static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002683cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2684 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002685{
Jeff Layton8321fec2012-09-19 06:22:32 -07002686 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002687 unsigned int i;
2688 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002689 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002690
Jeff Layton8321fec2012-09-19 06:22:32 -07002691 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002692 for (i = 0; i < nr_pages; i++) {
2693 struct page *page = rdata->pages[i];
2694
Jeff Layton8321fec2012-09-19 06:22:32 -07002695 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002696 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002697 iov.iov_base = kmap(page);
2698 iov.iov_len = PAGE_SIZE;
2699 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2700 i, iov.iov_base, iov.iov_len);
2701 len -= PAGE_SIZE;
2702 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002703 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002704 iov.iov_base = kmap(page);
2705 iov.iov_len = len;
2706 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2707 i, iov.iov_base, iov.iov_len);
2708 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2709 rdata->tailsz = len;
2710 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002711 } else {
2712 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002713 rdata->pages[i] = NULL;
2714 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002715 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002716 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002717 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002718
2719 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2720 kunmap(page);
2721 if (result < 0)
2722 break;
2723
2724 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002725 }
2726
Jeff Layton8321fec2012-09-19 06:22:32 -07002727 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002728}
2729
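/*
 * Uncached read path: issue async reads of up to rsize bytes each, then
 * wait for the replies in order of increasing offset and copy the
 * returned data into the caller's iovec.
 */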
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002730static ssize_t
2731cifs_iovec_read(struct file *file, const struct iovec *iov,
2732 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733{
Jeff Layton1c892542012-05-16 07:13:17 -04002734 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002735 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002736 ssize_t total_read = 0;
2737 loff_t offset = *poffset;
2738 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002740 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002742 struct cifs_readdata *rdata, *tmp;
2743 struct list_head rdata_list;
2744 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002745
2746 if (!nr_segs)
2747 return 0;
2748
2749 len = iov_length(iov, nr_segs);
2750 if (!len)
2751 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
Jeff Layton1c892542012-05-16 07:13:17 -04002753 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002754 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002755 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002756 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002758 if (!tcon->ses->server->ops->async_readv)
2759 return -ENOSYS;
2760
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002761 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2762 pid = open_file->pid;
2763 else
2764 pid = current->tgid;
2765
Steve Frenchad7a2922008-02-07 23:25:02 +00002766 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002767 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002768
Jeff Layton1c892542012-05-16 07:13:17 -04002769 do {
2770 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2771 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002772
Jeff Layton1c892542012-05-16 07:13:17 -04002773 /* allocate a readdata struct */
2774 rdata = cifs_readdata_alloc(npages,
2775 cifs_uncached_readv_complete);
2776 if (!rdata) {
2777 rc = -ENOMEM;
2778 break; /* rdata is NULL here, so don't kref_put it at "error" */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002780
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002781 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002782 if (rc)
2783 goto error;
2784
2785 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002786 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002787 rdata->offset = offset;
2788 rdata->bytes = cur_len;
2789 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002790 rdata->pagesz = PAGE_SIZE;
2791 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002792
2793 rc = cifs_retry_async_readv(rdata);
2794error:
2795 if (rc) {
2796 kref_put(&rdata->refcount,
2797 cifs_uncached_readdata_release);
2798 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 }
Jeff Layton1c892542012-05-16 07:13:17 -04002800
2801 list_add_tail(&rdata->list, &rdata_list);
2802 offset += cur_len;
2803 len -= cur_len;
2804 } while (len > 0);
2805
2806 /* if at least one read request was successfully sent, then reset rc */
2807 if (!list_empty(&rdata_list))
2808 rc = 0;
2809
2810 /* the loop below should proceed in the order of increasing offsets */
2811restart_loop:
2812 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2813 if (!rc) {
2814 ssize_t copied;
2815
2816 /* FIXME: freezable sleep too? */
2817 rc = wait_for_completion_killable(&rdata->done);
2818 if (rc)
2819 rc = -EINTR;
2820 else if (rdata->result)
2821 rc = rdata->result;
2822 else {
2823 rc = cifs_readdata_to_iov(rdata, iov,
2824 nr_segs, *poffset,
2825 &copied);
2826 total_read += copied;
2827 }
2828
2829 /* resend call if it's a retryable error */
2830 if (rc == -EAGAIN) {
2831 rc = cifs_retry_async_readv(rdata);
2832 goto restart_loop;
2833 }
2834 }
2835 list_del_init(&rdata->list);
2836 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002838
Jeff Layton1c892542012-05-16 07:13:17 -04002839 cifs_stats_bytes_read(tcon, total_read);
2840 *poffset += total_read;
2841
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002842 /* mask nodata case */
2843 if (rc == -ENODATA)
2844 rc = 0;
2845
Jeff Layton1c892542012-05-16 07:13:17 -04002846 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847}
2848
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002849ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002850 unsigned long nr_segs, loff_t pos)
2851{
2852 ssize_t read;
2853
2854 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2855 if (read > 0)
2856 iocb->ki_pos = pos;
2857
2858 return read;
2859}
2860
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002861ssize_t
2862cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2863 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002864{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002865 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2866 struct cifsInodeInfo *cinode = CIFS_I(inode);
2867 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2868 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2869 iocb->ki_filp->private_data;
2870 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2871 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002872
2873 /*
2874 * In strict cache mode we need to read from the server every time if
2875 * we don't have a level II oplock because the server can delay the
2876 * mtime change - so we can't decide whether to invalidate the inode.
2877 * We can also fail when reading pages if there are mandatory locks
2878 * on pages affected by this read but not on the region from pos to
2879 * pos+len-1.
2880 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002881 if (!cinode->clientCanCacheRead)
2882 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002883
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002884 if (cap_unix(tcon->ses) &&
2885 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2886 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2887 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2888
2889 /*
2890 * We need to hold the sem to be sure nobody modifies the lock list
2891 * with a brlock that prevents reading.
2892 */
2893 down_read(&cinode->lock_sem);
2894 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2895 tcon->ses->server->vals->shared_lock_type,
2896 NULL, true))
2897 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2898 up_read(&cinode->lock_sem);
2899 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002900}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
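/*
 * Synchronous read helper used by the readpage paths: pull at most rsize
 * bytes at a time from the server into the supplied buffer, reopening the
 * file handle and retrying on -EAGAIN.
 */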
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002902static ssize_t
2903cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904{
2905 int rc = -EACCES;
2906 unsigned int bytes_read = 0;
2907 unsigned int total_read;
2908 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002909 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002911 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002912 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002913 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002914 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002916 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002917 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002918 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002920 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002921 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002923 /* FIXME: set up handlers for larger reads and/or convert to async */
2924 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2925
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302927 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002928 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302929 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002931 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002932 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002933 server = tcon->ses->server;
2934
2935 if (!server->ops->sync_read) {
2936 free_xid(xid);
2937 return -ENOSYS;
2938 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002940 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2941 pid = open_file->pid;
2942 else
2943 pid = current->tgid;
2944
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002946 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002948 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2949 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002950 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002951 /*
2952 * For Windows ME and 9x we do not want to request more than the
2953 * negotiated size, since the server will otherwise refuse the read.
2954 */
2955 if ((tcon->ses) && !(tcon->ses->capabilities &
2956 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002957 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002958 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 rc = -EAGAIN;
2961 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002962 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002963 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 if (rc != 0)
2965 break;
2966 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002967 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002968 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002969 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002970 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002971 rc = server->ops->sync_read(xid, open_file, &io_parms,
2972 &bytes_read, &cur_offset,
2973 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 }
2975 if (rc || (bytes_read == 0)) {
2976 if (total_read) {
2977 break;
2978 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002979 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 return rc;
2981 }
2982 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002983 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002984 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 }
2986 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002987 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 return total_read;
2989}
2990
Jeff Laytonca83ce32011-04-12 09:13:44 -04002991/*
2992 * If the page is mmap'ed into a process' page tables, then we need to make
2993 * sure that it doesn't change while being written back.
2994 */
2995static int
2996cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2997{
2998 struct page *page = vmf->page;
2999
3000 lock_page(page);
3001 return VM_FAULT_LOCKED;
3002}
3003
3004static struct vm_operations_struct cifs_file_vm_ops = {
3005 .fault = filemap_fault,
3006 .page_mkwrite = cifs_page_mkwrite,
3007};
3008
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003009int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3010{
3011 int rc, xid;
3012 struct inode *inode = file->f_path.dentry->d_inode;
3013
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003014 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003015
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003016 if (!CIFS_I(inode)->clientCanCacheRead) {
3017 rc = cifs_invalidate_mapping(inode);
3018 if (rc)
3019 return rc;
3020 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003021
3022 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003023 if (rc == 0)
3024 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003025 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003026 return rc;
3027}
3028
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3030{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 int rc, xid;
3032
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003033 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003034 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003036 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003037 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 return rc;
3039 }
3040 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003041 if (rc == 0)
3042 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003043 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 return rc;
3045}
3046
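/*
 * Completion work for cached reads: on success mark the pages uptodate and
 * hand them to fscache, then unlock and release them.
 */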
Jeff Layton0471ca32012-05-16 07:13:16 -04003047static void
3048cifs_readv_complete(struct work_struct *work)
3049{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003050 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003051 struct cifs_readdata *rdata = container_of(work,
3052 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003053
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003054 for (i = 0; i < rdata->nr_pages; i++) {
3055 struct page *page = rdata->pages[i];
3056
Jeff Layton0471ca32012-05-16 07:13:16 -04003057 lru_cache_add_file(page);
3058
3059 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003060 flush_dcache_page(page);
3061 SetPageUptodate(page);
3062 }
3063
3064 unlock_page(page);
3065
3066 if (rdata->result == 0)
3067 cifs_readpage_to_fscache(rdata->mapping->host, page);
3068
3069 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003070 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003071 }
Jeff Layton6993f742012-05-16 07:13:17 -04003072 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003073}
3074
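/*
 * Receive a readpages response into page cache pages. Pages that lie
 * beyond the server's EOF are zero-filled and marked uptodate right away;
 * pages for which no data arrived are released back to the LRU.
 */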
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003075static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003076cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3077 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003078{
Jeff Layton8321fec2012-09-19 06:22:32 -07003079 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003080 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003081 u64 eof;
3082 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003083 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003084 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003085
3086 /* determine the eof that the server (probably) has */
3087 eof = CIFS_I(rdata->mapping->host)->server_eof;
3088 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3089 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3090
Jeff Layton8321fec2012-09-19 06:22:32 -07003091 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003092 for (i = 0; i < nr_pages; i++) {
3093 struct page *page = rdata->pages[i];
3094
Jeff Layton8321fec2012-09-19 06:22:32 -07003095 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003096 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003097 iov.iov_base = kmap(page);
3098 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003099 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003100 i, page->index, iov.iov_base, iov.iov_len);
3101 len -= PAGE_CACHE_SIZE;
3102 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003103 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003104 iov.iov_base = kmap(page);
3105 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003106 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003107 i, page->index, iov.iov_base, iov.iov_len);
3108 memset(iov.iov_base + len,
3109 '\0', PAGE_CACHE_SIZE - len);
3110 rdata->tailsz = len;
3111 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003112 } else if (page->index > eof_index) {
3113 /*
3114 * The VFS will not try to do readahead past the
3115 * i_size, but it's possible that we have outstanding
3116 * writes with gaps in the middle and the i_size hasn't
3117 * caught up yet. Populate those with zeroed out pages
3118 * to prevent the VFS from repeatedly attempting to
3119 * fill them until the writes are flushed.
3120 */
3121 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003122 lru_cache_add_file(page);
3123 flush_dcache_page(page);
3124 SetPageUptodate(page);
3125 unlock_page(page);
3126 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003127 rdata->pages[i] = NULL;
3128 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003129 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003130 } else {
3131 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003132 lru_cache_add_file(page);
3133 unlock_page(page);
3134 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003135 rdata->pages[i] = NULL;
3136 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003137 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003138 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003139
3140 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3141 kunmap(page);
3142 if (result < 0)
3143 break;
3144
3145 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003146 }
3147
Jeff Layton8321fec2012-09-19 06:22:32 -07003148 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003149}
3150
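/*
 * ->readpages() implementation: try fscache first, then batch contiguous
 * pages from the VFS list into rsize-sized async read requests.
 */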
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151static int cifs_readpages(struct file *file, struct address_space *mapping,
3152 struct list_head *page_list, unsigned num_pages)
3153{
Jeff Layton690c5e32011-10-19 15:30:16 -04003154 int rc;
3155 struct list_head tmplist;
3156 struct cifsFileInfo *open_file = file->private_data;
3157 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3158 unsigned int rsize = cifs_sb->rsize;
3159 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160
Jeff Layton690c5e32011-10-19 15:30:16 -04003161 /*
3162 * Give up immediately if rsize is too small to read an entire page.
3163 * The VFS will fall back to readpage. We should never reach this
3164 * point however since we set ra_pages to 0 when the rsize is smaller
3165 * than a cache page.
3166 */
3167 if (unlikely(rsize < PAGE_CACHE_SIZE))
3168 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003169
Suresh Jayaraman56698232010-07-05 18:13:25 +05303170 /*
3171 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3172 * immediately if the cookie is negative
3173 */
3174 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3175 &num_pages);
3176 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003177 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303178
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003179 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3180 pid = open_file->pid;
3181 else
3182 pid = current->tgid;
3183
Jeff Layton690c5e32011-10-19 15:30:16 -04003184 rc = 0;
3185 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186
Jeff Layton690c5e32011-10-19 15:30:16 -04003187 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3188 mapping, num_pages);
3189
3190 /*
3191 * Start with the page at end of list and move it to private
3192 * list. Do the same with any following pages until we hit
3193 * the rsize limit, hit an index discontinuity, or run out of
3194 * pages. Issue the async read and then start the loop again
3195 * until the list is empty.
3196 *
3197 * Note that list order is important. The page_list is in
3198 * the order of declining indexes. When we put the pages in
3199 * the rdata->pages, then we want them in increasing order.
3200 */
3201 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003202 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003203 unsigned int bytes = PAGE_CACHE_SIZE;
3204 unsigned int expected_index;
3205 unsigned int nr_pages = 1;
3206 loff_t offset;
3207 struct page *page, *tpage;
3208 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209
3210 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211
Jeff Layton690c5e32011-10-19 15:30:16 -04003212 /*
3213 * Lock the page and put it in the cache. Since no one else
3214 * should have access to this page, we're safe to simply set
3215 * PG_locked without checking it first.
3216 */
3217 __set_page_locked(page);
3218 rc = add_to_page_cache_locked(page, mapping,
3219 page->index, GFP_KERNEL);
3220
3221 /* give up if we can't stick it in the cache */
3222 if (rc) {
3223 __clear_page_locked(page);
3224 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226
Jeff Layton690c5e32011-10-19 15:30:16 -04003227 /* move first page to the tmplist */
3228 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3229 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230
Jeff Layton690c5e32011-10-19 15:30:16 -04003231 /* now try and add more pages onto the request */
3232 expected_index = page->index + 1;
3233 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3234 /* discontinuity ? */
3235 if (page->index != expected_index)
3236 break;
3237
3238 /* would this page push the read over the rsize? */
3239 if (bytes + PAGE_CACHE_SIZE > rsize)
3240 break;
3241
3242 __set_page_locked(page);
3243 if (add_to_page_cache_locked(page, mapping,
3244 page->index, GFP_KERNEL)) {
3245 __clear_page_locked(page);
3246 break;
3247 }
3248 list_move_tail(&page->lru, &tmplist);
3249 bytes += PAGE_CACHE_SIZE;
3250 expected_index++;
3251 nr_pages++;
3252 }
3253
Jeff Layton0471ca32012-05-16 07:13:16 -04003254 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003255 if (!rdata) {
3256 /* best to give up if we're out of mem */
3257 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3258 list_del(&page->lru);
3259 lru_cache_add_file(page);
3260 unlock_page(page);
3261 page_cache_release(page);
3262 }
3263 rc = -ENOMEM;
3264 break;
3265 }
3266
Jeff Layton6993f742012-05-16 07:13:17 -04003267 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003268 rdata->mapping = mapping;
3269 rdata->offset = offset;
3270 rdata->bytes = bytes;
3271 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003272 rdata->pagesz = PAGE_CACHE_SIZE;
3273 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003274
3275 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3276 list_del(&page->lru);
3277 rdata->pages[rdata->nr_pages++] = page;
3278 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003279
Jeff Layton2a1bb132012-05-16 07:13:17 -04003280 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003281 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003282 for (i = 0; i < rdata->nr_pages; i++) {
3283 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003284 lru_cache_add_file(page);
3285 unlock_page(page);
3286 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 }
Jeff Layton6993f742012-05-16 07:13:17 -04003288 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 break;
3290 }
Jeff Layton6993f742012-05-16 07:13:17 -04003291
3292 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293 }
3294
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 return rc;
3296}
3297
3298static int cifs_readpage_worker(struct file *file, struct page *page,
3299 loff_t *poffset)
3300{
3301 char *read_data;
3302 int rc;
3303
Suresh Jayaraman56698232010-07-05 18:13:25 +05303304 /* Is the page cached? */
3305 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3306 if (rc == 0)
3307 goto read_complete;
3308
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 page_cache_get(page);
3310 read_data = kmap(page);
3311 /* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003312
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 if (rc < 0)
3316 goto io_error;
3317 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003318 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003319
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003320 file->f_path.dentry->d_inode->i_atime =
3321 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 if (PAGE_CACHE_SIZE > rc)
3324 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3325
3326 flush_dcache_page(page);
3327 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303328
3329 /* send this page to the cache */
3330 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003333
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003335 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303337
3338read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 return rc;
3340}
3341
3342static int cifs_readpage(struct file *file, struct page *page)
3343{
3344 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3345 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003346 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003348 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349
3350 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303351 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003352 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303353 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 }
3355
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003356 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003357 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359 rc = cifs_readpage_worker(file, page, &offset);
3360
3361 unlock_page(page);
3362
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003363 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 return rc;
3365}
3366
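/* Return 1 if any open file on this inode was opened for writing. */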
Steve Frencha403a0a2007-07-26 15:54:16 +00003367static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3368{
3369 struct cifsFileInfo *open_file;
3370
Jeff Layton44772882010-10-15 15:34:03 -04003371 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003372 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003373 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003374 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003375 return 1;
3376 }
3377 }
Jeff Layton44772882010-10-15 15:34:03 -04003378 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003379 return 0;
3380}
3381
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382/* We do not want to update the file size from the server for inodes
3383 open for write - to avoid races with writepage extending
3384 the file. In the future we could consider allowing a refresh
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003385 of the inode only on increases in the file size, but this is
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386 tricky to do without racing with writebehind
3387 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003388bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389{
Steve Frencha403a0a2007-07-26 15:54:16 +00003390 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003391 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003392
Steve Frencha403a0a2007-07-26 15:54:16 +00003393 if (is_inode_writable(cifsInode)) {
3394 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003395 struct cifs_sb_info *cifs_sb;
3396
Steve Frenchc32a0b62006-01-12 14:41:28 -08003397 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003398 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003399 /* since there is no page cache to corrupt on
Steve Frenchc32a0b62006-01-12 14:41:28 -08003400 direct I/O, we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003401 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003402 }
3403
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003404 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003405 return true;
Steve French7ba52632007-02-08 18:14:13 +00003406
Steve French4b18f2a2008-04-29 00:06:05 +00003407 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003408 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003409 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410}
3411
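/*
 * Prepare a page cache page for a write: unless the write covers the whole
 * page, either read the page in or, when we hold a read oplock and the page
 * lies at or beyond the EOF, just zero the parts we won't overwrite.
 */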
Nick Piggind9414772008-09-24 11:32:59 -04003412static int cifs_write_begin(struct file *file, struct address_space *mapping,
3413 loff_t pos, unsigned len, unsigned flags,
3414 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415{
Nick Piggind9414772008-09-24 11:32:59 -04003416 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3417 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003418 loff_t page_start = pos & PAGE_MASK;
3419 loff_t i_size;
3420 struct page *page;
3421 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422
Joe Perchesb6b38f72010-04-21 03:50:45 +00003423 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003424
Nick Piggin54566b22009-01-04 12:00:53 -08003425 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003426 if (!page) {
3427 rc = -ENOMEM;
3428 goto out;
3429 }
Nick Piggind9414772008-09-24 11:32:59 -04003430
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003431 if (PageUptodate(page))
3432 goto out;
Steve French8a236262007-03-06 00:31:00 +00003433
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003434 /*
3435 * If we write a full page it will be up to date, no need to read from
3436 * the server. If the write is short, we'll end up doing a sync write
3437 * instead.
3438 */
3439 if (len == PAGE_CACHE_SIZE)
3440 goto out;
3441
3442 /*
3443 * optimize away the read when we have an oplock, and we're not
3444 * expecting to use any of the data we'd be reading in. That
3445 * is, when the page lies beyond the EOF, or straddles the EOF
3446 * and the write will cover all of the existing data.
3447 */
3448 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3449 i_size = i_size_read(mapping->host);
3450 if (page_start >= i_size ||
3451 (offset == 0 && (pos + len) >= i_size)) {
3452 zero_user_segments(page, 0, offset,
3453 offset + len,
3454 PAGE_CACHE_SIZE);
3455 /*
3456 * PageChecked means that the parts of the page
3457 * to which we're not writing are considered up
3458 * to date. Once the data is copied to the
3459 * page, it can be set uptodate.
3460 */
3461 SetPageChecked(page);
3462 goto out;
3463 }
3464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465
Nick Piggind9414772008-09-24 11:32:59 -04003466 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003467 /*
3468 * might as well read a page, it is fast enough. If we get
3469 * an error, we don't need to return it. cifs_write_end will
3470 * do a sync write instead since PG_uptodate isn't set.
3471 */
3472 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003473 } else {
3474 /* we could try using another file handle if there is one -
3475 but how would we lock it to prevent close of that handle
3476 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003477 this will be written out by write_end so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003478 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003479out:
3480 *pagep = page;
3481 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303484static int cifs_release_page(struct page *page, gfp_t gfp)
3485{
3486 if (PagePrivate(page))
3487 return 0;
3488
3489 return cifs_fscache_release_page(page, gfp);
3490}
3491
3492static void cifs_invalidate_page(struct page *page, unsigned long offset)
3493{
3494 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3495
3496 if (offset == 0)
3497 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3498}
3499
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003500static int cifs_launder_page(struct page *page)
3501{
3502 int rc = 0;
3503 loff_t range_start = page_offset(page);
3504 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3505 struct writeback_control wbc = {
3506 .sync_mode = WB_SYNC_ALL,
3507 .nr_to_write = 0,
3508 .range_start = range_start,
3509 .range_end = range_end,
3510 };
3511
3512 cFYI(1, "Launder page: %p", page);
3513
3514 if (clear_page_dirty_for_io(page))
3515 rc = cifs_writepage_locked(page, &wbc);
3516
3517 cifs_fscache_invalidate_page(page, page->mapping->host);
3518 return rc;
3519}
3520
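/*
 * Work handler run when the server breaks our oplock: flush dirty data
 * (and invalidate the cache when read caching is lost), push cached
 * byte-range locks back to the server, and acknowledge the break unless
 * it was cancelled (e.g. by a session reconnect).
 */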
Tejun Heo9b646972010-07-20 22:09:02 +02003521void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003522{
3523 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3524 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003525 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003526 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003527 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003528 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003529
3530 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003531 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003532 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003533 else
Al Viro8737c932009-12-24 06:47:55 -05003534 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003535 rc = filemap_fdatawrite(inode->i_mapping);
3536 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003537 rc = filemap_fdatawait(inode->i_mapping);
3538 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003539 invalidate_remote_inode(inode);
3540 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003541 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003542 }
3543
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003544 rc = cifs_push_locks(cfile);
3545 if (rc)
3546 cERROR(1, "Push locks rc = %d", rc);
3547
Jeff Layton3bc303c2009-09-21 06:47:50 -04003548 /*
3549 * Releasing a stale oplock after a recent reconnect of the SMB session
3550 * using a now incorrect file handle is not a data integrity issue, but
3551 * do not bother sending an oplock release if the session to the server
3552 * is still disconnected, since the oplock was already released by the server
3553 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003554 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003555 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3556 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003557 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003558 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003559}
3560
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003561const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 .readpage = cifs_readpage,
3563 .readpages = cifs_readpages,
3564 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003565 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003566 .write_begin = cifs_write_begin,
3567 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303569 .releasepage = cifs_release_page,
3570 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003571 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003573
3574/*
3575 * cifs_readpages requires the server to support a buffer large enough to
3576 * contain the header plus one complete page of data. Otherwise, we need
3577 * to leave cifs_readpages out of the address space operations.
3578 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003579const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003580 .readpage = cifs_readpage,
3581 .writepage = cifs_writepage,
3582 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003583 .write_begin = cifs_write_begin,
3584 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003585 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303586 .releasepage = cifs_release_page,
3587 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003588 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003589};