blob: 07e9d41cade7cb3b9d20ecd9a88fa6ef37edf2ca [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open a file via the CIFS POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path: server-relative path of the file to open
 * @pinode:    in/out inode pointer; NULL if the caller does not want the
 *             inode, *pinode == NULL asks us to instantiate a new inode
 * @sb:        superblock of the mount
 * @mode:      create mode (masked by current umask below)
 * @f_flags:   POSIX open flags, converted to SMB_O_* before the wire call
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle
 * @xid:       transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.  On success with a non-NULL
 * pinode, the inode is either newly instantiated (cifs_iget) or refreshed
 * in place (cifs_fattr_to_inode) from the returned FILE_UNIX_BASIC_INFO.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	/* response buffer for the server's FILE_UNIX_BASIC_INFO */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask to the requested create mode */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference only needed for the wire call above */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means no metadata returned; open still succeeded */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open a file using the regular (non-POSIX-extension) SMB open calls.
 * Picks CIFSSMBOpen for NT-capable servers, SMBLegacyOpen otherwise,
 * then refreshes inode metadata from the server.
 *
 * Returns 0 on success or a negative errno; on success *pnetfid and
 * *poplock hold the new handle and granted oplock level.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, unsigned int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO metadata returned by the open */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * NOTE(review): the legacy branch below passes CREATE_NOT_DIR
	 * directly rather than create_options, so CREATE_OPEN_BACKUP_INTENT
	 * set above is not applied on the SMBLegacyOpen path — confirm this
	 * is intentional (legacy servers may not support it).
	 */
	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh cached inode metadata from the server's response */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
/*
 * Allocate and initialize the per-open cifsFileInfo for a freshly opened
 * file handle, link it onto the tcon's and inode's open-file lists, and
 * attach it to file->private_data.
 *
 * @fileHandle: network file id returned by the server open
 * @file:       VFS file this handle backs
 * @tlink:      tcon link; a new reference is taken for the fileinfo
 * @oplock:     oplock level granted on the open
 *
 * Returns the new cifsFileInfo (refcount 1) or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	/* initial reference; dropped via cifsFileInfo_put() */
	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
	INIT_LIST_HEAD(&pCifsFile->llist);

	/* both open-file lists are protected by cifs_file_list_lock */
	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	/* brlock caching is only safe while we hold an exclusive oplock */
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
284
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400285static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
286
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference is dropped this also: unlinks the fileinfo from
 * the tcon/inode open-file lists, clears the oplock state if it was the
 * last open instance on the inode, cancels pending oplock-break work,
 * closes the handle on the server (when still valid), frees any byte-range
 * lock records, and drops the tlink and dentry references.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	/* not the last reference: just decrement and return */
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* must not run concurrently with the oplock break handler */
	cancel_work_sync(&cifs_file->oplock_break);

	/* close the handle on the server unless it is already dead */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		unsigned int xid;
		int rc;
		xid = get_xid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		free_xid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
349
/*
 * VFS ->open handler for regular files.
 *
 * Tries a POSIX-extension open first when the server advertises support
 * (and it has not been marked broken); falls back to the regular NT/legacy
 * open path otherwise.  On success a cifsFileInfo is allocated and attached
 * to file->private_data.  Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	/* only request an oplock if the server grants them at all */
	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims POSIX support but the open failed in
			   an unexpected way: disable POSIX opens from now on */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* undo the server-side open; we cannot track the handle */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
451
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.  Currently a stub that always reports success.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
462
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * reconnect).  Serialized per-file via fh_mutex; a handle that is already
 * valid when we get the mutex is treated as success.
 *
 * @pCifsFile: the open instance to restore
 * @can_flush: true if it is safe to flush dirty pages and refresh inode
 *             metadata from the server (false on the writeback path, where
 *             flushing here could deadlock)
 *
 * Returns 0 on success or a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = get_xid();
	mutex_lock(&pCifsFile->fh_mutex);
	/* someone else may have reopened the handle while we waited */
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extension reopen when the server supports it */
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
587
588int cifs_close(struct inode *inode, struct file *file)
589{
Jeff Layton77970692011-04-05 16:23:47 -0700590 if (file->private_data != NULL) {
591 cifsFileInfo_put(file->private_data);
592 file->private_data = NULL;
593 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594
Steve Frenchcdff08e2010-10-21 22:46:14 +0000595 /* return code from the ->release op is always ignored */
596 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597}
598
/*
 * VFS ->release handler for directories opened via readdir.  Closes any
 * uncompleted server-side search, releases the cached network search
 * buffer, and frees the private data.  Always returns 0 (close failures
 * on an uncompleted readdir are deliberately ignored).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = get_xid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		/* only need FindClose when the search never reached the end;
		   mark the handle invalid under the lock before dropping it */
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* small and regular SMB buffers come from different
			   pools, so release accordingly */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
643
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400644static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300645cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000646{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400647 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000648 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400649 if (!lock)
650 return lock;
651 lock->offset = offset;
652 lock->length = length;
653 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400654 lock->pid = current->tgid;
655 INIT_LIST_HEAD(&lock->blist);
656 init_waitqueue_head(&lock->block_q);
657 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400658}
659
660static void
661cifs_del_lock_waiters(struct cifsLockInfo *lock)
662{
663 struct cifsLockInfo *li, *tmp;
664 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
665 list_del_init(&li->blist);
666 wake_up(&li->block_q);
667 }
668}
669
670static bool
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300671cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300672 __u64 length, __u8 type, struct cifsFileInfo *cur,
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300673 struct cifsLockInfo **conf_lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400674{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300675 struct cifsLockInfo *li;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300676 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400677
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300678 list_for_each_entry(li, &cfile->llist, llist) {
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400679 if (offset + length <= li->offset ||
680 offset >= li->offset + li->length)
681 continue;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300682 else if ((type & server->vals->shared_lock_type) &&
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300683 ((server->ops->compare_fids(cur, cfile) &&
684 current->tgid == li->pid) || type == li->type))
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400685 continue;
686 else {
687 *conf_lock = li;
688 return true;
689 }
690 }
691 return false;
692}
693
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400694static bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300695cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
696 __u8 type, struct cifsLockInfo **conf_lock)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400697{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300698 bool rc = false;
699 struct cifsFileInfo *fid, *tmp;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300700 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300701
702 spin_lock(&cifs_file_list_lock);
703 list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
704 rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300705 cfile, conf_lock);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300706 if (rc)
707 break;
708 }
709 spin_unlock(&cifs_file_list_lock);
710
711 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400712}
713
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300714/*
715 * Check if there is another lock that prevents us to set the lock (mandatory
716 * style). If such a lock exists, update the flock structure with its
717 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
718 * or leave it the same if we can't. Returns 0 if we don't need to request to
719 * the server or 1 otherwise.
720 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400721static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300722cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
723 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400724{
725 int rc = 0;
726 struct cifsLockInfo *conf_lock;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300727 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300728 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400729 bool exist;
730
731 mutex_lock(&cinode->lock_mutex);
732
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300733 exist = cifs_find_lock_conflict(cfile, offset, length, type,
734 &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400735 if (exist) {
736 flock->fl_start = conf_lock->offset;
737 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
738 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300739 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400740 flock->fl_type = F_RDLCK;
741 else
742 flock->fl_type = F_WRLCK;
743 } else if (!cinode->can_cache_brlcks)
744 rc = 1;
745 else
746 flock->fl_type = F_UNLCK;
747
748 mutex_unlock(&cinode->lock_mutex);
749 return rc;
750}
751
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400752static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300753cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400754{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300755 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400756 mutex_lock(&cinode->lock_mutex);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300757 list_add_tail(&lock->llist, &cfile->llist);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400758 mutex_unlock(&cinode->lock_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000759}
760
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	/* no conflict and brlocks still cached: take the lock locally only */
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1; /* caller must send the request to the server */
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep until we are unlinked from it (waiters are woken by
		 * cifs_del_lock_waiters), then re-check for conflicts.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted: remove ourselves from the blocked list */
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}
807
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300808/*
809 * Check if there is another lock that prevents us to set the lock (posix
810 * style). If such a lock exists, update the flock structure with its
811 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
812 * or leave it the same if we can't. Returns 0 if we don't need to request to
813 * the server or 1 otherwise.
814 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400815static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400816cifs_posix_lock_test(struct file *file, struct file_lock *flock)
817{
818 int rc = 0;
819 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
820 unsigned char saved_type = flock->fl_type;
821
Pavel Shilovsky50792762011-10-29 17:17:57 +0400822 if ((flock->fl_flags & FL_POSIX) == 0)
823 return 1;
824
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400825 mutex_lock(&cinode->lock_mutex);
826 posix_test_lock(file, flock);
827
828 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
829 flock->fl_type = saved_type;
830 rc = 1;
831 }
832
833 mutex_unlock(&cinode->lock_mutex);
834 return rc;
835}
836
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		/* caching disabled - the caller must go to the server */
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked by another local lock: wait for it and retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}
869
/*
 * Push all cached byte-range locks of @cfile to the server using
 * LOCKING_ANDX requests, batching as many ranges as fit in one SMB
 * buffer. One pass is made per lock type because a single LOCKING_ANDX
 * request carries only one type. Afterwards brlock caching is disabled
 * for the inode. Returns 0 on success or the last request error.
 */
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - locks are already on the server */
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* send the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	free_xid(xid);
	return rc;
}
952
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

/*
 * Snapshot of a posix brlock, detached from struct file_lock so it can
 * be sent to the server after the flock lock has been dropped (see
 * cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* owner pid reported to the server */
	__u16 netfid;		/* handle of the file the lock belongs to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
966
/*
 * Push all cached posix (FL_POSIX) locks on the inode to the server.
 * Works in three passes: count the posix locks under lock_flocks(),
 * preallocate one lock_to_push per lock (GFP_KERNEL allocation can
 * sleep, so it must happen outside lock_flocks()), then copy the lock
 * properties and send each with CIFSSMBPosixLock. Afterwards brlock
 * caching is disabled for the inode.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - locks are already on the server */
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/* pass 1: count posix locks on the inode */
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* pass 2: copy each posix lock into a preallocated element */
	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	/* pass 3: send every snapshotted lock to the server */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way - free what we got, then clean up */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1065
1066static int
1067cifs_push_locks(struct cifsFileInfo *cfile)
1068{
1069 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1070 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1071
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001072 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001073 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1074 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1075 return cifs_push_posix_locks(cfile);
1076
1077 return cifs_push_mandatory_locks(cfile);
1078}
1079
/*
 * Decode a struct file_lock into CIFS terms: set *type to the server's
 * lock-type bits matching flock->fl_type, set *lock / *unlock to
 * indicate the requested operation, and set *wait_flag for blocking
 * (FL_SLEEP) requests. Unsupported flags and unknown types are only
 * logged.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	/* warn about any flag bits we do not handle above */
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001127static int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001128cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001129 __u64 length, __u32 type, int lock, int unlock, bool wait)
1130{
1131 return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
1132 current->tgid, length, offset, unlock, lock,
1133 (__u8)type, wait, 0);
1134}
1135
/*
 * Test whether @flock could be set (F_GETLK-style probe). Posix locks
 * are probed locally and, if needed, via CIFSSMBPosixLock. Mandatory
 * locks are probed by trying to set and immediately unset the range on
 * the server with the requested type and, if an exclusive probe failed,
 * once more with a shared lock to distinguish read from write conflicts.
 * flock->fl_type is updated with the result.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* 0 means the local check was conclusive */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* 0 means a cached lock answered the query */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
				 1, 0, false);
	if (rc == 0) {
		/* lock succeeded - no conflict; undo the probe lock */
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				   "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* even a shared probe failed - a write lock blocks us */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive probe failed - retry shared to classify the conflict */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
				 type | server->vals->shared_lock_type, 1, 0,
				 false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type | server->vals->shared_lock_type,
					 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1202
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001203static void
1204cifs_move_llist(struct list_head *source, struct list_head *dest)
1205{
1206 struct list_head *li, *tmp;
1207 list_for_each_safe(li, tmp, source)
1208 list_move(li, dest);
1209}
1210
1211static void
1212cifs_free_llist(struct list_head *llist)
1213{
1214 struct cifsLockInfo *li, *tmp;
1215 list_for_each_entry_safe(li, tmp, llist, llist) {
1216 cifs_del_lock_waiters(li);
1217 list_del(&li->llist);
1218 kfree(li);
1219 }
1220}
1221
/*
 * Unlock all of the current task's cached locks that fall entirely
 * inside the range described by @flock. While brlocks are still cached
 * the locks are simply removed locally; otherwise batched LOCKING_ANDX
 * unlock requests are sent to the server, and locks are restored to the
 * file's list whenever a request fails.
 */
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	/* one pass per lock type - a request carries only one type */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			/* skip locks not fully contained in the range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cfile->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}
1326
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001328cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001329 bool wait_flag, bool posix_lck, int lock, int unlock,
1330 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331{
1332 int rc = 0;
1333 __u64 length = 1 + flock->fl_end - flock->fl_start;
1334 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1335 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001336 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337 __u16 netfid = cfile->netfid;
1338
1339 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001340 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001341
1342 rc = cifs_posix_lock_set(file, flock);
1343 if (!rc || rc < 0)
1344 return rc;
1345
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001346 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001347 posix_lock_type = CIFS_RDLCK;
1348 else
1349 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001350
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001351 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001352 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001353
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001354 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001355 flock->fl_start, length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001356 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001357 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001358 }
1359
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001361 struct cifsLockInfo *lock;
1362
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001363 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001364 if (!lock)
1365 return -ENOMEM;
1366
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001367 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001368 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001369 kfree(lock);
1370 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001371 goto out;
1372
Pavel Shilovsky7f924472012-03-28 17:10:25 +04001373 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1374 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001375 if (rc) {
1376 kfree(lock);
1377 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001378 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001379
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001380 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001381 } else if (unlock)
1382 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001383
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001384out:
1385 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001386 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387 return rc;
1388}
1389
1390int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1391{
1392 int rc, xid;
1393 int lock = 0, unlock = 0;
1394 bool wait_flag = false;
1395 bool posix_lck = false;
1396 struct cifs_sb_info *cifs_sb;
1397 struct cifs_tcon *tcon;
1398 struct cifsInodeInfo *cinode;
1399 struct cifsFileInfo *cfile;
1400 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001401 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001402
1403 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001404 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001405
1406 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1407 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1408 flock->fl_start, flock->fl_end);
1409
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001410 cfile = (struct cifsFileInfo *)file->private_data;
1411 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001412
1413 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1414 tcon->ses->server);
1415
1416 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001417 netfid = cfile->netfid;
1418 cinode = CIFS_I(file->f_path.dentry->d_inode);
1419
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001420 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001421 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1422 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1423 posix_lck = true;
1424 /*
1425 * BB add code here to normalize offset and length to account for
1426 * negative length which we can not accept over the wire.
1427 */
1428 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001429 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001430 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 return rc;
1432 }
1433
1434 if (!lock && !unlock) {
1435 /*
1436 * if no lock or unlock then nothing to do since we do not
1437 * know what it is
1438 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001439 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001440 return -EOPNOTSUPP;
1441 }
1442
1443 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1444 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001445 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 return rc;
1447}
1448
Jeff Layton597b0272012-03-23 14:40:56 -04001449/*
1450 * update the file size (if needed) after a write. Should be called with
1451 * the inode->i_lock held
1452 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001453void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001454cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1455 unsigned int bytes_written)
1456{
1457 loff_t end_of_write = offset + bytes_written;
1458
1459 if (end_of_write > cifsi->server_eof)
1460 cifsi->server_eof = end_of_write;
1461}
1462
/*
 * Write @write_size bytes from @write_data to the file behind @open_file
 * starting at *@poffset, looping until everything is sent or an error
 * occurs.  Each request is capped at the negotiated wsize and retried on
 * -EAGAIN (reopening an invalidated handle first if needed).
 *
 * @pid: pid stamped into the SMB write request (io_parms.pid)
 *
 * On success returns the number of bytes written and advances *@poffset;
 * the cached server EOF and i_size are updated under inode->i_lock.  If
 * nothing at all could be written, the error from the failed attempt is
 * returned instead.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;	/* force at least one pass of the retry loop */
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap the request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;	/* report the partial write below */
			else {
				/* nothing written at all - return the error */
				free_xid(xid);
				return rc;
			}
		} else {
			/* grow the cached server EOF, then the offset */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *poffset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1543
Jeff Layton6508d902010-09-29 19:51:11 -04001544struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1545 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001546{
1547 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001548 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1549
1550 /* only filter by fsuid on multiuser mounts */
1551 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1552 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001553
Jeff Layton44772882010-10-15 15:34:03 -04001554 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001555 /* we could simply get the first_list_entry since write-only entries
1556 are always at the end of the list but since the first entry might
1557 have a close pending, we go through the whole list */
1558 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001559 if (fsuid_only && open_file->uid != current_fsuid())
1560 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001561 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001562 if (!open_file->invalidHandle) {
1563 /* found a good file */
1564 /* lock it so it will not be closed on us */
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001565 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001566 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001567 return open_file;
1568 } /* else might as well continue, and look for
1569 another, or simply have the caller reopen it
1570 again rather than trying to fix this handle */
1571 } else /* write only file */
1572 break; /* write only files are last so must be done */
1573 }
Jeff Layton44772882010-10-15 15:34:03 -04001574 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001575 return NULL;
1576}
Steve French630f3f0c2007-10-25 21:17:17 +00001577
Jeff Layton6508d902010-09-29 19:51:11 -04001578struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1579 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001580{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001581 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001582 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001583 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001584 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001585 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001586
Steve French60808232006-04-22 15:53:05 +00001587 /* Having a null inode here (because mapping->host was set to zero by
1588 the VFS or MM) should not happen but we had reports of on oops (due to
1589 it being zero) during stress testcases so we need to check for it */
1590
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001591 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001592 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001593 dump_stack();
1594 return NULL;
1595 }
1596
Jeff Laytond3892292010-11-02 16:22:50 -04001597 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1598
Jeff Layton6508d902010-09-29 19:51:11 -04001599 /* only filter by fsuid on multiuser mounts */
1600 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1601 fsuid_only = false;
1602
Jeff Layton44772882010-10-15 15:34:03 -04001603 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001604refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001605 if (refind > MAX_REOPEN_ATT) {
1606 spin_unlock(&cifs_file_list_lock);
1607 return NULL;
1608 }
Steve French6148a742005-10-05 12:23:19 -07001609 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001610 if (!any_available && open_file->pid != current->tgid)
1611 continue;
1612 if (fsuid_only && open_file->uid != current_fsuid())
1613 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001614 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001615 if (!open_file->invalidHandle) {
1616 /* found a good writable file */
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001617 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001618 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001619 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001620 } else {
1621 if (!inv_file)
1622 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001623 }
Steve French6148a742005-10-05 12:23:19 -07001624 }
1625 }
Jeff Layton2846d382008-09-22 21:33:33 -04001626 /* couldn't find useable FH with same pid, try any available */
1627 if (!any_available) {
1628 any_available = true;
1629 goto refind_writable;
1630 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001631
1632 if (inv_file) {
1633 any_available = false;
1634 cifsFileInfo_get(inv_file);
1635 }
1636
Jeff Layton44772882010-10-15 15:34:03 -04001637 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001638
1639 if (inv_file) {
1640 rc = cifs_reopen_file(inv_file, false);
1641 if (!rc)
1642 return inv_file;
1643 else {
1644 spin_lock(&cifs_file_list_lock);
1645 list_move_tail(&inv_file->flist,
1646 &cifs_inode->openFileList);
1647 spin_unlock(&cifs_file_list_lock);
1648 cifsFileInfo_put(inv_file);
1649 spin_lock(&cifs_file_list_lock);
1650 ++refind;
1651 goto refind_writable;
1652 }
1653 }
1654
Steve French6148a742005-10-05 12:23:19 -07001655 return NULL;
1656}
1657
/*
 * Write the [from, to) byte range of @page back to the server using any
 * available writable handle for the inode.
 *
 * Returns 0 on success, 0 if the page lies past EOF (racing with
 * truncate - nothing to do), a negative error from the write, -EIO when
 * no writable handle exists or the range is bogus, -EFAULT if the page
 * has no mapping.  Called with the page held by the caller (see
 * cifs_writepage_locked).
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* byte offset of "from" within the file */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity check the range before touching the server */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		/* cifs_write advances "offset" by the bytes it sent */
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1711
Jeff Laytone9492872012-03-23 14:40:56 -04001712/*
1713 * Marshal up the iov array, reserving the first one for the header. Also,
1714 * set wdata->bytes.
1715 */
1716static void
1717cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1718{
1719 int i;
1720 struct inode *inode = wdata->cfile->dentry->d_inode;
1721 loff_t size = i_size_read(inode);
1722
1723 /* marshal up the pages into iov array */
1724 wdata->bytes = 0;
1725 for (i = 0; i < wdata->nr_pages; i++) {
1726 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1727 (loff_t)PAGE_CACHE_SIZE);
1728 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1729 wdata->bytes += iov[i + 1].iov_len;
1730 }
1731}
1732
/*
 * ->writepages() for cifs: write back dirty pages of @mapping in batches
 * of up to wsize bytes of consecutive pages, handing each batch to the
 * async write machinery (cifs_writedata + cifs_async_writev).  Falls back
 * to generic_writepages() when wsize is smaller than a page.  Returns 0
 * or the first fatal error encountered.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	/* work out the page range this writeback pass should cover */
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most a full wsize worth of pages per request */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* now cull the found pages down to a locked, consecutive,
		   dirty run starting at wdata->pages[0] */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			/* block for the first page, never for the rest */
			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->marshal_iov = cifs_writepages_marshal_iov;

		/* (re)acquire a writable handle and send; retry on -EAGAIN
		   only for data-integrity (WB_SYNC_ALL) writeback */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001935static int
1936cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001938 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001939 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001941 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942/* BB add check for wbc flags */
1943 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001944 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001945 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001946
1947 /*
1948 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1949 *
1950 * A writepage() implementation always needs to do either this,
1951 * or re-dirty the page with "redirty_page_for_writepage()" in
1952 * the case of a failure.
1953 *
1954 * Just unlocking the page will cause the radix tree tag-bits
1955 * to fail to update with the state of the page correctly.
1956 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001957 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001958retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001960 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1961 goto retry_write;
1962 else if (rc == -EAGAIN)
1963 redirty_page_for_writepage(wbc, page);
1964 else if (rc != 0)
1965 SetPageError(page);
1966 else
1967 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001968 end_page_writeback(page);
1969 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001970 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 return rc;
1972}
1973
/* ->writepage(): do the work with the page still locked, then unlock it */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
1980
/*
 * ->write_end() for cifs: called after @copied bytes were copied into
 * @page at @pos.  A page that is still not uptodate is pushed to the
 * server synchronously via cifs_write(); an uptodate page is simply
 * dirtied for later writeback.  Updates i_size if the write extended the
 * file, then unlocks and releases the page.  Returns the number of bytes
 * accepted or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* pid stamped on the write: forwarded opener pid, or ours */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	/* PageChecked is set by write_begin; a full copy makes it uptodate */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* the write may have extended the file */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2041
/*
 * fsync with strict cache semantics: flush dirty pages in [start, end],
 * invalidate the page cache when we do not hold a read oplock
 * (clientCanCacheRead), and ask the server to flush the file unless the
 * mount disabled server sync (CIFS_MOUNT_NOSSYNC).  Runs under
 * inode->i_mutex.  Returns 0 or a negative error.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* push our own dirty pages first; bail if that already failed */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2078
/*
 * Non-strict fsync: flush the dirty pagecache for the range and ask the
 * server to flush its write-behind data for this handle. Unlike
 * cifs_strict_fsync() it never invalidates the local pagecache.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	/* push local dirty pages first */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	/* server-side flush, unless disabled by the nossync mount flag */
	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107/*
2108 * As file closes, flush all cached write data for this inode checking
2109 * for write behind errors.
2110 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002111int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002113 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 int rc = 0;
2115
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002116 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002117 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002118
Joe Perchesb6b38f72010-04-21 03:50:45 +00002119 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
2121 return rc;
2122}
2123
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002124static int
2125cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2126{
2127 int rc = 0;
2128 unsigned long i;
2129
2130 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002131 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002132 if (!pages[i]) {
2133 /*
2134 * save number of pages we have already allocated and
2135 * return with ENOMEM error
2136 */
2137 num_pages = i;
2138 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002139 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002140 }
2141 }
2142
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002143 if (rc) {
2144 for (i = 0; i < num_pages; i++)
2145 put_page(pages[i]);
2146 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002147 return rc;
2148}
2149
2150static inline
2151size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2152{
2153 size_t num_pages;
2154 size_t clen;
2155
2156 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002157 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002158
2159 if (cur_len)
2160 *cur_len = clen;
2161
2162 return num_pages;
2163}
2164
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002165static void
2166cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2167{
2168 int i;
2169 size_t bytes = wdata->bytes;
2170
2171 /* marshal up the pages into iov array */
2172 for (i = 0; i < wdata->nr_pages; i++) {
Steve Frenchc7ad42b2012-03-23 16:30:56 -05002173 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002174 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2175 bytes -= iov[i + 1].iov_len;
2176 }
2177}
2178
/*
 * Workqueue completion handler for an uncached (direct) async write:
 * advance the server EOF / inode size, wake the waiter, and drop the
 * pages and our reference on the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock serializes the EOF/size update */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	/* wake up anyone blocked in cifs_iovec_write() */
	complete(&wdata->done);

	/* on -EAGAIN the pages are kept so the request can be resent */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2203
2204/* attempt to send write to server, retry on any -EAGAIN errors */
2205static int
2206cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2207{
2208 int rc;
2209
2210 do {
2211 if (wdata->cfile->invalidHandle) {
2212 rc = cifs_reopen_file(wdata->cfile, false);
2213 if (rc != 0)
2214 continue;
2215 }
2216 rc = cifs_async_writev(wdata);
2217 } while (rc == -EAGAIN);
2218
2219 return rc;
2220}
2221
/*
 * Uncached write path: copy the user's iovec into freshly allocated
 * pages, send them to the server as async writes (one cifs_writedata per
 * wsize-sized chunk), then wait for all replies and tally the result.
 *
 * On success returns the number of bytes written and advances *poffset;
 * otherwise returns a negative errno.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	/* validate pos/len against limits; may shrink len (e.g. O_APPEND) */
	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	/* send loop: one async write per wsize-sized chunk */
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* copy user data into the pages; short copies shrink cur_len */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		/* once rc is set, remaining entries are only cleaned up */
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2346
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002347ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002348 unsigned long nr_segs, loff_t pos)
2349{
2350 ssize_t written;
2351 struct inode *inode;
2352
2353 inode = iocb->ki_filp->f_path.dentry->d_inode;
2354
2355 /*
2356 * BB - optimize the way when signing is disabled. We can drop this
2357 * extra memory-to-memory copying and use iovec buffers for constructing
2358 * write request.
2359 */
2360
2361 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2362 if (written > 0) {
2363 CIFS_I(inode)->invalid_mapping = true;
2364 iocb->ki_pos = pos;
2365 }
2366
2367 return written;
2368}
2369
2370ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2371 unsigned long nr_segs, loff_t pos)
2372{
2373 struct inode *inode;
2374
2375 inode = iocb->ki_filp->f_path.dentry->d_inode;
2376
2377 if (CIFS_I(inode)->clientCanCacheAll)
2378 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2379
2380 /*
2381 * In strict cache mode we need to write the data to the server exactly
2382 * from the pos to pos+len-1 rather than flush all affected pages
2383 * because it may cause a error with mandatory locks on these pages but
2384 * not on the region from pos to ppos+len-1.
2385 */
2386
2387 return cifs_user_writev(iocb, iov, nr_segs, pos);
2388}
2389
Jeff Layton0471ca32012-05-16 07:13:16 -04002390static struct cifs_readdata *
2391cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2392{
2393 struct cifs_readdata *rdata;
2394
2395 rdata = kzalloc(sizeof(*rdata) +
2396 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2397 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002398 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002399 INIT_LIST_HEAD(&rdata->list);
2400 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002401 INIT_WORK(&rdata->work, complete);
2402 INIT_LIST_HEAD(&rdata->pages);
2403 }
2404 return rdata;
2405}
2406
/*
 * kref release callback for a cifs_readdata: drop the file reference
 * (if one was taken) and free the structure itself.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
2418
Jeff Layton2a1bb132012-05-16 07:13:17 -04002419static int
Jeff Layton1c892542012-05-16 07:13:17 -04002420cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2421{
2422 int rc = 0;
2423 struct page *page, *tpage;
2424 unsigned int i;
2425
2426 for (i = 0; i < npages; i++) {
2427 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2428 if (!page) {
2429 rc = -ENOMEM;
2430 break;
2431 }
2432 list_add(&page->lru, list);
2433 }
2434
2435 if (rc) {
2436 list_for_each_entry_safe(page, tpage, list, lru) {
2437 list_del(&page->lru);
2438 put_page(page);
2439 }
2440 }
2441 return rc;
2442}
2443
/*
 * kref release callback for an uncached read: drop the data pages still
 * on the list, then hand off to the common cifs_readdata_release().
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct page *page, *tpage;
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		put_page(page);
	}
	cifs_readdata_release(refcount);
}
2457
2458static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002459cifs_retry_async_readv(struct cifs_readdata *rdata)
2460{
2461 int rc;
2462
2463 do {
2464 if (rdata->cfile->invalidHandle) {
2465 rc = cifs_reopen_file(rdata->cfile, true);
2466 if (rc != 0)
2467 continue;
2468 }
2469 rc = cifs_async_readv(rdata);
2470 } while (rc == -EAGAIN);
2471
2472 return rc;
2473}
2474
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 * All pages are removed from the list and released, even after a copy
 * error, so the caller never has to clean them up.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	struct page *page, *tpage;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		ssize_t copy;

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}

		/* release the page regardless of the copy result */
		list_del(&page->lru);
		put_page(page);
	}

	return rc;
}
2531
/*
 * Workqueue completion handler for an uncached async read: undo the
 * kmaps taken by the marshal step and wake the waiter in
 * cifs_iovec_read().
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	/* if the result is non-zero then the pages weren't kmapped */
	if (rdata->result == 0) {
		struct page *page;

		list_for_each_entry(page, &rdata->pages, lru)
			kunmap(page);
	}

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2549
/*
 * Build the iov array of @rdata for a response carrying @remaining bytes
 * of data: full pages are mapped whole, the final partial page is mapped
 * and zero-padded, and pages beyond the data are released immediately.
 * iov[0] is skipped (rdata->nr_iov starts at 1). Returns the number of
 * data bytes covered by the iovs.
 */
static int
cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
				unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_SIZE;
			remaining -= PAGE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			put_page(page);
		}
	}

	return len;
}
2592
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002593static ssize_t
2594cifs_iovec_read(struct file *file, const struct iovec *iov,
2595 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596{
Jeff Layton1c892542012-05-16 07:13:17 -04002597 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002598 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002599 ssize_t total_read = 0;
2600 loff_t offset = *poffset;
2601 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002603 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002605 struct cifs_readdata *rdata, *tmp;
2606 struct list_head rdata_list;
2607 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002608
2609 if (!nr_segs)
2610 return 0;
2611
2612 len = iov_length(iov, nr_segs);
2613 if (!len)
2614 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615
Jeff Layton1c892542012-05-16 07:13:17 -04002616 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002617 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002618 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002619 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002621 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2622 pid = open_file->pid;
2623 else
2624 pid = current->tgid;
2625
Steve Frenchad7a2922008-02-07 23:25:02 +00002626 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002627 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002628
Jeff Layton1c892542012-05-16 07:13:17 -04002629 do {
2630 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2631 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002632
Jeff Layton1c892542012-05-16 07:13:17 -04002633 /* allocate a readdata struct */
2634 rdata = cifs_readdata_alloc(npages,
2635 cifs_uncached_readv_complete);
2636 if (!rdata) {
2637 rc = -ENOMEM;
2638 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002640
Jeff Layton1c892542012-05-16 07:13:17 -04002641 rc = cifs_read_allocate_pages(&rdata->pages, npages);
2642 if (rc)
2643 goto error;
2644
2645 rdata->cfile = cifsFileInfo_get(open_file);
2646 rdata->offset = offset;
2647 rdata->bytes = cur_len;
2648 rdata->pid = pid;
2649 rdata->marshal_iov = cifs_uncached_read_marshal_iov;
2650
2651 rc = cifs_retry_async_readv(rdata);
2652error:
2653 if (rc) {
2654 kref_put(&rdata->refcount,
2655 cifs_uncached_readdata_release);
2656 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 }
Jeff Layton1c892542012-05-16 07:13:17 -04002658
2659 list_add_tail(&rdata->list, &rdata_list);
2660 offset += cur_len;
2661 len -= cur_len;
2662 } while (len > 0);
2663
2664 /* if at least one read request send succeeded, then reset rc */
2665 if (!list_empty(&rdata_list))
2666 rc = 0;
2667
2668 /* the loop below should proceed in the order of increasing offsets */
2669restart_loop:
2670 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2671 if (!rc) {
2672 ssize_t copied;
2673
2674 /* FIXME: freezable sleep too? */
2675 rc = wait_for_completion_killable(&rdata->done);
2676 if (rc)
2677 rc = -EINTR;
2678 else if (rdata->result)
2679 rc = rdata->result;
2680 else {
2681 rc = cifs_readdata_to_iov(rdata, iov,
2682 nr_segs, *poffset,
2683 &copied);
2684 total_read += copied;
2685 }
2686
2687 /* resend call if it's a retryable error */
2688 if (rc == -EAGAIN) {
2689 rc = cifs_retry_async_readv(rdata);
2690 goto restart_loop;
2691 }
2692 }
2693 list_del_init(&rdata->list);
2694 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002696
Jeff Layton1c892542012-05-16 07:13:17 -04002697 cifs_stats_bytes_read(tcon, total_read);
2698 *poffset += total_read;
2699
2700 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701}
2702
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002703ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002704 unsigned long nr_segs, loff_t pos)
2705{
2706 ssize_t read;
2707
2708 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2709 if (read > 0)
2710 iocb->ki_pos = pos;
2711
2712 return read;
2713}
2714
2715ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2716 unsigned long nr_segs, loff_t pos)
2717{
2718 struct inode *inode;
2719
2720 inode = iocb->ki_filp->f_path.dentry->d_inode;
2721
2722 if (CIFS_I(inode)->clientCanCacheRead)
2723 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2724
2725 /*
2726 * In strict cache mode we need to read from the server all the time
2727 * if we don't have level II oplock because the server can delay mtime
2728 * change - so we can't make a decision about inode invalidating.
2729 * And we can also fail with pagereading if there are mandatory locks
2730 * on pages affected by this read but not on the region from pos to
2731 * pos+len-1.
2732 */
2733
2734 return cifs_user_readv(iocb, iov, nr_segs, pos);
2735}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
/*
 * Synchronous read into a kernel buffer: issue SMB reads of at most
 * rsize bytes until @read_size bytes have been read or the server
 * returns an error/EOF. Advances *poffset by the bytes read.
 *
 * Returns the total number of bytes read, or a negative errno when the
 * very first chunk fails.
 */
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For windows me and 9x we do not want to request more than it
		 * negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		/* retry the SMB read as long as the server says -EAGAIN */
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): this passes the cumulative
			 * total_read rather than bytes_read, so the stats
			 * counter over-counts on multi-chunk reads —
			 * confirm whether that is intended.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*poffset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
2819
Jeff Laytonca83ce32011-04-12 09:13:44 -04002820/*
2821 * If the page is mmap'ed into a process' page tables, then we need to make
2822 * sure that it doesn't change while being written back.
2823 */
2824static int
2825cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2826{
2827 struct page *page = vmf->page;
2828
2829 lock_page(page);
2830 return VM_FAULT_LOCKED;
2831}
2832
2833static struct vm_operations_struct cifs_file_vm_ops = {
2834 .fault = filemap_fault,
2835 .page_mkwrite = cifs_page_mkwrite,
2836};
2837
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002838int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2839{
2840 int rc, xid;
2841 struct inode *inode = file->f_path.dentry->d_inode;
2842
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002843 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002844
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002845 if (!CIFS_I(inode)->clientCanCacheRead) {
2846 rc = cifs_invalidate_mapping(inode);
2847 if (rc)
2848 return rc;
2849 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002850
2851 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002852 if (rc == 0)
2853 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002854 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002855 return rc;
2856}
2857
/*
 * Default mmap: revalidate the file's cached attributes/data first, then
 * fall through to the generic mmap and install our vm_ops.
 */
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	/* only hook our vm_ops once the generic mmap succeeded */
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}
2875
Jeff Layton0471ca32012-05-16 07:13:16 -04002876static void
2877cifs_readv_complete(struct work_struct *work)
2878{
2879 struct cifs_readdata *rdata = container_of(work,
2880 struct cifs_readdata, work);
2881 struct page *page, *tpage;
2882
2883 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2884 list_del(&page->lru);
2885 lru_cache_add_file(page);
2886
2887 if (rdata->result == 0) {
2888 kunmap(page);
2889 flush_dcache_page(page);
2890 SetPageUptodate(page);
2891 }
2892
2893 unlock_page(page);
2894
2895 if (rdata->result == 0)
2896 cifs_readpage_to_fscache(rdata->mapping->host, page);
2897
2898 page_cache_release(page);
2899 }
Jeff Layton6993f742012-05-16 07:13:17 -04002900 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04002901}
2902
/*
 * Build the iovec for a readpages request once we know how many bytes
 * (@remaining) the server is actually returning.
 *
 * Pages that will receive data are kmap'ed into rdata->iov (the matching
 * kunmap happens in cifs_readv_complete). Pages past the data — either
 * beyond the server's EOF or simply unneeded — are detached from
 * rdata->pages and released here, so the completion path never sees them.
 *
 * Returns the total number of data bytes mapped into the iovec.
 *
 * NOTE(review): rdata->nr_iov starts at 1, so iov[0] is left untouched —
 * presumably reserved by the transport for the response header; confirm
 * against the async read send path.
 */
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;
	u64 eof;
	pgoff_t eof_index;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_CACHE_SIZE;
			remaining -= PAGE_CACHE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			/* zero-fill the tail beyond the received data */
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_CACHE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			list_del(&page->lru);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return len;
}
2969
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970static int cifs_readpages(struct file *file, struct address_space *mapping,
2971 struct list_head *page_list, unsigned num_pages)
2972{
Jeff Layton690c5e32011-10-19 15:30:16 -04002973 int rc;
2974 struct list_head tmplist;
2975 struct cifsFileInfo *open_file = file->private_data;
2976 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2977 unsigned int rsize = cifs_sb->rsize;
2978 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
Jeff Layton690c5e32011-10-19 15:30:16 -04002980 /*
2981 * Give up immediately if rsize is too small to read an entire page.
2982 * The VFS will fall back to readpage. We should never reach this
2983 * point however since we set ra_pages to 0 when the rsize is smaller
2984 * than a cache page.
2985 */
2986 if (unlikely(rsize < PAGE_CACHE_SIZE))
2987 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07002988
Suresh Jayaraman56698232010-07-05 18:13:25 +05302989 /*
2990 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2991 * immediately if the cookie is negative
2992 */
2993 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2994 &num_pages);
2995 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04002996 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05302997
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002998 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2999 pid = open_file->pid;
3000 else
3001 pid = current->tgid;
3002
Jeff Layton690c5e32011-10-19 15:30:16 -04003003 rc = 0;
3004 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005
Jeff Layton690c5e32011-10-19 15:30:16 -04003006 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3007 mapping, num_pages);
3008
3009 /*
3010 * Start with the page at end of list and move it to private
3011 * list. Do the same with any following pages until we hit
3012 * the rsize limit, hit an index discontinuity, or run out of
3013 * pages. Issue the async read and then start the loop again
3014 * until the list is empty.
3015 *
3016 * Note that list order is important. The page_list is in
3017 * the order of declining indexes. When we put the pages in
3018 * the rdata->pages, then we want them in increasing order.
3019 */
3020 while (!list_empty(page_list)) {
3021 unsigned int bytes = PAGE_CACHE_SIZE;
3022 unsigned int expected_index;
3023 unsigned int nr_pages = 1;
3024 loff_t offset;
3025 struct page *page, *tpage;
3026 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
3028 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029
Jeff Layton690c5e32011-10-19 15:30:16 -04003030 /*
3031 * Lock the page and put it in the cache. Since no one else
3032 * should have access to this page, we're safe to simply set
3033 * PG_locked without checking it first.
3034 */
3035 __set_page_locked(page);
3036 rc = add_to_page_cache_locked(page, mapping,
3037 page->index, GFP_KERNEL);
3038
3039 /* give up if we can't stick it in the cache */
3040 if (rc) {
3041 __clear_page_locked(page);
3042 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044
Jeff Layton690c5e32011-10-19 15:30:16 -04003045 /* move first page to the tmplist */
3046 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3047 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048
Jeff Layton690c5e32011-10-19 15:30:16 -04003049 /* now try and add more pages onto the request */
3050 expected_index = page->index + 1;
3051 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3052 /* discontinuity ? */
3053 if (page->index != expected_index)
3054 break;
3055
3056 /* would this page push the read over the rsize? */
3057 if (bytes + PAGE_CACHE_SIZE > rsize)
3058 break;
3059
3060 __set_page_locked(page);
3061 if (add_to_page_cache_locked(page, mapping,
3062 page->index, GFP_KERNEL)) {
3063 __clear_page_locked(page);
3064 break;
3065 }
3066 list_move_tail(&page->lru, &tmplist);
3067 bytes += PAGE_CACHE_SIZE;
3068 expected_index++;
3069 nr_pages++;
3070 }
3071
Jeff Layton0471ca32012-05-16 07:13:16 -04003072 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003073 if (!rdata) {
3074 /* best to give up if we're out of mem */
3075 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3076 list_del(&page->lru);
3077 lru_cache_add_file(page);
3078 unlock_page(page);
3079 page_cache_release(page);
3080 }
3081 rc = -ENOMEM;
3082 break;
3083 }
3084
3085 spin_lock(&cifs_file_list_lock);
Jeff Layton690c5e32011-10-19 15:30:16 -04003086 spin_unlock(&cifs_file_list_lock);
Jeff Layton6993f742012-05-16 07:13:17 -04003087 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003088 rdata->mapping = mapping;
3089 rdata->offset = offset;
3090 rdata->bytes = bytes;
3091 rdata->pid = pid;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003092 rdata->marshal_iov = cifs_readpages_marshal_iov;
Jeff Layton690c5e32011-10-19 15:30:16 -04003093 list_splice_init(&tmplist, &rdata->pages);
3094
Jeff Layton2a1bb132012-05-16 07:13:17 -04003095 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003096 if (rc != 0) {
3097 list_for_each_entry_safe(page, tpage, &rdata->pages,
3098 lru) {
3099 list_del(&page->lru);
3100 lru_cache_add_file(page);
3101 unlock_page(page);
3102 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 }
Jeff Layton6993f742012-05-16 07:13:17 -04003104 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 break;
3106 }
Jeff Layton6993f742012-05-16 07:13:17 -04003107
3108 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 }
3110
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 return rc;
3112}
3113
/*
 * Read one page of file data at *poffset into @page, trying fscache
 * first and falling back to a synchronous cifs_read() from the server.
 *
 * On a server read the tail of the page beyond the bytes returned is
 * zero-filled, the page is marked uptodate, and the copy is pushed to
 * fscache. Returns 0 on success or a negative error from cifs_read().
 *
 * Note: the success path intentionally falls through the io_error label —
 * that label is simply the common kunmap/put-page exit, despite its name.
 * The page reference taken by page_cache_get() below is always dropped
 * there; the fscache-hit path returns without ever taking it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	/* reading through the page cache counts as an access */
	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero the part of the page past the data actually read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3157
3158static int cifs_readpage(struct file *file, struct page *page)
3159{
3160 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3161 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003162 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003164 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
3166 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303167 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003168 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303169 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003170 }
3171
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003172 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003173 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174
3175 rc = cifs_readpage_worker(file, page, &offset);
3176
3177 unlock_page(page);
3178
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003179 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 return rc;
3181}
3182
Steve Frencha403a0a2007-07-26 15:54:16 +00003183static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3184{
3185 struct cifsFileInfo *open_file;
3186
Jeff Layton44772882010-10-15 15:34:03 -04003187 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003188 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003189 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003190 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003191 return 1;
3192 }
3193 }
Jeff Layton44772882010-10-15 15:34:03 -04003194 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003195 return 0;
3196}
3197
/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing refreshing of the inode only on
 * increases in the file size, but this is tricky to do without racing
 * with writebehind page caching in the current Linux kernel design.
 */
Steve French4b18f2a2008-04-29 00:06:05 +00003204bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205{
Steve Frencha403a0a2007-07-26 15:54:16 +00003206 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003207 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003208
Steve Frencha403a0a2007-07-26 15:54:16 +00003209 if (is_inode_writable(cifsInode)) {
3210 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003211 struct cifs_sb_info *cifs_sb;
3212
Steve Frenchc32a0b62006-01-12 14:41:28 -08003213 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003214 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003215 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003216 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003217 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003218 }
3219
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003220 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003221 return true;
Steve French7ba52632007-02-08 18:14:13 +00003222
Steve French4b18f2a2008-04-29 00:06:05 +00003223 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003224 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003225 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226}
3227
/*
 * address_space_operations .write_begin: return in *pagep a locked,
 * referenced page ready to receive @len bytes at @pos.
 *
 * The page is brought "ready" by the cheapest applicable means, in order:
 * already uptodate; whole-page write (copy will make it uptodate); we
 * hold a read oplock and the write covers all pre-existing data in the
 * page (zero the uncovered parts and set PageChecked so write_end knows
 * the rest is effectively up to date); otherwise read the page from the
 * server. A failed read is deliberately ignored — cifs_write_end will do
 * a sync write since PG_uptodate stays clear.
 *
 * Returns 0 with *pagep set, or -ENOMEM (with *pagep = NULL) when the
 * page cannot be obtained.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3299
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303300static int cifs_release_page(struct page *page, gfp_t gfp)
3301{
3302 if (PagePrivate(page))
3303 return 0;
3304
3305 return cifs_fscache_release_page(page, gfp);
3306}
3307
3308static void cifs_invalidate_page(struct page *page, unsigned long offset)
3309{
3310 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3311
3312 if (offset == 0)
3313 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3314}
3315
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003316static int cifs_launder_page(struct page *page)
3317{
3318 int rc = 0;
3319 loff_t range_start = page_offset(page);
3320 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3321 struct writeback_control wbc = {
3322 .sync_mode = WB_SYNC_ALL,
3323 .nr_to_write = 0,
3324 .range_start = range_start,
3325 .range_end = range_end,
3326 };
3327
3328 cFYI(1, "Launder page: %p", page);
3329
3330 if (clear_page_dirty_for_io(page))
3331 rc = cifs_writepage_locked(page, &wbc);
3332
3333 cifs_fscache_invalidate_page(page, page->mapping->host);
3334 return rc;
3335}
3336
/*
 * Workqueue handler run when the server breaks our oplock.
 *
 * For regular files: break any local lease to match the remaining
 * caching level, flush dirty pages, and — when read caching is lost —
 * wait for the flush and invalidate the remote inode's cached data.
 * Then push locally cached byte-range locks to the server, and finally
 * acknowledge the break with a LOCKING_ANDX_OPLOCK_RELEASE unless the
 * break was cancelled (e.g. stale handle after reconnect — the server
 * has already dropped the oplock in that case).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* lease mode mirrors the caching we are allowed to keep */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read cache: wait out the flush and drop it */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* cached byte-range locks must now live on the server */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3377
/*
 * Address space operations used when the negotiated buffer size is
 * large enough for full-page reads (includes .readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003390
3391/*
3392 * cifs_readpages requires the server to support a buffer large enough to
3393 * contain the header plus one complete page of data. Otherwise, we need
3394 * to leave cifs_readpages out of the address space operations.
3395 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* no .readpages: server buffer too small for header + full page */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};