/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

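/*
 * Translate the O_ACCMODE portion of the POSIX open() flags into the
 * desired-access bits requested in the SMB open call.
 */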
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

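/*
 * Translate POSIX open() flags into the SMB_O_* flags used by the POSIX
 * open/create call of the CIFS Unix Extensions.
 */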
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

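/*
 * Derive the SMB create disposition from the O_CREAT/O_EXCL/O_TRUNC
 * combination (see the open flag mapping table in cifs_nt_open() below).
 */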
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

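/*
 * Open a file via the POSIX create call of the CIFS Unix Extensions and,
 * if the caller asked for it, instantiate or update the inode from the
 * FILE_UNIX_BASIC_INFO data returned by the server.
 */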
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

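/*
 * Open a file with the traditional NT-style SMB open (or the legacy open
 * for pre-NT servers) and refresh the inode information afterwards.
 */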
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
             __u16 *pnetfid, unsigned int xid)
{
        int rc;
        int desiredAccess;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;

        desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is not a direct match for disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists);
 *      O_CREAT | O_TRUNC is similar but truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag
 *      and the read write flags match reasonably.  O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (tcon->ses->capabilities & CAP_NT_SMBS)
                rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
                         desiredAccess, create_options, pnetfid, poplock, buf,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);
        else
                rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
                        desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, pnetfid);

out:
        kfree(buf);
        return rc;
}

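/*
 * Allocate and initialize the per-open cifsFileInfo structure, take a
 * reference on the dentry and the tcon link, and add the new entry to the
 * tcon and inode open-file lists; readable instances are placed at the
 * front of the inode list.
 */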
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        cfile->count = 1;
        cfile->fid.netfid = fileHandle;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        mutex_init(&cfile->fh_mutex);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        INIT_LIST_HEAD(&cfile->llist);

        spin_lock(&cifs_file_list_lock);
        list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
        /* if a readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        cifs_set_oplock_level(cinode, oplock);
        cinode->can_cache_brlcks = cinode->clientCanCacheAll;

        file->private_data = cfile;
        return cfile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                        cifs_file->dentry->d_inode);

                /* in strict cache mode we need invalidate mapping on the last
                   close because it may cause an error when we open this file
                   again and get at least level II oplock */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;

                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                unsigned int xid;
                int rc;
                xid = get_xid();
                rc = CIFSSMBClose(xid, tcon, cifs_file->fid.netfid);
                free_xid(xid);
        }

        /* Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        mutex_lock(&cifsi->lock_mutex);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        mutex_unlock(&cifsi->lock_mutex);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

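/*
 * ->open() for cifs files: try a POSIX open when the server supports the
 * Unix Extensions, fall back to the NT-style open otherwise, then attach
 * the resulting handle to the file via cifs_new_fileinfo().
 */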
int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *pCifsFile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        __u16 netfid;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /* else fallthrough to retry open the old way on network i/o
                   or DFS errors */
        }

        if (!posix_open_ok) {
                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &netfid, xid);
                if (rc)
                        goto out;
        }

        pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
        if (pCifsFile == NULL) {
                CIFSSMBClose(xid, tcon, netfid);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /* time to set mode which we can not set earlier due to
                   problems creating new read-only files */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
                                        pCifsFile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/* Try to reacquire byte range locks that were released when session
   to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

/* BB list all locks open on this file and relock */

        return rc;
}

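/*
 * Reopen a file handle that was invalidated, typically after the session
 * to the server was lost and reconnected. With can_flush set, dirty pages
 * are written back and the inode metadata is refreshed from the server.
 */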
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        __u16 netfid;

        xid = get_xid();
        mutex_lock(&pCifsFile->fh_mutex);
        if (!pCifsFile->invalidHandle) {
                mutex_unlock(&pCifsFile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = pCifsFile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
        full_path = build_path_from_dentry(pCifsFile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&pCifsFile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s",
                 inode, pCifsFile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = pCifsFile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /* fallthrough to retry open the old way on errors, especially
                   in the reconnect path it is important to retry hard */
        }

        desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /* Can not refresh inode by passing in file_info buf to be returned
           by SMBOpen and then calling get_inode_info with returned buf
           since file might have write behind data that needs to be flushed
           and server version of file size can be stale. If we knew for sure
           that inode was not dirty locally we could do this */

        rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
                         create_options, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                mutex_unlock(&pCifsFile->fh_mutex);
                cFYI(1, "cifs_open returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        pCifsFile->fid.netfid = netfid;
        pCifsFile->invalidHandle = false;
        mutex_unlock(&pCifsFile->fh_mutex);
        pCifsInode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode,
                                full_path, inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode,
                                full_path, NULL, inode->i_sb,
                                xid, NULL);
        } /* else we are writing out data to server already
             and could deadlock if we tried to flush data, and
             since we do not know if we have data that would
             invalidate the current end of file on the server
             we can not go to the server to get the new inode
             info */

        cifs_set_oplock_level(pCifsInode, oplock);

        cifs_relock_file(pCifsFile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        char *tmp;

        cFYI(1, "Closedir inode = 0x%p", inode);

        xid = get_xid();

        if (cfile) {
                struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

                cFYI(1, "Freeing private data in close dir");
                spin_lock(&cifs_file_list_lock);
                if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        rc = CIFSFindClose(xid, tcon, cfile->fid.netfid);
                        cFYI(1, "Closing uncompleted readdir with rc %d", rc);
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                } else
                        spin_unlock(&cifs_file_list_lock);
                tmp = cfile->srch_inf.ntwrk_buf_start;
                if (tmp) {
                        cFYI(1, "closedir free smb buf in srch struct");
                        cfile->srch_inf.ntwrk_buf_start = NULL;
                        if (cfile->srch_inf.smallBuf)
                                cifs_small_buf_release(tmp);
                        else
                                cifs_buf_release(tmp);
                }
                cifs_put_tlink(cfile->tlink);
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

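/* Allocate and initialize a record for a single byte-range lock. */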
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

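/*
 * Scan the lock list of one open file for a lock that overlaps the given
 * range and is incompatible with the requested type. A shared (read)
 * request does not conflict with another shared lock, nor with a lock
 * taken by the same process on the same file handle.
 */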
static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cur,
                            struct cifsLockInfo **conf_lock)
{
        struct cifsLockInfo *li;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &cfile->llist, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                else if ((type & server->vals->shared_lock_type) &&
                         ((server->ops->compare_fids(cur, cfile) &&
                           current->tgid == li->pid) || type == li->type))
                        continue;
                else {
                        *conf_lock = li;
                        return true;
                }
        }
        return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock)
{
        bool rc = false;
        struct cifsFileInfo *fid, *tmp;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        spin_lock(&cifs_file_list_lock);
        list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
                rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
                                                 cfile, conf_lock);
                if (rc)
                        break;
        }
        spin_unlock(&cifs_file_list_lock);

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        mutex_lock(&cinode->lock_mutex);
        list_add_tail(&lock->llist, &cfile->llist);
        mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist);
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                mutex_unlock(&cinode->lock_mutex);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                mutex_lock(&cinode->lock_mutex);
                list_del_init(&lock->blist);
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        mutex_lock(&cinode->lock_mutex);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        mutex_unlock(&cinode->lock_mutex);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

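/*
 * Push all cached byte-range locks for this handle out to the server,
 * batching as many LOCKING_ANDX_RANGE entries per request as the
 * negotiated buffer size allows; exclusive and shared locks are sent in
 * separate passes.
 */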
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        kfree(buf);
        free_xid(xid);
        return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_mutex that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                return cifs_push_posix_locks(cfile);

        return cifs_push_mandatory_locks(cfile);
}

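/*
 * Decode a struct file_lock into the lock type and lock/unlock operation
 * bits understood by the SMB locking calls, noting whether the caller is
 * willing to block.
 */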
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
                bool *wait_flag, struct TCP_Server_Info *server)
{
        if (flock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
        if (flock->fl_flags & FL_FLOCK)
                cFYI(1, "Flock");
        if (flock->fl_flags & FL_SLEEP) {
                cFYI(1, "Blocking lock");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cFYI(1, "Process suspended by mandatory locking - "
                        "not implemented yet");
        if (flock->fl_flags & FL_LEASE)
                cFYI(1, "Lease on file - not implemented yet");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

        *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK ");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
                *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cFYI(1, "Unknown type of lock");
}

static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
                    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
        return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
                           current->tgid, length, offset, unlock, lock,
                           (__u8)type, wait, 0);
}

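/*
 * Handle F_GETLK: test for a conflicting lock, first against the locally
 * cached locks and then, if necessary, by probing the server with a
 * temporary lock/unlock pair.
 */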
1144static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001145cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001146 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001147{
1148 int rc = 0;
1149 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001150 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1151 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001152 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001153 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001155 if (posix_lck) {
1156 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001157
1158 rc = cifs_posix_lock_test(file, flock);
1159 if (!rc)
1160 return rc;
1161
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001162 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001163 posix_lock_type = CIFS_RDLCK;
1164 else
1165 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001166 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001167 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001168 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 return rc;
1170 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001171
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001172 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001173 if (!rc)
1174 return rc;
1175
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001176 /* BB we could chain these into one lock request BB */
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001177 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
1178 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001179 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001180 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1181 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001182 flock->fl_type = F_UNLCK;
1183 if (rc != 0)
	1184			cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001185				  "range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001186 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001187 }
1188
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001189 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001190 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001191 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001192 }
1193
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001194 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1195 type | server->vals->shared_lock_type, 1, 0,
1196 false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001197 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001198 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1199 type | server->vals->shared_lock_type,
1200 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001201 flock->fl_type = F_RDLCK;
1202 if (rc != 0)
	1203			cERROR(1, "Error %d unlocking previously locked "
	1204				  "range during test of lock", rc);
1205 } else
1206 flock->fl_type = F_WRLCK;
1207
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001208 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001209}
1210
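/*
 * Helpers for cifs_unlock_range(): cifs_move_llist() splices lock
 * entries from one list onto another, cifs_free_llist() wakes any
 * waiters and frees every entry on the list.
 */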
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001211static void
1212cifs_move_llist(struct list_head *source, struct list_head *dest)
1213{
1214 struct list_head *li, *tmp;
1215 list_for_each_safe(li, tmp, source)
1216 list_move(li, dest);
1217}
1218
1219static void
1220cifs_free_llist(struct list_head *llist)
1221{
1222 struct cifsLockInfo *li, *tmp;
1223 list_for_each_entry_safe(li, tmp, llist, llist) {
1224 cifs_del_lock_waiters(li);
1225 list_del(&li->llist);
1226 kfree(li);
1227 }
1228}
1229
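/*
 * Unlock every cached lock that falls inside the requested range. Two
 * passes are made, one per lock type. Matching entries are batched into
 * a LOCKING_ANDX_RANGE array (at most max_num per request) and parked on
 * a temporary list so they can be restored to the file's list if the
 * server rejects the unlock request.
 */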
1230static int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001231cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1232 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001233{
1234 int rc = 0, stored_rc;
1235 int types[] = {LOCKING_ANDX_LARGE_FILES,
1236 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1237 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001238 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001239 LOCKING_ANDX_RANGE *buf, *cur;
1240 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1241 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1242 struct cifsLockInfo *li, *tmp;
1243 __u64 length = 1 + flock->fl_end - flock->fl_start;
1244 struct list_head tmp_llist;
1245
1246 INIT_LIST_HEAD(&tmp_llist);
1247
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001248 /*
1249 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1250 * and check it for zero before using.
1251 */
1252 max_buf = tcon->ses->server->maxBuf;
1253 if (!max_buf)
1254 return -EINVAL;
1255
1256 max_num = (max_buf - sizeof(struct smb_hdr)) /
1257 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001258 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1259 if (!buf)
1260 return -ENOMEM;
1261
1262 mutex_lock(&cinode->lock_mutex);
1263 for (i = 0; i < 2; i++) {
1264 cur = buf;
1265 num = 0;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001266 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001267 if (flock->fl_start > li->offset ||
1268 (flock->fl_start + length) <
1269 (li->offset + li->length))
1270 continue;
1271 if (current->tgid != li->pid)
1272 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001273 if (types[i] != li->type)
1274 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001275 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001276 /*
1277 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001278 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001279 */
1280 list_del(&li->llist);
1281 cifs_del_lock_waiters(li);
1282 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001283 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001284 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001285 cur->Pid = cpu_to_le16(li->pid);
1286 cur->LengthLow = cpu_to_le32((u32)li->length);
1287 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1288 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1289 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1290 /*
1291 * We need to save a lock here to let us add it again to
1292 * the file's list if the unlock range request fails on
1293 * the server.
1294 */
1295 list_move(&li->llist, &tmp_llist);
1296 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001297 stored_rc = cifs_lockv(xid, tcon,
1298 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001299 li->type, num, 0, buf);
1300 if (stored_rc) {
1301 /*
1302 * We failed on the unlock range
1303 * request - add all locks from the tmp
1304 * list to the head of the file's list.
1305 */
1306 cifs_move_llist(&tmp_llist,
1307 &cfile->llist);
1308 rc = stored_rc;
1309 } else
1310 /*
	1311				 * The unlock range request succeeded -
1312 * free the tmp list.
1313 */
1314 cifs_free_llist(&tmp_llist);
1315 cur = buf;
1316 num = 0;
1317 } else
1318 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001319 }
1320 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001321 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001322 types[i], num, 0, buf);
1323 if (stored_rc) {
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001324 cifs_move_llist(&tmp_llist, &cfile->llist);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001325 rc = stored_rc;
1326 } else
1327 cifs_free_llist(&tmp_llist);
1328 }
1329 }
1330
1331 mutex_unlock(&cinode->lock_mutex);
1332 kfree(buf);
1333 return rc;
1334}
1335
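/*
 * Handle F_SETLK/F_SETLKW. POSIX requests that cannot be satisfied
 * locally go to the server via CIFSSMBPosixLock. For mandatory locks a
 * cifsLockInfo is allocated and checked against the cached locks; if it
 * cannot be handled locally it is sent to the server and, on success,
 * added to the file's lock list. Unlocks go through cifs_unlock_range().
 */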
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001336static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001337cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001338 bool wait_flag, bool posix_lck, int lock, int unlock,
1339 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001340{
1341 int rc = 0;
1342 __u64 length = 1 + flock->fl_end - flock->fl_start;
1343 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1344 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001345 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001346 __u16 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001347
1348 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001349 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001350
1351 rc = cifs_posix_lock_set(file, flock);
	1352		if (rc <= 0)
1353 return rc;
1354
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001355 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001356 posix_lock_type = CIFS_RDLCK;
1357 else
1358 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001359
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001361 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001362
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001363 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001364 flock->fl_start, length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001365 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001366 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001367 }
1368
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001369 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001370 struct cifsLockInfo *lock;
1371
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001372 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001373 if (!lock)
1374 return -ENOMEM;
1375
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001376 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001377 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001378 kfree(lock);
1379 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001380 goto out;
1381
Pavel Shilovsky7f924472012-03-28 17:10:25 +04001382 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1383 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001384 if (rc) {
1385 kfree(lock);
1386 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001388
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001389 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001390 } else if (unlock)
1391 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001392
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001393out:
1394 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001395 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001396 return rc;
1397}
1398
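/*
 * Entry point for the ->lock() file operation: decode the file_lock into
 * a lock type plus lock/unlock flags, decide whether POSIX (Unix
 * extension) semantics apply, and dispatch to cifs_getlk() or
 * cifs_setlk().
 */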
1399int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1400{
1401 int rc, xid;
1402 int lock = 0, unlock = 0;
1403 bool wait_flag = false;
1404 bool posix_lck = false;
1405 struct cifs_sb_info *cifs_sb;
1406 struct cifs_tcon *tcon;
1407 struct cifsInodeInfo *cinode;
1408 struct cifsFileInfo *cfile;
1409 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001410 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001411
1412 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001413 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001414
1415 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1416 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1417 flock->fl_start, flock->fl_end);
1418
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001419 cfile = (struct cifsFileInfo *)file->private_data;
1420 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001421
1422 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1423 tcon->ses->server);
1424
1425 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001426 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427 cinode = CIFS_I(file->f_path.dentry->d_inode);
1428
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001429 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001430 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1431 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1432 posix_lck = true;
1433 /*
1434 * BB add code here to normalize offset and length to account for
	1435	 * negative length which we cannot accept over the wire.
1436 */
1437 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001438 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001439 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001440 return rc;
1441 }
1442
1443 if (!lock && !unlock) {
1444 /*
	1445	 * if this is neither a lock nor an unlock request, there is
	1446	 * nothing to do since we do not know what it is
1447 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001448 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001449 return -EOPNOTSUPP;
1450 }
1451
1452 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1453 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001454 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 return rc;
1456}
1457
Jeff Layton597b0272012-03-23 14:40:56 -04001458/*
1459 * update the file size (if needed) after a write. Should be called with
1460 * the inode->i_lock held
1461 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001462void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001463cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1464 unsigned int bytes_written)
1465{
1466 loff_t end_of_write = offset + bytes_written;
1467
1468 if (end_of_write > cifsi->server_eof)
1469 cifsi->server_eof = end_of_write;
1470}
1471
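/*
 * Write data at *poffset through the given open file, splitting the
 * buffer into wsize-sized SMB writes and retrying on -EAGAIN (reopening
 * an invalidated handle if need be). Updates the cached server EOF and
 * the inode size as bytes go out.
 */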
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001472static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001473 const char *write_data, size_t write_size,
1474 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475{
1476 int rc = 0;
1477 unsigned int bytes_written = 0;
1478 unsigned int total_written;
1479 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001480 struct cifs_tcon *pTcon;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001481 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001482 struct dentry *dentry = open_file->dentry;
1483 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001484 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
Jeff Layton7da4b492010-10-15 15:34:00 -04001486 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
Joe Perchesb6b38f72010-04-21 03:50:45 +00001488 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001489 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490
Jeff Layton13cfb732010-09-29 19:51:11 -04001491 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001492
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001493 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 for (total_written = 0; write_size > total_written;
1496 total_written += bytes_written) {
1497 rc = -EAGAIN;
1498 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001499 struct kvec iov[2];
1500 unsigned int len;
1501
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 /* we could deadlock if we called
1504 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001505 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001507 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 if (rc != 0)
1509 break;
1510 }
Steve French3e844692005-10-03 13:37:24 -07001511
Jeff Laytonca83ce32011-04-12 09:13:44 -04001512 len = min((size_t)cifs_sb->wsize,
1513 write_size - total_written);
1514 /* iov[0] is reserved for smb header */
1515 iov[1].iov_base = (char *)write_data + total_written;
1516 iov[1].iov_len = len;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001517 io_parms.netfid = open_file->fid.netfid;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001518 io_parms.pid = pid;
1519 io_parms.tcon = pTcon;
1520 io_parms.offset = *poffset;
1521 io_parms.length = len;
1522 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1523 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 }
1525 if (rc || (bytes_written == 0)) {
1526 if (total_written)
1527 break;
1528 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001529 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 return rc;
1531 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001532 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001533 spin_lock(&dentry->d_inode->i_lock);
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001534 cifs_update_eof(cifsi, *poffset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001535 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001537 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 }
1539
Steve Frencha4544342005-08-24 13:59:35 -07001540 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
Jeff Layton7da4b492010-10-15 15:34:00 -04001542 if (total_written > 0) {
1543 spin_lock(&dentry->d_inode->i_lock);
1544 if (*poffset > dentry->d_inode->i_size)
1545 i_size_write(dentry->d_inode, *poffset);
1546 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001548 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001549 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 return total_written;
1551}
1552
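/*
 * Find a valid, readable open handle for this inode. On multiuser
 * mounts only handles opened by the current fsuid are considered.
 */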
Jeff Layton6508d902010-09-29 19:51:11 -04001553struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1554 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001555{
1556 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001557 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1558
1559 /* only filter by fsuid on multiuser mounts */
1560 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1561 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001562
Jeff Layton44772882010-10-15 15:34:03 -04001563 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001564 /* we could simply get the first_list_entry since write-only entries
1565 are always at the end of the list but since the first entry might
1566 have a close pending, we go through the whole list */
1567 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001568 if (fsuid_only && open_file->uid != current_fsuid())
1569 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001570 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001571 if (!open_file->invalidHandle) {
1572 /* found a good file */
1573 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001574 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001575 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001576 return open_file;
1577 } /* else might as well continue, and look for
1578 another, or simply have the caller reopen it
1579 again rather than trying to fix this handle */
1580 } else /* write only file */
1581 break; /* write only files are last so must be done */
1582 }
Jeff Layton44772882010-10-15 15:34:03 -04001583 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001584 return NULL;
1585}
Steve French630f3f0c2007-10-25 21:17:17 +00001586
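/*
 * Find a writable open handle for this inode, preferring one opened by
 * the current task, then (on multiuser mounts) any one matching the
 * current fsuid. If only invalid handles remain, try to reopen one,
 * giving up after MAX_REOPEN_ATT attempts.
 */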
Jeff Layton6508d902010-09-29 19:51:11 -04001587struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1588 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001589{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001590 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001591 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001592 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001593 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001594 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001595
Steve French60808232006-04-22 15:53:05 +00001596 /* Having a null inode here (because mapping->host was set to zero by
1597 the VFS or MM) should not happen but we had reports of on oops (due to
1598 it being zero) during stress testcases so we need to check for it */
1599
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001600 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001601		cERROR(1, "Null inode passed to find_writable_file");
Steve French60808232006-04-22 15:53:05 +00001602 dump_stack();
1603 return NULL;
1604 }
1605
Jeff Laytond3892292010-11-02 16:22:50 -04001606 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1607
Jeff Layton6508d902010-09-29 19:51:11 -04001608 /* only filter by fsuid on multiuser mounts */
1609 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1610 fsuid_only = false;
1611
Jeff Layton44772882010-10-15 15:34:03 -04001612 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001613refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001614 if (refind > MAX_REOPEN_ATT) {
1615 spin_unlock(&cifs_file_list_lock);
1616 return NULL;
1617 }
Steve French6148a742005-10-05 12:23:19 -07001618 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001619 if (!any_available && open_file->pid != current->tgid)
1620 continue;
1621 if (fsuid_only && open_file->uid != current_fsuid())
1622 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001623 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001624 if (!open_file->invalidHandle) {
1625 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001626 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001627 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001628 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001629 } else {
1630 if (!inv_file)
1631 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001632 }
Steve French6148a742005-10-05 12:23:19 -07001633 }
1634 }
Jeff Layton2846d382008-09-22 21:33:33 -04001635	/* couldn't find usable FH with same pid, try any available */
1636 if (!any_available) {
1637 any_available = true;
1638 goto refind_writable;
1639 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001640
1641 if (inv_file) {
1642 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001643 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001644 }
1645
Jeff Layton44772882010-10-15 15:34:03 -04001646 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001647
1648 if (inv_file) {
1649 rc = cifs_reopen_file(inv_file, false);
1650 if (!rc)
1651 return inv_file;
1652 else {
1653 spin_lock(&cifs_file_list_lock);
1654 list_move_tail(&inv_file->flist,
1655 &cifs_inode->openFileList);
1656 spin_unlock(&cifs_file_list_lock);
1657 cifsFileInfo_put(inv_file);
1658 spin_lock(&cifs_file_list_lock);
1659 ++refind;
1660 goto refind_writable;
1661 }
1662 }
1663
Steve French6148a742005-10-05 12:23:19 -07001664 return NULL;
1665}
1666
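/*
 * Write the dirty part of a page ("from" to "to") back to the server
 * through any writable handle for the inode, clamping the range so the
 * file is never extended.
 */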
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1668{
1669 struct address_space *mapping = page->mapping;
1670 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1671 char *write_data;
1672 int rc = -EFAULT;
1673 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001675 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
1677 if (!mapping || !mapping->host)
1678 return -EFAULT;
1679
1680 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
1682 offset += (loff_t)from;
1683 write_data = kmap(page);
1684 write_data += from;
1685
1686 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1687 kunmap(page);
1688 return -EIO;
1689 }
1690
1691 /* racing with truncate? */
1692 if (offset > mapping->host->i_size) {
1693 kunmap(page);
1694 return 0; /* don't care */
1695 }
1696
1697 /* check to make sure that we are not extending the file */
1698 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001699 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700
Jeff Layton6508d902010-09-29 19:51:11 -04001701 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001702 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001703 bytes_written = cifs_write(open_file, open_file->pid,
1704 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001705 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001707 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001708 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001709 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001710 else if (bytes_written < 0)
1711 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001712 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001713 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 rc = -EIO;
1715 }
1716
1717 kunmap(page);
1718 return rc;
1719}
1720
Jeff Laytone9492872012-03-23 14:40:56 -04001721/*
1722 * Marshal up the iov array, reserving the first one for the header. Also,
1723 * set wdata->bytes.
1724 */
1725static void
1726cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1727{
1728 int i;
1729 struct inode *inode = wdata->cfile->dentry->d_inode;
1730 loff_t size = i_size_read(inode);
1731
1732 /* marshal up the pages into iov array */
1733 wdata->bytes = 0;
1734 for (i = 0; i < wdata->nr_pages; i++) {
1735 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1736 (loff_t)PAGE_CACHE_SIZE);
1737 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1738 wdata->bytes += iov[i + 1].iov_len;
1739 }
1740}
1741
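/*
 * Writeback implementation: gather runs of consecutive dirty pages (up
 * to wsize worth) into a cifs_writedata and send each run as a single
 * asynchronous SMB write, falling back to generic_writepages() when
 * wsize is smaller than the page size.
 */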
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001743 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001745 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1746 bool done = false, scanned = false, range_whole = false;
1747 pgoff_t end, index;
1748 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07001749 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001750 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001751
Steve French37c0eb42005-10-05 14:50:29 -07001752 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001753 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001754 * one page at a time via cifs_writepage
1755 */
1756 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1757 return generic_writepages(mapping, wbc);
1758
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001759 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001760 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001761 end = -1;
1762 } else {
1763 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1764 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1765 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001766 range_whole = true;
1767 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001768 }
1769retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001770 while (!done && index <= end) {
1771 unsigned int i, nr_pages, found_pages;
1772 pgoff_t next = 0, tofind;
1773 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001774
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001775 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1776 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001777
Jeff Laytonc2e87642012-03-23 14:40:55 -04001778 wdata = cifs_writedata_alloc((unsigned int)tofind,
1779 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001780 if (!wdata) {
1781 rc = -ENOMEM;
1782 break;
1783 }
1784
1785 /*
1786 * find_get_pages_tag seems to return a max of 256 on each
1787 * iteration, so we must call it several times in order to
1788 * fill the array or the wsize is effectively limited to
1789 * 256 * PAGE_CACHE_SIZE.
1790 */
1791 found_pages = 0;
1792 pages = wdata->pages;
1793 do {
1794 nr_pages = find_get_pages_tag(mapping, &index,
1795 PAGECACHE_TAG_DIRTY,
1796 tofind, pages);
1797 found_pages += nr_pages;
1798 tofind -= nr_pages;
1799 pages += nr_pages;
1800 } while (nr_pages && tofind && index <= end);
1801
1802 if (found_pages == 0) {
1803 kref_put(&wdata->refcount, cifs_writedata_release);
1804 break;
1805 }
1806
1807 nr_pages = 0;
1808 for (i = 0; i < found_pages; i++) {
1809 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001810 /*
1811 * At this point we hold neither mapping->tree_lock nor
1812 * lock on the page itself: the page may be truncated or
1813 * invalidated (changing page->mapping to NULL), or even
1814 * swizzled back from swapper_space to tmpfs file
1815 * mapping
1816 */
1817
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001818 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001819 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001820 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001821 break;
1822
1823 if (unlikely(page->mapping != mapping)) {
1824 unlock_page(page);
1825 break;
1826 }
1827
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001828 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001829 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001830 unlock_page(page);
1831 break;
1832 }
1833
1834 if (next && (page->index != next)) {
1835 /* Not next consecutive page */
1836 unlock_page(page);
1837 break;
1838 }
1839
1840 if (wbc->sync_mode != WB_SYNC_NONE)
1841 wait_on_page_writeback(page);
1842
1843 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001844 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001845 unlock_page(page);
1846 break;
1847 }
Steve French84d2f072005-10-12 15:32:05 -07001848
Linus Torvaldscb876f42006-12-23 16:19:07 -08001849 /*
1850 * This actually clears the dirty bit in the radix tree.
1851 * See cifs_writepage() for more commentary.
1852 */
1853 set_page_writeback(page);
1854
Steve French84d2f072005-10-12 15:32:05 -07001855 if (page_offset(page) >= mapping->host->i_size) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001856 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001857 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001858 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001859 break;
1860 }
1861
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001862 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001863 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001864 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001865 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001866
1867 /* reset index to refind any pages skipped */
1868 if (nr_pages == 0)
1869 index = wdata->pages[0]->index + 1;
1870
1871 /* put any pages we aren't going to use */
1872 for (i = nr_pages; i < found_pages; i++) {
1873 page_cache_release(wdata->pages[i]);
1874 wdata->pages[i] = NULL;
1875 }
1876
1877 /* nothing to write? */
1878 if (nr_pages == 0) {
1879 kref_put(&wdata->refcount, cifs_writedata_release);
1880 continue;
1881 }
1882
1883 wdata->sync_mode = wbc->sync_mode;
1884 wdata->nr_pages = nr_pages;
1885 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytone9492872012-03-23 14:40:56 -04001886 wdata->marshal_iov = cifs_writepages_marshal_iov;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001887
1888 do {
1889 if (wdata->cfile != NULL)
1890 cifsFileInfo_put(wdata->cfile);
1891 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1892 false);
1893 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001894 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001895 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001896 break;
Steve French37c0eb42005-10-05 14:50:29 -07001897 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001898 wdata->pid = wdata->cfile->pid;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001899 rc = cifs_async_writev(wdata);
1900 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001901
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001902 for (i = 0; i < nr_pages; ++i)
1903 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001904
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001905 /* send failure -- clean up the mess */
1906 if (rc != 0) {
1907 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001908 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001909 redirty_page_for_writepage(wbc,
1910 wdata->pages[i]);
1911 else
1912 SetPageError(wdata->pages[i]);
1913 end_page_writeback(wdata->pages[i]);
1914 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001915 }
Jeff Layton941b8532011-01-11 07:24:01 -05001916 if (rc != -EAGAIN)
1917 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001918 }
1919 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001920
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001921 wbc->nr_to_write -= nr_pages;
1922 if (wbc->nr_to_write <= 0)
1923 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001924
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001925 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001926 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001927
Steve French37c0eb42005-10-05 14:50:29 -07001928 if (!scanned && !done) {
1929 /*
1930 * We hit the last page and there is more work to be done: wrap
1931 * back to the start of the file
1932 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001933 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001934 index = 0;
1935 goto retry;
1936 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001937
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001938 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001939 mapping->writeback_index = index;
1940
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 return rc;
1942}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
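/*
 * Write a single page back to the server, retrying -EAGAIN indefinitely
 * for WB_SYNC_ALL and redirtying the page otherwise.
 */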
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001944static int
1945cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001947 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001948 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001950 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951/* BB add check for wbc flags */
1952 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001953 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001954 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001955
1956 /*
1957 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1958 *
1959 * A writepage() implementation always needs to do either this,
1960 * or re-dirty the page with "redirty_page_for_writepage()" in
1961 * the case of a failure.
1962 *
1963 * Just unlocking the page will cause the radix tree tag-bits
1964 * to fail to update with the state of the page correctly.
1965 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001966 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001967retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001969 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1970 goto retry_write;
1971 else if (rc == -EAGAIN)
1972 redirty_page_for_writepage(wbc, page);
1973 else if (rc != 0)
1974 SetPageError(page);
1975 else
1976 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001977 end_page_writeback(page);
1978 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001979 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 return rc;
1981}
1982
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001983static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1984{
1985 int rc = cifs_writepage_locked(page, wbc);
1986 unlock_page(page);
1987 return rc;
1988}
1989
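/*
 * ->write_end(): for an uptodate page just mark it dirty and let
 * writeback do the work; otherwise push the copied bytes to the server
 * synchronously via cifs_write().
 */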
Nick Piggind9414772008-09-24 11:32:59 -04001990static int cifs_write_end(struct file *file, struct address_space *mapping,
1991 loff_t pos, unsigned len, unsigned copied,
1992 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993{
Nick Piggind9414772008-09-24 11:32:59 -04001994 int rc;
1995 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00001996 struct cifsFileInfo *cfile = file->private_data;
1997 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1998 __u32 pid;
1999
2000 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2001 pid = cfile->pid;
2002 else
2003 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
Joe Perchesb6b38f72010-04-21 03:50:45 +00002005 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2006 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002007
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002008 if (PageChecked(page)) {
2009 if (copied == len)
2010 SetPageUptodate(page);
2011 ClearPageChecked(page);
2012 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002013 SetPageUptodate(page);
2014
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002016 char *page_data;
2017 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002018 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002019
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002020 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021		/* this is probably better than directly calling
	2022		   partialpage_write, since here the file handle is
	2023		   known and we might as well leverage it */
2024 /* BB check if anything else missing out of ppw
2025 such as updating last write time */
2026 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002027 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002028 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002030
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002031 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002032 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002033 rc = copied;
2034 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 set_page_dirty(page);
2036 }
2037
Nick Piggind9414772008-09-24 11:32:59 -04002038 if (rc > 0) {
2039 spin_lock(&inode->i_lock);
2040 if (pos > inode->i_size)
2041 i_size_write(inode, pos);
2042 spin_unlock(&inode->i_lock);
2043 }
2044
2045 unlock_page(page);
2046 page_cache_release(page);
2047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 return rc;
2049}
2050
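/*
 * Strict cache mode fsync: flush dirty pages and, if we no longer hold a
 * read oplock, invalidate the page cache before asking the server to
 * flush the file.
 */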
Josef Bacik02c24a82011-07-16 20:44:56 -04002051int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2052 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002054 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002056 struct cifs_tcon *tcon;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002057 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002058 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002059 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
Josef Bacik02c24a82011-07-16 20:44:56 -04002061 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2062 if (rc)
2063 return rc;
2064 mutex_lock(&inode->i_mutex);
2065
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002066 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
Joe Perchesb6b38f72010-04-21 03:50:45 +00002068 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002069 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002070
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002071 if (!CIFS_I(inode)->clientCanCacheRead) {
2072 rc = cifs_invalidate_mapping(inode);
2073 if (rc) {
2074 cFYI(1, "rc: %d during invalidate phase", rc);
2075 rc = 0; /* don't care about it in fsync */
2076 }
2077 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002078
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002079 tcon = tlink_tcon(smbfile->tlink);
2080 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07002081 rc = CIFSSMBFlush(xid, tcon, smbfile->fid.netfid);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002082
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002083 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002084 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002085 return rc;
2086}
2087
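/*
 * Non-strict fsync: flush dirty pages and send an SMB flush, leaving the
 * page cache intact.
 */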
Josef Bacik02c24a82011-07-16 20:44:56 -04002088int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002089{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002090 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002091 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002092 struct cifs_tcon *tcon;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002093 struct cifsFileInfo *smbfile = file->private_data;
2094 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002095 struct inode *inode = file->f_mapping->host;
2096
2097 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2098 if (rc)
2099 return rc;
2100 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002101
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002102 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002103
2104 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2105 file->f_path.dentry->d_name.name, datasync);
2106
2107 tcon = tlink_tcon(smbfile->tlink);
2108 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07002109 rc = CIFSSMBFlush(xid, tcon, smbfile->fid.netfid);
Steve Frenchb298f222009-02-21 21:17:43 +00002110
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002111 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002112 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 return rc;
2114}
2115
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116/*
2117 * As file closes, flush all cached write data for this inode checking
2118 * for write behind errors.
2119 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002120int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002122 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 int rc = 0;
2124
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002125 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002126 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002127
Joe Perchesb6b38f72010-04-21 03:50:45 +00002128 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130 return rc;
2131}
2132
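/*
 * Allocate num_pages pages for an uncached write, releasing everything
 * allocated so far if any allocation fails.
 */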
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002133static int
2134cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2135{
2136 int rc = 0;
2137 unsigned long i;
2138
2139 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002140 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002141 if (!pages[i]) {
2142 /*
2143 * save number of pages we have already allocated and
2144 * return with ENOMEM error
2145 */
2146 num_pages = i;
2147 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002148 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002149 }
2150 }
2151
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002152 if (rc) {
2153 for (i = 0; i < num_pages; i++)
2154 put_page(pages[i]);
2155 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002156 return rc;
2157}
2158
2159static inline
2160size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2161{
2162 size_t num_pages;
2163 size_t clen;
2164
2165 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002166 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002167
2168 if (cur_len)
2169 *cur_len = clen;
2170
2171 return num_pages;
2172}
2173
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002174static void
2175cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2176{
2177 int i;
2178 size_t bytes = wdata->bytes;
2179
2180 /* marshal up the pages into iov array */
2181 for (i = 0; i < wdata->nr_pages; i++) {
Steve Frenchc7ad42b2012-03-23 16:30:56 -05002182 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002183 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2184 bytes -= iov[i + 1].iov_len;
2185 }
2186}
2187
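/*
 * Completion work for an uncached write: update the server EOF and the
 * inode size, signal the waiter, and release the data pages unless the
 * write is going to be retried (-EAGAIN).
 */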
2188static void
2189cifs_uncached_writev_complete(struct work_struct *work)
2190{
2191 int i;
2192 struct cifs_writedata *wdata = container_of(work,
2193 struct cifs_writedata, work);
2194 struct inode *inode = wdata->cfile->dentry->d_inode;
2195 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2196
2197 spin_lock(&inode->i_lock);
2198 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2199 if (cifsi->server_eof > inode->i_size)
2200 i_size_write(inode, cifsi->server_eof);
2201 spin_unlock(&inode->i_lock);
2202
2203 complete(&wdata->done);
2204
2205 if (wdata->result != -EAGAIN) {
2206 for (i = 0; i < wdata->nr_pages; i++)
2207 put_page(wdata->pages[i]);
2208 }
2209
2210 kref_put(&wdata->refcount, cifs_writedata_release);
2211}
2212
2213/* attempt to send write to server, retry on any -EAGAIN errors */
2214static int
2215cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2216{
2217 int rc;
2218
2219 do {
2220 if (wdata->cfile->invalidHandle) {
2221 rc = cifs_reopen_file(wdata->cfile, false);
2222 if (rc != 0)
2223 continue;
2224 }
2225 rc = cifs_async_writev(wdata);
2226 } while (rc == -EAGAIN);
2227
2228 return rc;
2229}
2230
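/*
 * Uncached write: carve the user iovec into wsize-sized chunks, copy
 * each chunk into freshly allocated pages, send them as asynchronous
 * writes, then wait for the replies in order of increasing offset,
 * resending any that come back with -EAGAIN.
 */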
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002231static ssize_t
2232cifs_iovec_write(struct file *file, const struct iovec *iov,
2233 unsigned long nr_segs, loff_t *poffset)
2234{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002235 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002236 size_t copied, len, cur_len;
2237 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002238 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002239 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002240 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002241 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002242 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002243 struct cifs_writedata *wdata, *tmp;
2244 struct list_head wdata_list;
2245 int rc;
2246 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002247
2248 len = iov_length(iov, nr_segs);
2249 if (!len)
2250 return 0;
2251
2252 rc = generic_write_checks(file, poffset, &len, 0);
2253 if (rc)
2254 return rc;
2255
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002256 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002257 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002258 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002259 tcon = tlink_tcon(open_file->tlink);
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002260 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002261
2262 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2263 pid = open_file->pid;
2264 else
2265 pid = current->tgid;
2266
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002267 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002268 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002269 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002270
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002271 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2272 wdata = cifs_writedata_alloc(nr_pages,
2273 cifs_uncached_writev_complete);
2274 if (!wdata) {
2275 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002276 break;
2277 }
2278
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002279 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2280 if (rc) {
2281 kfree(wdata);
2282 break;
2283 }
2284
2285 save_len = cur_len;
2286 for (i = 0; i < nr_pages; i++) {
2287 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2288 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2289 0, copied);
2290 cur_len -= copied;
2291 iov_iter_advance(&it, copied);
2292 }
2293 cur_len = save_len - cur_len;
2294
2295 wdata->sync_mode = WB_SYNC_ALL;
2296 wdata->nr_pages = nr_pages;
2297 wdata->offset = (__u64)offset;
2298 wdata->cfile = cifsFileInfo_get(open_file);
2299 wdata->pid = pid;
2300 wdata->bytes = cur_len;
2301 wdata->marshal_iov = cifs_uncached_marshal_iov;
2302 rc = cifs_uncached_retry_writev(wdata);
2303 if (rc) {
2304 kref_put(&wdata->refcount, cifs_writedata_release);
2305 break;
2306 }
2307
2308 list_add_tail(&wdata->list, &wdata_list);
2309 offset += cur_len;
2310 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002311 } while (len > 0);
2312
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002313 /*
2314 * If at least one write was successfully sent, then discard any rc
	2315	 * value from the later writes. If the other writes succeed, then
2316 * we'll end up returning whatever was written. If it fails, then
2317 * we'll get a new rc value from that.
2318 */
2319 if (!list_empty(&wdata_list))
2320 rc = 0;
2321
2322 /*
2323 * Wait for and collect replies for any successful sends in order of
2324 * increasing offset. Once an error is hit or we get a fatal signal
2325 * while waiting, then return without waiting for any more replies.
2326 */
2327restart_loop:
2328 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2329 if (!rc) {
2330 /* FIXME: freezable too? */
2331 rc = wait_for_completion_killable(&wdata->done);
2332 if (rc)
2333 rc = -EINTR;
2334 else if (wdata->result)
2335 rc = wdata->result;
2336 else
2337 total_written += wdata->bytes;
2338
2339 /* resend call if it's a retryable error */
2340 if (rc == -EAGAIN) {
2341 rc = cifs_uncached_retry_writev(wdata);
2342 goto restart_loop;
2343 }
2344 }
2345 list_del_init(&wdata->list);
2346 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002347 }
2348
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002349 if (total_written > 0)
2350 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002351
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002352 cifs_stats_bytes_written(tcon, total_written);
2353 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002354}
2355
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002356ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002357 unsigned long nr_segs, loff_t pos)
2358{
2359 ssize_t written;
2360 struct inode *inode;
2361
2362 inode = iocb->ki_filp->f_path.dentry->d_inode;
2363
2364 /*
	2365	 * BB - optimize the path when signing is disabled. We can drop this
	2366	 * extra memory-to-memory copying and use iovec buffers to construct
	2367	 * the write request.
2368 */
2369
2370 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2371 if (written > 0) {
2372 CIFS_I(inode)->invalid_mapping = true;
2373 iocb->ki_pos = pos;
2374 }
2375
2376 return written;
2377}
2378
2379ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2380 unsigned long nr_segs, loff_t pos)
2381{
2382 struct inode *inode;
2383
2384 inode = iocb->ki_filp->f_path.dentry->d_inode;
2385
2386 if (CIFS_I(inode)->clientCanCacheAll)
2387 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2388
2389 /*
	2390	 * In strict cache mode we need to write the data to the server exactly
	2391	 * from pos to pos+len-1 rather than flush all affected pages,
	2392	 * because flushing may cause an error with mandatory locks on these
	2393	 * pages but not on the region from pos to pos+len-1.
2394 */
2395
2396 return cifs_user_writev(iocb, iov, nr_segs, pos);
2397}
2398
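/*
 * Allocate a cifs_readdata with room for nr_vecs kvecs and arm its
 * completion work function.
 */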
Jeff Layton0471ca32012-05-16 07:13:16 -04002399static struct cifs_readdata *
2400cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2401{
2402 struct cifs_readdata *rdata;
2403
2404 rdata = kzalloc(sizeof(*rdata) +
2405 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2406 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002407 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002408 INIT_LIST_HEAD(&rdata->list);
2409 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002410 INIT_WORK(&rdata->work, complete);
2411 INIT_LIST_HEAD(&rdata->pages);
2412 }
2413 return rdata;
2414}
2415
Jeff Layton6993f742012-05-16 07:13:17 -04002416void
2417cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002418{
Jeff Layton6993f742012-05-16 07:13:17 -04002419 struct cifs_readdata *rdata = container_of(refcount,
2420 struct cifs_readdata, refcount);
2421
2422 if (rdata->cfile)
2423 cifsFileInfo_put(rdata->cfile);
2424
Jeff Layton0471ca32012-05-16 07:13:16 -04002425 kfree(rdata);
2426}
2427
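/*
 * Allocate npages pages onto the given list for an uncached read,
 * unwinding on allocation failure.
 */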
Jeff Layton2a1bb132012-05-16 07:13:17 -04002428static int
Jeff Layton1c892542012-05-16 07:13:17 -04002429cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2430{
2431 int rc = 0;
2432 struct page *page, *tpage;
2433 unsigned int i;
2434
2435 for (i = 0; i < npages; i++) {
2436 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2437 if (!page) {
2438 rc = -ENOMEM;
2439 break;
2440 }
2441 list_add(&page->lru, list);
2442 }
2443
2444 if (rc) {
2445 list_for_each_entry_safe(page, tpage, list, lru) {
2446 list_del(&page->lru);
2447 put_page(page);
2448 }
2449 }
2450 return rc;
2451}
2452
2453static void
2454cifs_uncached_readdata_release(struct kref *refcount)
2455{
2456 struct page *page, *tpage;
2457 struct cifs_readdata *rdata = container_of(refcount,
2458 struct cifs_readdata, refcount);
2459
2460 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2461 list_del(&page->lru);
2462 put_page(page);
2463 }
2464 cifs_readdata_release(refcount);
2465}
2466
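/*
 * Send an async read, reopening an invalidated handle and retrying on
 * -EAGAIN.
 */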
2467static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002468cifs_retry_async_readv(struct cifs_readdata *rdata)
2469{
2470 int rc;
2471
2472 do {
2473 if (rdata->cfile->invalidHandle) {
2474 rc = cifs_reopen_file(rdata->cfile, true);
2475 if (rc != 0)
2476 continue;
2477 }
2478 rc = cifs_async_readv(rdata);
2479 } while (rc == -EAGAIN);
2480
2481 return rc;
2482}
2483
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	struct page *page, *tpage;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		ssize_t copy;

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}

		list_del(&page->lru);
		put_page(page);
	}

	return rc;
}

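/*
 * Completion work for uncached reads: kunmap the pages mapped by the
 * marshalling routine (only on success, when they were actually kmapped),
 * wake the waiter, and drop our readdata reference.
 */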
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	/* if the result is non-zero then the pages weren't kmapped */
	if (rdata->result == 0) {
		struct page *page;

		list_for_each_entry(page, &rdata->pages, lru)
			kunmap(page);
	}

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

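/*
 * Marshal the page list into rdata->iov for an uncached read response:
 * kmap whole pages while a full page of data remains, zero the tail of a
 * final partial page, and release any pages beyond the response length.
 * Returns the number of bytes described by the iovec array.
 */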
static int
cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
				unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_SIZE;
			remaining -= PAGE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			put_page(page);
		}
	}

	return len;
}

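/*
 * Uncached iovec read path: split the request into rsize-sized async
 * reads, wait for them in order of increasing offset, and copy the
 * returned data into the caller's iovec.
 */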
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/*
			 * Don't jump to the error label here: rdata is NULL,
			 * so the kref_put() there would oops.
			 */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(&rdata->pages, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_uncached_read_marshal_iov;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							nr_segs, *poffset,
							&copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	return total_read ? total_read : rc;
}

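/*
 * aio entry point for uncached reads (presumably wired up as ->aio_read in
 * the non-strict file_operations); advances ki_pos on a successful read.
 */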
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * the mtime change - so we can't make a decision about invalidating
	 * the inode. Reading pages can also fail if there are mandatory
	 * locks on pages affected by this read but not on the region from
	 * pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

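/*
 * Synchronous read helper used by the readpage paths: issue CIFSSMBRead
 * calls of at most rsize bytes, reopening the file handle on -EAGAIN,
 * until the request is satisfied or an error occurs.
 */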
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->fid.netfid;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*poffset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

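/*
 * mmap for strict cache mode: without a read oplock the pagecache may be
 * stale, so invalidate the mapping before letting it be mapped.
 */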
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early error return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

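/*
 * Completion work for readpages-based reads: put each page back on the
 * LRU, and on success mark it uptodate and copy it to fscache before
 * unlocking and releasing it.
 */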
static void
cifs_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);
	struct page *page, *tpage;

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		lru_cache_add_file(page);

		if (rdata->result == 0) {
			kunmap(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

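/*
 * Marshal pagecache pages into rdata->iov, like the uncached variant
 * above, except that pages lying past the eof the server (probably) has
 * are zero-filled and marked uptodate rather than included in the read.
 */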
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;
	u64 eof;
	pgoff_t eof_index;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_CACHE_SIZE;
			remaining -= PAGE_CACHE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_CACHE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			list_del(&page->lru);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return len;
}

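/*
 * ->readpages(): pull contiguous runs of pages off the VFS list into
 * rsize-sized chunks, add them to the pagecache, and fire off one async
 * read per chunk.
 */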
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_readpages_marshal_iov;
		list_splice_init(&tmplist, &rdata->pages);

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

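/*
 * Read a single page, preferring fscache and falling back to a synchronous
 * cifs_read(); short reads have their tail zeroed before the page is
 * marked uptodate.
 */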
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

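/* return 1 if any open handle on the inode was opened with write access */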
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write - to avoid races with writepage extending the file. In the
 * future we could consider allowing refreshing the inode only on increases
 * in the file size, but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/*
			 * Since there is no page cache to corrupt on
			 * directio, we can change size safely.
			 */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

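/*
 * ->write_begin(): grab and lock the target page. The read-in of existing
 * data is skipped when the write covers the whole page, or, with a read
 * oplock, when the bytes not being written are known to lie beyond EOF.
 */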
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end, so it is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

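/*
 * ->releasepage() and ->invalidatepage() mainly keep fscache in sync;
 * pages with private data are never released.
 */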
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

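/*
 * ->launder_page(): synchronously write back a dirty page before it is
 * freed, and invalidate any fscache copy so it cannot go stale.
 */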
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

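/*
 * Work routine run when the server breaks our oplock: flush dirty pages
 * (and invalidate the cache when read caching is lost), push cached
 * byte-range locks to the server, then acknowledge the break unless it
 * was cancelled.
 */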
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the oplock
	 * has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->fid.netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};