/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

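/*
 * Worked example (illustrative note, not used by the code): a call such as
 * open(path, O_CREAT | O_TRUNC | O_WRONLY) reaches cifs_get_disposition()
 * with both O_CREAT and O_TRUNC set and maps to FILE_OVERWRITE_IF, while a
 * plain open(path, O_RDONLY) maps to FILE_OPEN. See the fuller mapping
 * table in cifs_nt_open() below.
 */
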
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

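/*
 * Usage sketch (mirrors the call site in cifs_open() below): when the
 * server advertises CIFS_UNIX_POSIX_PATH_OPS_CAP, the open path calls
 *
 *	rc = cifs_posix_open(full_path, &inode, inode->i_sb,
 *			     cifs_sb->mnt_file_mode, file->f_flags,
 *			     &oplock, &fid.netfid, xid);
 *
 * and falls back to cifs_nt_open() on network i/o or DFS errors.
 */
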
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the FILE_SUPERSEDE
 *	disposition (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one the way FILE_SUPERSEDE
 *	does (FILE_SUPERSEDE uses the attributes / metadata passed
 *	in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	mutex_init(&cfile->fh_mutex);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_LIST_HEAD(&cfile->llist);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

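/*
 * Reference counting sketch: cifsFileInfo_get() above takes
 * cifs_file_list_lock and bumps ->count; the final cifsFileInfo_put()
 * unlinks the handle from the per-tcon and per-inode lists, sends the
 * close to the server through server->ops->close() if the handle is
 * still valid, frees any cached byte-range locks, and drops the dentry
 * and tlink references.
 */
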
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means
	 * we end up here, and we can never tell if the caller already has
	 * the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry opening the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh the inode by passing in a file_info buf to be
	 * returned by CIFSSMBOpen and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty
	 * locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data, and since we do not know if
	 * we have data that would invalidate the current end of file on
	 * the server we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cur,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &cfile->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & server->vals->shared_lock_type) &&
			 ((server->ops->compare_fids(cur, cfile) &&
			   current->tgid == li->pid) || type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

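/*
 * Overlap check illustration: a request [offset, offset + length)
 * conflicts with an existing lock li unless one range ends at or before
 * the start of the other. E.g. against li->offset == 100 and
 * li->length == 50 (covering bytes 100..149), a request at offset 150 of
 * length 10 does not conflict (150 >= 100 + 50), while one at offset 140
 * of length 20 does.
 */
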
static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifsFileInfo *fid, *tmp;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
		rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}
	spin_unlock(&cifs_file_list_lock);

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cfile->llist);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

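/*
 * Blocking behaviour above, in short: a waiter queues itself on the
 * conflicting lock's blist and sleeps on its own block_q until
 * cifs_del_lock_waiters() wakes it; the wait condition is an open-coded
 * list_empty(&lock->blist), i.e. the waiter has been unlinked, after
 * which the whole conflict check is retried from scratch.
 */
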
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

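/*
 * Note: posix_lock_file() may return FILE_LOCK_DEFERRED for a blocking
 * request that could not be granted immediately; the loop above then
 * sleeps on flock->fl_wait until the lock stops being blocked (fl_next
 * cleared) and retries, rechecking can_cache_brlcks each time around.
 */
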
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	free_xid(xid);
	return rc;
}

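/*
 * Batching sketch for the function above: cached locks are flushed in
 * two passes, one per LOCKING_ANDX type (exclusive, then shared),
 * packing up to max_num LOCKING_ANDX_RANGE entries into a single
 * cifs_lockv() call; max_num is derived from the server's maxBuf so one
 * request never exceeds the negotiated SMB buffer size.
 */
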
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we hold cinode->lock_mutex, which protects
	 * the locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

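/*
 * Design note on the two-pass structure above: the FL_POSIX locks are
 * counted under lock_flocks() first, and the lock_to_push entries are
 * allocated with GFP_KERNEL outside of it (a sleeping allocation is not
 * safe while lock_flocks() is held); holding cinode->lock_mutex in the
 * meantime guarantees the count cannot grow between the two passes.
 */
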
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
		    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
			   current->tgid, length, offset, unlock, lock,
			   (__u8)type, wait, 0);
}

1160static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001161cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001162 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001163{
1164 int rc = 0;
1165 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001166 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1167 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001168 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001169 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001171 if (posix_lck) {
1172 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001173
1174 rc = cifs_posix_lock_test(file, flock);
1175 if (!rc)
1176 return rc;
1177
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001178 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001179 posix_lock_type = CIFS_RDLCK;
1180 else
1181 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001182 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001183 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001184 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 return rc;
1186 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001187
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001188 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001189 if (!rc)
1190 return rc;
1191
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001192 /* BB we could chain these into one lock request BB */
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001193 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
1194 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001195 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001196 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1197 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001198 flock->fl_type = F_UNLCK;
1199 if (rc != 0)
1200			cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001201				"range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001202 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001203 }
1204
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001205 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001206 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001207 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001208 }
1209
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001210 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1211 type | server->vals->shared_lock_type, 1, 0,
1212 false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001213 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001214 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1215 type | server->vals->shared_lock_type,
1216 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001217 flock->fl_type = F_RDLCK;
1218 if (rc != 0)
1219			cERROR(1, "Error %d unlocking previously locked "
1220				"range during test of lock", rc);
1221 } else
1222 flock->fl_type = F_WRLCK;
1223
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001224 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001225}
1226
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001227static void
1228cifs_move_llist(struct list_head *source, struct list_head *dest)
1229{
1230 struct list_head *li, *tmp;
1231 list_for_each_safe(li, tmp, source)
1232 list_move(li, dest);
1233}
1234
1235static void
1236cifs_free_llist(struct list_head *llist)
1237{
1238 struct cifsLockInfo *li, *tmp;
1239 list_for_each_entry_safe(li, tmp, llist, llist) {
1240 cifs_del_lock_waiters(li);
1241 list_del(&li->llist);
1242 kfree(li);
1243 }
1244}
1245
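/*
 * Send unlock requests for every cached lock that falls inside the range
 * being unlocked. The file's lock list is walked once per LOCKING_ANDX
 * type (exclusive, then shared), batching up to max_num ranges into a
 * single SMB based on the server's maxBuf. Matching locks are parked on
 * tmp_llist so they can be reinserted should the server call fail.
 */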
1246static int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001247cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1248 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001249{
1250 int rc = 0, stored_rc;
1251 int types[] = {LOCKING_ANDX_LARGE_FILES,
1252 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1253 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001254 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001255 LOCKING_ANDX_RANGE *buf, *cur;
1256 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1257 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1258 struct cifsLockInfo *li, *tmp;
1259 __u64 length = 1 + flock->fl_end - flock->fl_start;
1260 struct list_head tmp_llist;
1261
1262 INIT_LIST_HEAD(&tmp_llist);
1263
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001264 /*
1265 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1266 * and check it for zero before using.
1267 */
1268 max_buf = tcon->ses->server->maxBuf;
1269 if (!max_buf)
1270 return -EINVAL;
1271
1272 max_num = (max_buf - sizeof(struct smb_hdr)) /
1273 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001274 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1275 if (!buf)
1276 return -ENOMEM;
1277
1278 mutex_lock(&cinode->lock_mutex);
1279 for (i = 0; i < 2; i++) {
1280 cur = buf;
1281 num = 0;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001282 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001283 if (flock->fl_start > li->offset ||
1284 (flock->fl_start + length) <
1285 (li->offset + li->length))
1286 continue;
1287 if (current->tgid != li->pid)
1288 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001289 if (types[i] != li->type)
1290 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001291 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001292 /*
1293 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001294 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001295 */
1296 list_del(&li->llist);
1297 cifs_del_lock_waiters(li);
1298 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001299 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001300 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001301 cur->Pid = cpu_to_le16(li->pid);
1302 cur->LengthLow = cpu_to_le32((u32)li->length);
1303 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1304 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1305 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1306 /*
1307 * We need to save a lock here to let us add it again to
1308 * the file's list if the unlock range request fails on
1309 * the server.
1310 */
1311 list_move(&li->llist, &tmp_llist);
1312 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001313 stored_rc = cifs_lockv(xid, tcon,
1314 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001315 li->type, num, 0, buf);
1316 if (stored_rc) {
1317 /*
1318 * We failed on the unlock range
1319 * request - add all locks from the tmp
1320 * list to the head of the file's list.
1321 */
1322 cifs_move_llist(&tmp_llist,
1323 &cfile->llist);
1324 rc = stored_rc;
1325 } else
1326 /*
1327				 * The unlock range request succeeded -
1328 * free the tmp list.
1329 */
1330 cifs_free_llist(&tmp_llist);
1331 cur = buf;
1332 num = 0;
1333 } else
1334 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001335 }
1336 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001337 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001338 types[i], num, 0, buf);
1339 if (stored_rc) {
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001340 cifs_move_llist(&tmp_llist, &cfile->llist);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001341 rc = stored_rc;
1342 } else
1343 cifs_free_llist(&tmp_llist);
1344 }
1345 }
1346
1347 mutex_unlock(&cinode->lock_mutex);
1348 kfree(buf);
1349 return rc;
1350}
1351
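/*
 * Service an F_SETLK/F_SETLKW-style request. POSIX locks go to the
 * server via CIFSSMBPosixLock. For mandatory locks, a lock request is
 * first checked against the locally cached locks (cifs_lock_add_if) and,
 * if not resolved there, sent to the server and then added to the local
 * list; unlocks are handed off to cifs_unlock_range.
 */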
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001353cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001354 bool wait_flag, bool posix_lck, int lock, int unlock,
1355 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356{
1357 int rc = 0;
1358 __u64 length = 1 + flock->fl_end - flock->fl_start;
1359 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1360 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001361 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001362 __u16 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001363
1364 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001365 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001366
1367 rc = cifs_posix_lock_set(file, flock);
1368		if (rc <= 0)
1369 return rc;
1370
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001371 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001372 posix_lock_type = CIFS_RDLCK;
1373 else
1374 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001375
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001376 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001377 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001378
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001379 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001380 flock->fl_start, length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001381 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001382 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001383 }
1384
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001385 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001386 struct cifsLockInfo *lock;
1387
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001388 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001389 if (!lock)
1390 return -ENOMEM;
1391
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001392 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001393 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001394 kfree(lock);
1395 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001396 goto out;
1397
Pavel Shilovsky7f924472012-03-28 17:10:25 +04001398 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1399 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001400 if (rc) {
1401 kfree(lock);
1402 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001404
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001405 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001406 } else if (unlock)
1407 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001408
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409out:
1410 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001411 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001412 return rc;
1413}
1414
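/*
 * VFS entry point for byte-range locking. Decode the file_lock into
 * lock/unlock flags and a server lock type, decide whether the POSIX
 * locking extension is usable on this mount, and dispatch to cifs_getlk
 * or cifs_setlk.
 */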
1415int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1416{
1417 int rc, xid;
1418 int lock = 0, unlock = 0;
1419 bool wait_flag = false;
1420 bool posix_lck = false;
1421 struct cifs_sb_info *cifs_sb;
1422 struct cifs_tcon *tcon;
1423 struct cifsInodeInfo *cinode;
1424 struct cifsFileInfo *cfile;
1425 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001426 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427
1428 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001429 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001430
1431 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1432 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1433 flock->fl_start, flock->fl_end);
1434
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435 cfile = (struct cifsFileInfo *)file->private_data;
1436 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001437
1438 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1439 tcon->ses->server);
1440
1441 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001442 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001443 cinode = CIFS_I(file->f_path.dentry->d_inode);
1444
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001445 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001446 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1447 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1448 posix_lck = true;
1449 /*
1450 * BB add code here to normalize offset and length to account for
1451	 * negative length, which we cannot accept over the wire.
1452 */
1453 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001454 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001455 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001456 return rc;
1457 }
1458
1459 if (!lock && !unlock) {
1460 /*
1461	 * if this is neither a lock nor an unlock request, there is
1462	 * nothing to do since we do not know what it is
1463 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001464 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001465 return -EOPNOTSUPP;
1466 }
1467
1468 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1469 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001470 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 return rc;
1472}
1473
Jeff Layton597b0272012-03-23 14:40:56 -04001474/*
1475 * update the file size (if needed) after a write. Should be called with
1476 * the inode->i_lock held
1477 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001478void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001479cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1480 unsigned int bytes_written)
1481{
1482 loff_t end_of_write = offset + bytes_written;
1483
1484 if (end_of_write > cifsi->server_eof)
1485 cifsi->server_eof = end_of_write;
1486}
1487
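/*
 * Push data to the server through its ->sync_write op, splitting the
 * request into wsize-sized chunks. Invalid handles are reopened and
 * -EAGAIN results retried; on success the cached server EOF and the
 * inode size are updated.
 */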
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001488static ssize_t
1489cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1490 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491{
1492 int rc = 0;
1493 unsigned int bytes_written = 0;
1494 unsigned int total_written;
1495 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001496 struct cifs_tcon *tcon;
1497 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001498 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001499 struct dentry *dentry = open_file->dentry;
1500 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001501 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502
Jeff Layton7da4b492010-10-15 15:34:00 -04001503 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504
Joe Perchesb6b38f72010-04-21 03:50:45 +00001505	cFYI(1, "write %zu bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001506 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001508 tcon = tlink_tcon(open_file->tlink);
1509 server = tcon->ses->server;
1510
1511 if (!server->ops->sync_write)
1512 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001513
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001514 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 for (total_written = 0; write_size > total_written;
1517 total_written += bytes_written) {
1518 rc = -EAGAIN;
1519 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001520 struct kvec iov[2];
1521 unsigned int len;
1522
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 /* we could deadlock if we called
1525 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001526 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001528 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (rc != 0)
1530 break;
1531 }
Steve French3e844692005-10-03 13:37:24 -07001532
Jeff Laytonca83ce32011-04-12 09:13:44 -04001533 len = min((size_t)cifs_sb->wsize,
1534 write_size - total_written);
1535 /* iov[0] is reserved for smb header */
1536 iov[1].iov_base = (char *)write_data + total_written;
1537 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001538 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001539 io_parms.tcon = tcon;
1540 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001541 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001542 rc = server->ops->sync_write(xid, open_file, &io_parms,
1543 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 }
1545 if (rc || (bytes_written == 0)) {
1546 if (total_written)
1547 break;
1548 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001549 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 return rc;
1551 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001552 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001553 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001554 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001555 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001556 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
1559
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001560 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Jeff Layton7da4b492010-10-15 15:34:00 -04001562 if (total_written > 0) {
1563 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001564 if (*offset > dentry->d_inode->i_size)
1565 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001566 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001568 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001569 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 return total_written;
1571}
1572
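/*
 * Find a readable cached handle for this inode, skipping handles that
 * are marked invalid. On multiuser mounts only handles opened by the
 * calling fsuid are considered. A reference is taken on the returned
 * handle.
 */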
Jeff Layton6508d902010-09-29 19:51:11 -04001573struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1574 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001575{
1576 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001577 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1578
1579 /* only filter by fsuid on multiuser mounts */
1580 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1581 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001582
Jeff Layton44772882010-10-15 15:34:03 -04001583 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001584	/* we could simply get the first list entry, since write-only entries
1585	   are always at the end of the list; but since the first entry might
1586	   have a close pending, we go through the whole list */
1587 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001588 if (fsuid_only && open_file->uid != current_fsuid())
1589 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001590 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001591 if (!open_file->invalidHandle) {
1592 /* found a good file */
1593 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001594 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001595 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001596 return open_file;
1597 } /* else might as well continue, and look for
1598 another, or simply have the caller reopen it
1599 again rather than trying to fix this handle */
1600 } else /* write only file */
1601 break; /* write only files are last so must be done */
1602 }
Jeff Layton44772882010-10-15 15:34:03 -04001603 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001604 return NULL;
1605}
Steve French630f3f0c2007-10-25 21:17:17 +00001606
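/*
 * Find a writable cached handle for this inode, preferring handles
 * opened by the calling process before falling back to any available
 * one. If only an invalid handle is found, try to reopen it, giving up
 * after MAX_REOPEN_ATT attempts. A reference is taken on the returned
 * handle.
 */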
Jeff Layton6508d902010-09-29 19:51:11 -04001607struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1608 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001609{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001610 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001611 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001612 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001613 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001614 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001615
Steve French60808232006-04-22 15:53:05 +00001616 /* Having a null inode here (because mapping->host was set to zero by
1617	   the VFS or MM) should not happen, but we had reports of an oops (due to
1618	   it being zero) during stress test cases, so we need to check for it */
1619
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001620 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001621		cERROR(1, "Null inode passed to find_writable_file");
Steve French60808232006-04-22 15:53:05 +00001622 dump_stack();
1623 return NULL;
1624 }
1625
Jeff Laytond3892292010-11-02 16:22:50 -04001626 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1627
Jeff Layton6508d902010-09-29 19:51:11 -04001628 /* only filter by fsuid on multiuser mounts */
1629 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1630 fsuid_only = false;
1631
Jeff Layton44772882010-10-15 15:34:03 -04001632 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001633refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001634 if (refind > MAX_REOPEN_ATT) {
1635 spin_unlock(&cifs_file_list_lock);
1636 return NULL;
1637 }
Steve French6148a742005-10-05 12:23:19 -07001638 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001639 if (!any_available && open_file->pid != current->tgid)
1640 continue;
1641 if (fsuid_only && open_file->uid != current_fsuid())
1642 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001643 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001644 if (!open_file->invalidHandle) {
1645 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001646 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001647 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001648 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001649 } else {
1650 if (!inv_file)
1651 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001652 }
Steve French6148a742005-10-05 12:23:19 -07001653 }
1654 }
Jeff Layton2846d382008-09-22 21:33:33 -04001655	/* couldn't find usable FH with same pid, try any available */
1656 if (!any_available) {
1657 any_available = true;
1658 goto refind_writable;
1659 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001660
1661 if (inv_file) {
1662 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001663 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001664 }
1665
Jeff Layton44772882010-10-15 15:34:03 -04001666 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001667
1668 if (inv_file) {
1669 rc = cifs_reopen_file(inv_file, false);
1670 if (!rc)
1671 return inv_file;
1672 else {
1673 spin_lock(&cifs_file_list_lock);
1674 list_move_tail(&inv_file->flist,
1675 &cifs_inode->openFileList);
1676 spin_unlock(&cifs_file_list_lock);
1677 cifsFileInfo_put(inv_file);
1678 spin_lock(&cifs_file_list_lock);
1679 ++refind;
1680 goto refind_writable;
1681 }
1682 }
1683
Steve French6148a742005-10-05 12:23:19 -07001684 return NULL;
1685}
1686
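/*
 * Write the byte range [from, to) of a single cached page to the server
 * using any writable handle for the inode, clamping the range so the
 * write never extends the file.
 */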
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1688{
1689 struct address_space *mapping = page->mapping;
1690 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1691 char *write_data;
1692 int rc = -EFAULT;
1693 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001695 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
1697 if (!mapping || !mapping->host)
1698 return -EFAULT;
1699
1700 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
1702 offset += (loff_t)from;
1703 write_data = kmap(page);
1704 write_data += from;
1705
1706 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1707 kunmap(page);
1708 return -EIO;
1709 }
1710
1711 /* racing with truncate? */
1712 if (offset > mapping->host->i_size) {
1713 kunmap(page);
1714 return 0; /* don't care */
1715 }
1716
1717 /* check to make sure that we are not extending the file */
1718 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001719 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
Jeff Layton6508d902010-09-29 19:51:11 -04001721 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001722 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001723 bytes_written = cifs_write(open_file, open_file->pid,
1724 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001725 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001727 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001728 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001729 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001730 else if (bytes_written < 0)
1731 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001732 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001733 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 rc = -EIO;
1735 }
1736
1737 kunmap(page);
1738 return rc;
1739}
1740
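/*
 * ->writepages handler: gather runs of consecutive dirty pages (up to
 * wsize bytes) into a cifs_writedata and send them via the server's
 * async_writev op, falling back to generic_writepages when wsize is
 * smaller than the page cache size.
 */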
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001742 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001744 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1745 bool done = false, scanned = false, range_whole = false;
1746 pgoff_t end, index;
1747 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001748 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001749 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001750 int rc = 0;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001751 loff_t isize = i_size_read(mapping->host);
Steve French50c2f752007-07-13 00:33:32 +00001752
Steve French37c0eb42005-10-05 14:50:29 -07001753 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001754 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001755 * one page at a time via cifs_writepage
1756 */
1757 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1758 return generic_writepages(mapping, wbc);
1759
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001760 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001761 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001762 end = -1;
1763 } else {
1764 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1765 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1766 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001767 range_whole = true;
1768 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001769 }
1770retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001771 while (!done && index <= end) {
1772 unsigned int i, nr_pages, found_pages;
1773 pgoff_t next = 0, tofind;
1774 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001775
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001776 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1777 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001778
Jeff Laytonc2e87642012-03-23 14:40:55 -04001779 wdata = cifs_writedata_alloc((unsigned int)tofind,
1780 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001781 if (!wdata) {
1782 rc = -ENOMEM;
1783 break;
1784 }
1785
1786 /*
1787 * find_get_pages_tag seems to return a max of 256 on each
1788 * iteration, so we must call it several times in order to
1789 * fill the array or the wsize is effectively limited to
1790 * 256 * PAGE_CACHE_SIZE.
1791 */
1792 found_pages = 0;
1793 pages = wdata->pages;
1794 do {
1795 nr_pages = find_get_pages_tag(mapping, &index,
1796 PAGECACHE_TAG_DIRTY,
1797 tofind, pages);
1798 found_pages += nr_pages;
1799 tofind -= nr_pages;
1800 pages += nr_pages;
1801 } while (nr_pages && tofind && index <= end);
1802
1803 if (found_pages == 0) {
1804 kref_put(&wdata->refcount, cifs_writedata_release);
1805 break;
1806 }
1807
1808 nr_pages = 0;
1809 for (i = 0; i < found_pages; i++) {
1810 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001811 /*
1812 * At this point we hold neither mapping->tree_lock nor
1813 * lock on the page itself: the page may be truncated or
1814 * invalidated (changing page->mapping to NULL), or even
1815 * swizzled back from swapper_space to tmpfs file
1816 * mapping
1817 */
1818
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001819 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001820 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001821 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001822 break;
1823
1824 if (unlikely(page->mapping != mapping)) {
1825 unlock_page(page);
1826 break;
1827 }
1828
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001829 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001830 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001831 unlock_page(page);
1832 break;
1833 }
1834
1835 if (next && (page->index != next)) {
1836 /* Not next consecutive page */
1837 unlock_page(page);
1838 break;
1839 }
1840
1841 if (wbc->sync_mode != WB_SYNC_NONE)
1842 wait_on_page_writeback(page);
1843
1844 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001845 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001846 unlock_page(page);
1847 break;
1848 }
Steve French84d2f072005-10-12 15:32:05 -07001849
Linus Torvaldscb876f42006-12-23 16:19:07 -08001850 /*
1851 * This actually clears the dirty bit in the radix tree.
1852 * See cifs_writepage() for more commentary.
1853 */
1854 set_page_writeback(page);
1855
Jeff Laytoneddb0792012-09-18 16:20:35 -07001856 if (page_offset(page) >= isize) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001857 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001858 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001859 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001860 break;
1861 }
1862
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001863 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001864 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001865 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001866 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001867
1868 /* reset index to refind any pages skipped */
1869 if (nr_pages == 0)
1870 index = wdata->pages[0]->index + 1;
1871
1872 /* put any pages we aren't going to use */
1873 for (i = nr_pages; i < found_pages; i++) {
1874 page_cache_release(wdata->pages[i]);
1875 wdata->pages[i] = NULL;
1876 }
1877
1878 /* nothing to write? */
1879 if (nr_pages == 0) {
1880 kref_put(&wdata->refcount, cifs_writedata_release);
1881 continue;
1882 }
1883
1884 wdata->sync_mode = wbc->sync_mode;
1885 wdata->nr_pages = nr_pages;
1886 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001887 wdata->pagesz = PAGE_CACHE_SIZE;
1888 wdata->tailsz =
1889 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1890 (loff_t)PAGE_CACHE_SIZE);
1891 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1892 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001893
1894 do {
1895 if (wdata->cfile != NULL)
1896 cifsFileInfo_put(wdata->cfile);
1897 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1898 false);
1899 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001900 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001901 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001902 break;
Steve French37c0eb42005-10-05 14:50:29 -07001903 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001904 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001905 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1906 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001907 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001908
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001909 for (i = 0; i < nr_pages; ++i)
1910 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001911
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001912 /* send failure -- clean up the mess */
1913 if (rc != 0) {
1914 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001915 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001916 redirty_page_for_writepage(wbc,
1917 wdata->pages[i]);
1918 else
1919 SetPageError(wdata->pages[i]);
1920 end_page_writeback(wdata->pages[i]);
1921 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001922 }
Jeff Layton941b8532011-01-11 07:24:01 -05001923 if (rc != -EAGAIN)
1924 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001925 }
1926 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001927
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001928 wbc->nr_to_write -= nr_pages;
1929 if (wbc->nr_to_write <= 0)
1930 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001931
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001932 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001933 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001934
Steve French37c0eb42005-10-05 14:50:29 -07001935 if (!scanned && !done) {
1936 /*
1937 * We hit the last page and there is more work to be done: wrap
1938 * back to the start of the file
1939 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001940 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001941 index = 0;
1942 goto retry;
1943 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001944
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001945 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001946 mapping->writeback_index = index;
1947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 return rc;
1949}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
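/*
 * Write a single locked page through cifs_partialpagewrite, retrying
 * -EAGAIN in place for WB_SYNC_ALL writeback and redirtying the page
 * otherwise.
 */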
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001951static int
1952cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001954 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001955 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001957 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958/* BB add check for wbc flags */
1959 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001960 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001961 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001962
1963 /*
1964 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1965 *
1966 * A writepage() implementation always needs to do either this,
1967 * or re-dirty the page with "redirty_page_for_writepage()" in
1968 * the case of a failure.
1969 *
1970 * Just unlocking the page will cause the radix tree tag-bits
1971 * to fail to update with the state of the page correctly.
1972 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001973 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001974retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001976 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1977 goto retry_write;
1978 else if (rc == -EAGAIN)
1979 redirty_page_for_writepage(wbc, page);
1980 else if (rc != 0)
1981 SetPageError(page);
1982 else
1983 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001984 end_page_writeback(page);
1985 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001986 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 return rc;
1988}
1989
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001990static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1991{
1992 int rc = cifs_writepage_locked(page, wbc);
1993 unlock_page(page);
1994 return rc;
1995}
1996
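/*
 * ->write_end handler: mark the page uptodate when the copy covered it;
 * if the page never became uptodate, push the copied bytes to the server
 * synchronously via cifs_write, then update i_size if the write extended
 * the file.
 */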
Nick Piggind9414772008-09-24 11:32:59 -04001997static int cifs_write_end(struct file *file, struct address_space *mapping,
1998 loff_t pos, unsigned len, unsigned copied,
1999 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
Nick Piggind9414772008-09-24 11:32:59 -04002001 int rc;
2002 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002003 struct cifsFileInfo *cfile = file->private_data;
2004 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2005 __u32 pid;
2006
2007 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2008 pid = cfile->pid;
2009 else
2010 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
Joe Perchesb6b38f72010-04-21 03:50:45 +00002012 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2013 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002014
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002015 if (PageChecked(page)) {
2016 if (copied == len)
2017 SetPageUptodate(page);
2018 ClearPageChecked(page);
2019 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002020 SetPageUptodate(page);
2021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002023 char *page_data;
2024 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002025 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002026
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002027 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028		/* this is probably better than directly calling
2029		   partialpage_write, since in this function the file handle is
2030		   known and we might as well leverage it */
2031 /* BB check if anything else missing out of ppw
2032 such as updating last write time */
2033 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002034 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002035 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002037
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002038 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002039 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002040 rc = copied;
2041 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 set_page_dirty(page);
2043 }
2044
Nick Piggind9414772008-09-24 11:32:59 -04002045 if (rc > 0) {
2046 spin_lock(&inode->i_lock);
2047 if (pos > inode->i_size)
2048 i_size_write(inode, pos);
2049 spin_unlock(&inode->i_lock);
2050 }
2051
2052 unlock_page(page);
2053 page_cache_release(page);
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 return rc;
2056}
2057
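/*
 * Strict-cache fsync: flush and wait on dirty pages, invalidate the
 * mapping when we no longer hold a read oplock, and then ask the server
 * to flush the file through its ->flush op unless CIFS_MOUNT_NOSSYNC is
 * set.
 */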
Josef Bacik02c24a82011-07-16 20:44:56 -04002058int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2059 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002061 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002063 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002064 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002065 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002066 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002067 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Josef Bacik02c24a82011-07-16 20:44:56 -04002069 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2070 if (rc)
2071 return rc;
2072 mutex_lock(&inode->i_mutex);
2073
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002074 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
Joe Perchesb6b38f72010-04-21 03:50:45 +00002076 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002077 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002078
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002079 if (!CIFS_I(inode)->clientCanCacheRead) {
2080 rc = cifs_invalidate_mapping(inode);
2081 if (rc) {
2082 cFYI(1, "rc: %d during invalidate phase", rc);
2083 rc = 0; /* don't care about it in fsync */
2084 }
2085 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002086
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002087 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002088 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2089 server = tcon->ses->server;
2090 if (server->ops->flush)
2091 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2092 else
2093 rc = -ENOSYS;
2094 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002095
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002096 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002097 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002098 return rc;
2099}
2100
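/*
 * Non-strict fsync: flush and wait on dirty pages and issue a
 * server-side flush, but leave the page cache intact.
 */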
Josef Bacik02c24a82011-07-16 20:44:56 -04002101int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002102{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002103 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002104 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002105 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002106 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002107 struct cifsFileInfo *smbfile = file->private_data;
2108 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002109 struct inode *inode = file->f_mapping->host;
2110
2111 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2112 if (rc)
2113 return rc;
2114 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002115
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002116 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002117
2118 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2119 file->f_path.dentry->d_name.name, datasync);
2120
2121 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002122 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2123 server = tcon->ses->server;
2124 if (server->ops->flush)
2125 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2126 else
2127 rc = -ENOSYS;
2128 }
Steve Frenchb298f222009-02-21 21:17:43 +00002129
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002130 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002131 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 return rc;
2133}
2134
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135/*
2136 * As file closes, flush all cached write data for this inode checking
2137 * for write behind errors.
2138 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002139int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002141 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 int rc = 0;
2143
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002144 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002145 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002146
Joe Perchesb6b38f72010-04-21 03:50:45 +00002147 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
2149 return rc;
2150}
2151
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002152static int
2153cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2154{
2155 int rc = 0;
2156 unsigned long i;
2157
2158 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002159 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002160 if (!pages[i]) {
2161 /*
2162 * save number of pages we have already allocated and
2163 * return with ENOMEM error
2164 */
2165 num_pages = i;
2166 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002167 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002168 }
2169 }
2170
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002171 if (rc) {
2172 for (i = 0; i < num_pages; i++)
2173 put_page(pages[i]);
2174 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002175 return rc;
2176}
2177
2178static inline
2179size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2180{
2181 size_t num_pages;
2182 size_t clen;
2183
2184 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002185 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002186
2187 if (cur_len)
2188 *cur_len = clen;
2189
2190 return num_pages;
2191}
2192
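/*
 * Completion work for uncached writes: advance the cached server EOF and
 * the inode size, wake anyone waiting on the wdata, and drop the page
 * references unless the write is going to be retried (-EAGAIN).
 */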
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002193static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002194cifs_uncached_writev_complete(struct work_struct *work)
2195{
2196 int i;
2197 struct cifs_writedata *wdata = container_of(work,
2198 struct cifs_writedata, work);
2199 struct inode *inode = wdata->cfile->dentry->d_inode;
2200 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2201
2202 spin_lock(&inode->i_lock);
2203 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2204 if (cifsi->server_eof > inode->i_size)
2205 i_size_write(inode, cifsi->server_eof);
2206 spin_unlock(&inode->i_lock);
2207
2208 complete(&wdata->done);
2209
2210 if (wdata->result != -EAGAIN) {
2211 for (i = 0; i < wdata->nr_pages; i++)
2212 put_page(wdata->pages[i]);
2213 }
2214
2215 kref_put(&wdata->refcount, cifs_writedata_release);
2216}
2217
2218/* attempt to send write to server, retry on any -EAGAIN errors */
2219static int
2220cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2221{
2222 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002223 struct TCP_Server_Info *server;
2224
2225 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002226
2227 do {
2228 if (wdata->cfile->invalidHandle) {
2229 rc = cifs_reopen_file(wdata->cfile, false);
2230 if (rc != 0)
2231 continue;
2232 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002233 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002234 } while (rc == -EAGAIN);
2235
2236 return rc;
2237}
2238
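/*
 * Uncached write path: copy the user iovec into freshly allocated pages,
 * at most wsize bytes per cifs_writedata, fire off asynchronous writes,
 * and then collect the completions in order of increasing offset,
 * resending any that come back with -EAGAIN. Requires the server to
 * provide an async_writev op.
 */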
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002239static ssize_t
2240cifs_iovec_write(struct file *file, const struct iovec *iov,
2241 unsigned long nr_segs, loff_t *poffset)
2242{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002243 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002244 size_t copied, len, cur_len;
2245 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002246 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002247 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002248 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002249 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002250 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002251 struct cifs_writedata *wdata, *tmp;
2252 struct list_head wdata_list;
2253 int rc;
2254 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002255
2256 len = iov_length(iov, nr_segs);
2257 if (!len)
2258 return 0;
2259
2260 rc = generic_write_checks(file, poffset, &len, 0);
2261 if (rc)
2262 return rc;
2263
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002264 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002265 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002266 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002267 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002268
2269 if (!tcon->ses->server->ops->async_writev)
2270 return -ENOSYS;
2271
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002272 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002273
2274 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2275 pid = open_file->pid;
2276 else
2277 pid = current->tgid;
2278
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002279 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002280 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002281 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002282
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002283 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2284 wdata = cifs_writedata_alloc(nr_pages,
2285 cifs_uncached_writev_complete);
2286 if (!wdata) {
2287 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002288 break;
2289 }
2290
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002291 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2292 if (rc) {
2293 kfree(wdata);
2294 break;
2295 }
2296
2297 save_len = cur_len;
2298 for (i = 0; i < nr_pages; i++) {
2299 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2300 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2301 0, copied);
2302 cur_len -= copied;
2303 iov_iter_advance(&it, copied);
2304 }
2305 cur_len = save_len - cur_len;
2306
2307 wdata->sync_mode = WB_SYNC_ALL;
2308 wdata->nr_pages = nr_pages;
2309 wdata->offset = (__u64)offset;
2310 wdata->cfile = cifsFileInfo_get(open_file);
2311 wdata->pid = pid;
2312 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002313 wdata->pagesz = PAGE_SIZE;
2314 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002315 rc = cifs_uncached_retry_writev(wdata);
2316 if (rc) {
2317 kref_put(&wdata->refcount, cifs_writedata_release);
2318 break;
2319 }
2320
2321 list_add_tail(&wdata->list, &wdata_list);
2322 offset += cur_len;
2323 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002324 } while (len > 0);
2325
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002326 /*
2327 * If at least one write was successfully sent, then discard any rc
2328	 * value from the later writes. If the later writes succeed, then
2329	 * we'll end up returning whatever was written. If one fails, then
2330	 * we'll get a new rc value from that.
2331 */
2332 if (!list_empty(&wdata_list))
2333 rc = 0;
2334
2335 /*
2336 * Wait for and collect replies for any successful sends in order of
2337 * increasing offset. Once an error is hit or we get a fatal signal
2338 * while waiting, then return without waiting for any more replies.
2339 */
2340restart_loop:
2341 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2342 if (!rc) {
2343 /* FIXME: freezable too? */
2344 rc = wait_for_completion_killable(&wdata->done);
2345 if (rc)
2346 rc = -EINTR;
2347 else if (wdata->result)
2348 rc = wdata->result;
2349 else
2350 total_written += wdata->bytes;
2351
2352 /* resend call if it's a retryable error */
2353 if (rc == -EAGAIN) {
2354 rc = cifs_uncached_retry_writev(wdata);
2355 goto restart_loop;
2356 }
2357 }
2358 list_del_init(&wdata->list);
2359 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002360 }
2361
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002362 if (total_written > 0)
2363 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002364
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002365 cifs_stats_bytes_written(tcon, total_written);
2366 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002367}
2368
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002369ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002370 unsigned long nr_segs, loff_t pos)
2371{
2372 ssize_t written;
2373 struct inode *inode;
2374
2375 inode = iocb->ki_filp->f_path.dentry->d_inode;
2376
2377 /*
2378	 * BB - optimize for the case when signing is disabled. We can drop this
2379	 * extra memory-to-memory copying and use iovec buffers to construct the
2380	 * write request.
2381 */
2382
2383 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2384 if (written > 0) {
2385 CIFS_I(inode)->invalid_mapping = true;
2386 iocb->ki_pos = pos;
2387 }
2388
2389 return written;
2390}
2391
2392ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2393 unsigned long nr_segs, loff_t pos)
2394{
2395 struct inode *inode;
2396
2397 inode = iocb->ki_filp->f_path.dentry->d_inode;
2398
2399 if (CIFS_I(inode)->clientCanCacheAll)
2400 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2401
2402 /*
2403 * In strict cache mode we need to write the data to the server exactly
2404 * from the pos to pos+len-1 rather than flush all affected pages
2405	 * because it may cause an error with mandatory locks on these pages but
2406	 * not on the region from pos to pos+len-1.
2407 */
2408
2409 return cifs_user_writev(iocb, iov, nr_segs, pos);
2410}
2411
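/*
 * Allocate a cifs_readdata with room for nr_pages page pointers and an
 * nr_pages + 1 entry kvec array (the extra slot presumably mirrors the
 * write path, where iov[0] is reserved for the SMB header).
 */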
Jeff Layton0471ca32012-05-16 07:13:16 -04002412static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002413cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002414{
2415 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002416 struct kvec *iov;
Jeff Layton0471ca32012-05-16 07:13:16 -04002417
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002418 iov = kzalloc(sizeof(*iov) * (nr_pages + 1), GFP_KERNEL);
2419 if (!iov)
2420		return NULL;
2421
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002422 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2423 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002424 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002425 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002426 INIT_LIST_HEAD(&rdata->list);
2427 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002428 INIT_WORK(&rdata->work, complete);
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002429 rdata->iov = iov;
2430 } else {
2431 kfree(iov);
Jeff Layton0471ca32012-05-16 07:13:16 -04002432 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002433
Jeff Layton0471ca32012-05-16 07:13:16 -04002434 return rdata;
2435}
2436
Jeff Layton6993f742012-05-16 07:13:17 -04002437void
2438cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002439{
Jeff Layton6993f742012-05-16 07:13:17 -04002440 struct cifs_readdata *rdata = container_of(refcount,
2441 struct cifs_readdata, refcount);
2442
2443 if (rdata->cfile)
2444 cifsFileInfo_put(rdata->cfile);
2445
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002446 kfree(rdata->iov);
Jeff Layton0471ca32012-05-16 07:13:16 -04002447 kfree(rdata);
2448}
2449
Jeff Layton2a1bb132012-05-16 07:13:17 -04002450static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002451cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002452{
2453 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002454 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002455 unsigned int i;
2456
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002457 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002458 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2459 if (!page) {
2460 rc = -ENOMEM;
2461 break;
2462 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002463 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002464 }
2465
2466 if (rc) {
		/* put only the pages allocated before the failure */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002467		while (i--) {
2468 put_page(rdata->pages[i]);
2469 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002470 }
2471 }
2472 return rc;
2473}
2474
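/*
 * Final kref release for an uncached read: drop the references on the
 * receive pages, then let cifs_readdata_release() put the file reference
 * and free the iov array and the struct itself.
 */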
2475static void
2476cifs_uncached_readdata_release(struct kref *refcount)
2477{
Jeff Layton1c892542012-05-16 07:13:17 -04002478 struct cifs_readdata *rdata = container_of(refcount,
2479 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002480 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002481
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002482 for (i = 0; i < rdata->nr_pages; i++) {
2483 put_page(rdata->pages[i]);
2484 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002485 }
2486 cifs_readdata_release(refcount);
2487}
2488
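/*
 * Send an async read request, reopening an invalidated file handle and
 * retrying for as long as the server returns -EAGAIN (e.g. while a
 * session is being reconnected).
 */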
2489static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002490cifs_retry_async_readv(struct cifs_readdata *rdata)
2491{
2492 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002493 struct TCP_Server_Info *server;
2494
2495 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002496
2497 do {
2498 if (rdata->cfile->invalidHandle) {
2499 rc = cifs_reopen_file(rdata->cfile, true);
2500 if (rc != 0)
2501 continue;
2502 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002503 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002504 } while (rc == -EAGAIN);
2505
2506 return rc;
2507}
2508
Jeff Layton1c892542012-05-16 07:13:17 -04002509/**
2510 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2511 * @rdata: the readdata response with list of pages holding data
2512 * @iov: vector in which we should copy the data
2513 * @nr_segs: number of segments in vector
2514 * @offset: offset into file of the first iovec
2515 * @copied: used to return the amount of data copied to the iov
2516 *
2517 * This function copies data from a list of pages in a readdata response into
2518 * an array of iovecs. It will first calculate where the data should go
2519 * based on the info in the readdata and then copy the data into that spot.
2520 */
2521static ssize_t
2522cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2523 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2524{
2525 int rc = 0;
2526 struct iov_iter ii;
2527 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002528 ssize_t remaining = rdata->bytes;
2529 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002530 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002531
2532 /* set up iov_iter and advance to the correct offset */
2533 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2534 iov_iter_advance(&ii, pos);
2535
2536 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002537 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002538 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002539 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002540
2541 /* copy a whole page or whatever's left */
2542 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2543
2544 /* ...but limit it to whatever space is left in the iov */
2545 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2546
2547 /* go while there's data to be copied and no errors */
2548 if (copy && !rc) {
2549 pdata = kmap(page);
2550 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2551 (int)copy);
2552 kunmap(page);
2553 if (!rc) {
2554 *copied += copy;
2555 remaining -= copy;
2556 iov_iter_advance(&ii, copy);
2557 }
2558 }
Jeff Layton1c892542012-05-16 07:13:17 -04002559 }
2560
2561 return rc;
2562}
2563
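/*
 * Work function run when an uncached read response has been received:
 * undo the kmap done at marshalling time (only on success, since a failed
 * read leaves the pages unmapped), then wake up the waiter and drop the
 * work item's reference on the readdata.
 */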
2564static void
2565cifs_uncached_readv_complete(struct work_struct *work)
2566{
2567 struct cifs_readdata *rdata = container_of(work,
2568 struct cifs_readdata, work);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002569 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002570
2571 /* if the result is non-zero then the pages weren't kmapped */
2572 if (rdata->result == 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002573 for (i = 0; i < rdata->nr_pages; i++)
2574 kunmap(rdata->pages[i]);
Jeff Layton1c892542012-05-16 07:13:17 -04002575 }
2576
2577 complete(&rdata->done);
2578 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2579}
2580
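/*
 * Map the receive pages into rdata->iov before the response arrives.
 * nr_iov starts at 1, presumably leaving iov[0] for the response header.
 * A partial final page is zero-filled beyond the expected data, and pages
 * that @remaining bytes will never reach are released immediately.
 * Returns the number of bytes the resulting iovecs can receive.
 */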
2581static int
2582cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
2583 unsigned int remaining)
2584{
2585 int len = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002586 unsigned int i;
2587 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002588
2589 rdata->nr_iov = 1;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002590 for (i = 0; i < nr_pages; i++) {
2591 struct page *page = rdata->pages[i];
2592
Jeff Layton1c892542012-05-16 07:13:17 -04002593 if (remaining >= PAGE_SIZE) {
2594 /* enough data to fill the page */
2595 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2596 rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
2597 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2598 rdata->nr_iov, page->index,
2599 rdata->iov[rdata->nr_iov].iov_base,
2600 rdata->iov[rdata->nr_iov].iov_len);
2601 ++rdata->nr_iov;
2602 len += PAGE_SIZE;
2603 remaining -= PAGE_SIZE;
2604 } else if (remaining > 0) {
2605 /* enough for partial page, fill and zero the rest */
2606 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2607 rdata->iov[rdata->nr_iov].iov_len = remaining;
2608 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2609 rdata->nr_iov, page->index,
2610 rdata->iov[rdata->nr_iov].iov_base,
2611 rdata->iov[rdata->nr_iov].iov_len);
2612 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2613 '\0', PAGE_SIZE - remaining);
2614 ++rdata->nr_iov;
2615 len += remaining;
2616 remaining = 0;
2617 } else {
2618 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002619 rdata->pages[i] = NULL;
2620 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002621 put_page(page);
2622 }
2623 }
2624
2625 return len;
2626}
2627
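/*
 * Uncached read path: split the request into rsize-sized pieces, send an
 * async read for each, then wait for the completions in order of
 * increasing offset and copy the received data into the caller's iovec.
 */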
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002628static ssize_t
2629cifs_iovec_read(struct file *file, const struct iovec *iov,
2630 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631{
Jeff Layton1c892542012-05-16 07:13:17 -04002632 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002633 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002634 ssize_t total_read = 0;
2635 loff_t offset = *poffset;
2636 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002638 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002640 struct cifs_readdata *rdata, *tmp;
2641 struct list_head rdata_list;
2642 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002643
2644 if (!nr_segs)
2645 return 0;
2646
2647 len = iov_length(iov, nr_segs);
2648 if (!len)
2649 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650
Jeff Layton1c892542012-05-16 07:13:17 -04002651 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002652 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002653 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002654 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002656 if (!tcon->ses->server->ops->async_readv)
2657 return -ENOSYS;
2658
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2660 pid = open_file->pid;
2661 else
2662 pid = current->tgid;
2663
Steve Frenchad7a2922008-02-07 23:25:02 +00002664 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002665 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002666
Jeff Layton1c892542012-05-16 07:13:17 -04002667 do {
2668 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2669 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002670
Jeff Layton1c892542012-05-16 07:13:17 -04002671 /* allocate a readdata struct */
2672 rdata = cifs_readdata_alloc(npages,
2673 cifs_uncached_readv_complete);
2674 if (!rdata) {
2675 rc = -ENOMEM;
2676			break;	/* rdata is NULL; cannot kref_put() it below */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002678
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002679 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002680 if (rc)
2681 goto error;
2682
2683 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002684 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002685 rdata->offset = offset;
2686 rdata->bytes = cur_len;
2687 rdata->pid = pid;
2688 rdata->marshal_iov = cifs_uncached_read_marshal_iov;
2689
2690 rc = cifs_retry_async_readv(rdata);
2691error:
2692 if (rc) {
2693 kref_put(&rdata->refcount,
2694 cifs_uncached_readdata_release);
2695 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 }
Jeff Layton1c892542012-05-16 07:13:17 -04002697
2698 list_add_tail(&rdata->list, &rdata_list);
2699 offset += cur_len;
2700 len -= cur_len;
2701 } while (len > 0);
2702
2703	/* if at least one read request was sent successfully, reset rc */
2704 if (!list_empty(&rdata_list))
2705 rc = 0;
2706
2707 /* the loop below should proceed in the order of increasing offsets */
2708restart_loop:
2709 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2710 if (!rc) {
2711 ssize_t copied;
2712
2713 /* FIXME: freezable sleep too? */
2714 rc = wait_for_completion_killable(&rdata->done);
2715 if (rc)
2716 rc = -EINTR;
2717 else if (rdata->result)
2718 rc = rdata->result;
2719 else {
2720 rc = cifs_readdata_to_iov(rdata, iov,
2721 nr_segs, *poffset,
2722 &copied);
2723 total_read += copied;
2724 }
2725
2726 /* resend call if it's a retryable error */
2727 if (rc == -EAGAIN) {
2728 rc = cifs_retry_async_readv(rdata);
2729 goto restart_loop;
2730 }
2731 }
2732 list_del_init(&rdata->list);
2733 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002735
Jeff Layton1c892542012-05-16 07:13:17 -04002736 cifs_stats_bytes_read(tcon, total_read);
2737 *poffset += total_read;
2738
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002739 /* mask nodata case */
2740 if (rc == -ENODATA)
2741 rc = 0;
2742
Jeff Layton1c892542012-05-16 07:13:17 -04002743 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744}
2745
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002746ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002747 unsigned long nr_segs, loff_t pos)
2748{
2749 ssize_t read;
2750
2751 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2752 if (read > 0)
2753 iocb->ki_pos = pos;
2754
2755 return read;
2756}
2757
2758ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2759 unsigned long nr_segs, loff_t pos)
2760{
2761 struct inode *inode;
2762
2763 inode = iocb->ki_filp->f_path.dentry->d_inode;
2764
2765 if (CIFS_I(inode)->clientCanCacheRead)
2766 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2767
2768 /*
2769	 * In strict cache mode we always need to read from the server if we
2770	 * don't hold a level II oplock, because the server can delay mtime
2771	 * changes and so we cannot reliably decide whether to invalidate the
2772	 * inode's cached pages. Reading through the page cache could also
2773	 * fail if there are mandatory locks on pages affected by this read
2774	 * but not on the region from pos to pos+len-1.
2775 */
2776
2777 return cifs_user_readv(iocb, iov, nr_segs, pos);
2778}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
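/*
 * Synchronous read into a kernel buffer (used by the readpage path below).
 * Issues reads of at most rsize bytes at a time, reopening the file handle
 * and retrying on -EAGAIN, and advances *offset as data arrives.
 */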
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002780static ssize_t
2781cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782{
2783 int rc = -EACCES;
2784 unsigned int bytes_read = 0;
2785 unsigned int total_read;
2786 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002787 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002789 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002790 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002791 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002792 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002794 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002795 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002796 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002798 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002799 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002801 /* FIXME: set up handlers for larger reads and/or convert to async */
2802 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2803
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302805 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002806 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302807 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002809 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002810 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002811 server = tcon->ses->server;
2812
2813 if (!server->ops->sync_read) {
2814 free_xid(xid);
2815 return -ENOSYS;
2816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002818 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2819 pid = open_file->pid;
2820 else
2821 pid = current->tgid;
2822
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002824 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002826 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2827 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002828 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002829 /*
2830		 * For Windows ME and 9x we do not want to request more than the
2831		 * negotiated buffer size, since the server would refuse the read.
2832 */
2833 if ((tcon->ses) && !(tcon->ses->capabilities &
2834 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002835 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002836 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002837 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 rc = -EAGAIN;
2839 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002840 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002841 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 if (rc != 0)
2843 break;
2844 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002845 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002846 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002847 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002848 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002849 rc = server->ops->sync_read(xid, open_file, &io_parms,
2850 &bytes_read, &cur_offset,
2851 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853 if (rc || (bytes_read == 0)) {
2854 if (total_read) {
2855 break;
2856 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002857 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 return rc;
2859 }
2860 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002861 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002862 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 }
2864 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002865 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 return total_read;
2867}
2868
Jeff Laytonca83ce32011-04-12 09:13:44 -04002869/*
2870 * If the page is mmap'ed into a process' page tables, then we need to make
2871 * sure that it doesn't change while being written back.
2872 */
2873static int
2874cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2875{
2876 struct page *page = vmf->page;
2877
2878 lock_page(page);
2879 return VM_FAULT_LOCKED;
2880}
2881
2882static struct vm_operations_struct cifs_file_vm_ops = {
2883 .fault = filemap_fault,
2884 .page_mkwrite = cifs_page_mkwrite,
2885};
2886
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002887int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2888{
2889 int rc, xid;
2890 struct inode *inode = file->f_path.dentry->d_inode;
2891
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002892 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002893
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002894 if (!CIFS_I(inode)->clientCanCacheRead) {
2895 rc = cifs_invalidate_mapping(inode);
2896		if (rc) {
2897			free_xid(xid);	/* match the other exit paths */
			return rc;
		}
2898 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002899
2900 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002901 if (rc == 0)
2902 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002903 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002904 return rc;
2905}
2906
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2908{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 int rc, xid;
2910
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002911 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002912 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002914 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002915 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 return rc;
2917 }
2918 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002919 if (rc == 0)
2920 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002921 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 return rc;
2923}
2924
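/*
 * Work function for the readpages path: on success kunmap each page, mark
 * it uptodate and hand a copy to fscache; in every case add the page to
 * the LRU, unlock it and drop the reference taken in cifs_readpages().
 */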
Jeff Layton0471ca32012-05-16 07:13:16 -04002925static void
2926cifs_readv_complete(struct work_struct *work)
2927{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002928 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04002929 struct cifs_readdata *rdata = container_of(work,
2930 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04002931
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002932 for (i = 0; i < rdata->nr_pages; i++) {
2933 struct page *page = rdata->pages[i];
2934
Jeff Layton0471ca32012-05-16 07:13:16 -04002935 lru_cache_add_file(page);
2936
2937 if (rdata->result == 0) {
2938 kunmap(page);
2939 flush_dcache_page(page);
2940 SetPageUptodate(page);
2941 }
2942
2943 unlock_page(page);
2944
2945 if (rdata->result == 0)
2946 cifs_readpage_to_fscache(rdata->mapping->host, page);
2947
2948 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002949 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04002950 }
Jeff Layton6993f742012-05-16 07:13:17 -04002951 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04002952}
2953
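/*
 * Like cifs_uncached_read_marshal_iov(), but for pages in the page cache:
 * pages the expected data will not reach are zero-filled and marked
 * uptodate if they lie beyond the server's apparent EOF, or simply
 * released so that the VFS retries them later.
 */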
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002954static int
2955cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
2956{
2957 int len = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002958 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002959 u64 eof;
2960 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002961 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002962
2963 /* determine the eof that the server (probably) has */
2964 eof = CIFS_I(rdata->mapping->host)->server_eof;
2965 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
2966 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
2967
2968 rdata->nr_iov = 1;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002969 for (i = 0; i < nr_pages; i++) {
2970 struct page *page = rdata->pages[i];
2971
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002972 if (remaining >= PAGE_CACHE_SIZE) {
2973 /* enough data to fill the page */
2974 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2975 rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
2976 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2977 rdata->nr_iov, page->index,
2978 rdata->iov[rdata->nr_iov].iov_base,
2979 rdata->iov[rdata->nr_iov].iov_len);
2980 ++rdata->nr_iov;
2981 len += PAGE_CACHE_SIZE;
2982 remaining -= PAGE_CACHE_SIZE;
2983 } else if (remaining > 0) {
2984 /* enough for partial page, fill and zero the rest */
2985 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2986 rdata->iov[rdata->nr_iov].iov_len = remaining;
2987 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2988 rdata->nr_iov, page->index,
2989 rdata->iov[rdata->nr_iov].iov_base,
2990 rdata->iov[rdata->nr_iov].iov_len);
2991 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2992 '\0', PAGE_CACHE_SIZE - remaining);
2993 ++rdata->nr_iov;
2994 len += remaining;
2995 remaining = 0;
2996 } else if (page->index > eof_index) {
2997 /*
2998 * The VFS will not try to do readahead past the
2999 * i_size, but it's possible that we have outstanding
3000 * writes with gaps in the middle and the i_size hasn't
3001 * caught up yet. Populate those with zeroed out pages
3002 * to prevent the VFS from repeatedly attempting to
3003 * fill them until the writes are flushed.
3004 */
3005 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003006 lru_cache_add_file(page);
3007 flush_dcache_page(page);
3008 SetPageUptodate(page);
3009 unlock_page(page);
3010 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003011 rdata->pages[i] = NULL;
3012 rdata->nr_pages--;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003013 } else {
3014 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003015 lru_cache_add_file(page);
3016 unlock_page(page);
3017 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003018 rdata->pages[i] = NULL;
3019 rdata->nr_pages--;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003020 }
3021 }
3022
3023 return len;
3024}
3025
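/*
 * Readahead entry point: after giving fscache a chance to satisfy the
 * request, batch contiguous pages from @page_list into rsize-sized async
 * read requests.
 */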
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026static int cifs_readpages(struct file *file, struct address_space *mapping,
3027 struct list_head *page_list, unsigned num_pages)
3028{
Jeff Layton690c5e32011-10-19 15:30:16 -04003029 int rc;
3030 struct list_head tmplist;
3031 struct cifsFileInfo *open_file = file->private_data;
3032 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3033 unsigned int rsize = cifs_sb->rsize;
3034 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035
Jeff Layton690c5e32011-10-19 15:30:16 -04003036 /*
3037 * Give up immediately if rsize is too small to read an entire page.
3038 * The VFS will fall back to readpage. We should never reach this
3039 * point however since we set ra_pages to 0 when the rsize is smaller
3040 * than a cache page.
3041 */
3042 if (unlikely(rsize < PAGE_CACHE_SIZE))
3043 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003044
Suresh Jayaraman56698232010-07-05 18:13:25 +05303045 /*
3046 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3047 * immediately if the cookie is negative
3048 */
3049 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3050 &num_pages);
3051 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003052 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303053
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003054 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3055 pid = open_file->pid;
3056 else
3057 pid = current->tgid;
3058
Jeff Layton690c5e32011-10-19 15:30:16 -04003059 rc = 0;
3060 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061
Jeff Layton690c5e32011-10-19 15:30:16 -04003062 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3063 mapping, num_pages);
3064
3065 /*
3066 * Start with the page at end of list and move it to private
3067 * list. Do the same with any following pages until we hit
3068 * the rsize limit, hit an index discontinuity, or run out of
3069 * pages. Issue the async read and then start the loop again
3070 * until the list is empty.
3071 *
3072 * Note that list order is important. The page_list is in
3073 * the order of declining indexes. When we put the pages in
3074 * the rdata->pages, then we want them in increasing order.
3075 */
3076 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003077 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003078 unsigned int bytes = PAGE_CACHE_SIZE;
3079 unsigned int expected_index;
3080 unsigned int nr_pages = 1;
3081 loff_t offset;
3082 struct page *page, *tpage;
3083 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084
3085 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
Jeff Layton690c5e32011-10-19 15:30:16 -04003087 /*
3088 * Lock the page and put it in the cache. Since no one else
3089 * should have access to this page, we're safe to simply set
3090 * PG_locked without checking it first.
3091 */
3092 __set_page_locked(page);
3093 rc = add_to_page_cache_locked(page, mapping,
3094 page->index, GFP_KERNEL);
3095
3096 /* give up if we can't stick it in the cache */
3097 if (rc) {
3098 __clear_page_locked(page);
3099 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101
Jeff Layton690c5e32011-10-19 15:30:16 -04003102 /* move first page to the tmplist */
3103 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3104 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105
Jeff Layton690c5e32011-10-19 15:30:16 -04003106 /* now try and add more pages onto the request */
3107 expected_index = page->index + 1;
3108 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3109 /* discontinuity ? */
3110 if (page->index != expected_index)
3111 break;
3112
3113 /* would this page push the read over the rsize? */
3114 if (bytes + PAGE_CACHE_SIZE > rsize)
3115 break;
3116
3117 __set_page_locked(page);
3118 if (add_to_page_cache_locked(page, mapping,
3119 page->index, GFP_KERNEL)) {
3120 __clear_page_locked(page);
3121 break;
3122 }
3123 list_move_tail(&page->lru, &tmplist);
3124 bytes += PAGE_CACHE_SIZE;
3125 expected_index++;
3126 nr_pages++;
3127 }
3128
Jeff Layton0471ca32012-05-16 07:13:16 -04003129 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003130 if (!rdata) {
3131 /* best to give up if we're out of mem */
3132 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3133 list_del(&page->lru);
3134 lru_cache_add_file(page);
3135 unlock_page(page);
3136 page_cache_release(page);
3137 }
3138 rc = -ENOMEM;
3139 break;
3140 }
3141
Jeff Layton6993f742012-05-16 07:13:17 -04003142 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003143 rdata->mapping = mapping;
3144 rdata->offset = offset;
3145 rdata->bytes = bytes;
3146 rdata->pid = pid;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003147 rdata->marshal_iov = cifs_readpages_marshal_iov;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003148
3149 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3150 list_del(&page->lru);
3151 rdata->pages[rdata->nr_pages++] = page;
3152 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003153
Jeff Layton2a1bb132012-05-16 07:13:17 -04003154 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003155 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003156 for (i = 0; i < rdata->nr_pages; i++) {
3157 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003158 lru_cache_add_file(page);
3159 unlock_page(page);
3160 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 }
Jeff Layton6993f742012-05-16 07:13:17 -04003162 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 break;
3164 }
Jeff Layton6993f742012-05-16 07:13:17 -04003165
3166 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 }
3168
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169 return rc;
3170}
3171
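/*
 * Fill a single page synchronously: try fscache first, then fall back to
 * cifs_read() into the kmapped page, zeroing whatever tail the read did
 * not fill before marking the page uptodate.
 */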
3172static int cifs_readpage_worker(struct file *file, struct page *page,
3173 loff_t *poffset)
3174{
3175 char *read_data;
3176 int rc;
3177
Suresh Jayaraman56698232010-07-05 18:13:25 +05303178 /* Is the page cached? */
3179 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3180 if (rc == 0)
3181 goto read_complete;
3182
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 page_cache_get(page);
3184 read_data = kmap(page);
3185	/* for reads over a certain size we could initiate async read-ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003186
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003188
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 if (rc < 0)
3190 goto io_error;
3191 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003192 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003193
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003194 file->f_path.dentry->d_inode->i_atime =
3195 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003196
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 if (PAGE_CACHE_SIZE > rc)
3198 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3199
3200 flush_dcache_page(page);
3201 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303202
3203 /* send this page to the cache */
3204 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3205
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003207
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003209 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303211
3212read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 return rc;
3214}
3215
3216static int cifs_readpage(struct file *file, struct page *page)
3217{
3218 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3219 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003220 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003222 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223
3224 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303225 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003226 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303227 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 }
3229
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003230 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003231 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
3233 rc = cifs_readpage_worker(file, page, &offset);
3234
3235 unlock_page(page);
3236
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003237 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 return rc;
3239}
3240
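/* return 1 if any handle on the inode is open with write access */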
Steve Frencha403a0a2007-07-26 15:54:16 +00003241static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3242{
3243 struct cifsFileInfo *open_file;
3244
Jeff Layton44772882010-10-15 15:34:03 -04003245 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003246 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003247 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003248 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003249 return 1;
3250 }
3251 }
Jeff Layton44772882010-10-15 15:34:03 -04003252 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003253 return 0;
3254}
3255
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256/* We do not want to update the file size from the server for inodes
3257   open for write, to avoid races with writepage extending the file.
3258   In the future we could consider allowing a refresh of the inode
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003259   only on increases in the file size, but this is tricky to do without
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260   racing with writebehind page caching in the current Linux kernel
3261   design. */
Steve French4b18f2a2008-04-29 00:06:05 +00003262bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263{
Steve Frencha403a0a2007-07-26 15:54:16 +00003264 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003265 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003266
Steve Frencha403a0a2007-07-26 15:54:16 +00003267 if (is_inode_writable(cifsInode)) {
3268 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003269 struct cifs_sb_info *cifs_sb;
3270
Steve Frenchc32a0b62006-01-12 14:41:28 -08003271 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003272 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003273 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003274 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003275 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003276 }
3277
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003278 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003279 return true;
Steve French7ba52632007-02-08 18:14:13 +00003280
Steve French4b18f2a2008-04-29 00:06:05 +00003281 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003282 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003283 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284}
3285
Nick Piggind9414772008-09-24 11:32:59 -04003286static int cifs_write_begin(struct file *file, struct address_space *mapping,
3287 loff_t pos, unsigned len, unsigned flags,
3288 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289{
Nick Piggind9414772008-09-24 11:32:59 -04003290 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3291 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003292 loff_t page_start = pos & PAGE_MASK;
3293 loff_t i_size;
3294 struct page *page;
3295 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296
Joe Perchesb6b38f72010-04-21 03:50:45 +00003297 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003298
Nick Piggin54566b22009-01-04 12:00:53 -08003299 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003300 if (!page) {
3301 rc = -ENOMEM;
3302 goto out;
3303 }
Nick Piggind9414772008-09-24 11:32:59 -04003304
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003305 if (PageUptodate(page))
3306 goto out;
Steve French8a236262007-03-06 00:31:00 +00003307
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003308 /*
3309 * If we write a full page it will be up to date, no need to read from
3310 * the server. If the write is short, we'll end up doing a sync write
3311 * instead.
3312 */
3313 if (len == PAGE_CACHE_SIZE)
3314 goto out;
3315
3316 /*
3317 * optimize away the read when we have an oplock, and we're not
3318 * expecting to use any of the data we'd be reading in. That
3319 * is, when the page lies beyond the EOF, or straddles the EOF
3320 * and the write will cover all of the existing data.
3321 */
3322 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3323 i_size = i_size_read(mapping->host);
3324 if (page_start >= i_size ||
3325 (offset == 0 && (pos + len) >= i_size)) {
3326 zero_user_segments(page, 0, offset,
3327 offset + len,
3328 PAGE_CACHE_SIZE);
3329 /*
3330 * PageChecked means that the parts of the page
3331 * to which we're not writing are considered up
3332 * to date. Once the data is copied to the
3333 * page, it can be set uptodate.
3334 */
3335 SetPageChecked(page);
3336 goto out;
3337 }
3338 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
Nick Piggind9414772008-09-24 11:32:59 -04003340 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003341 /*
3342 * might as well read a page, it is fast enough. If we get
3343 * an error, we don't need to return it. cifs_write_end will
3344 * do a sync write instead since PG_uptodate isn't set.
3345 */
3346 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003347 } else {
3348		/* We could try using another file handle if there is one, but
3349		   how would we lock it to prevent a close of that handle racing
3350		   with this read? In any case, the page will be written out by
Nick Piggind9414772008-09-24 11:32:59 -04003351		   write_end, so this is fine. */
Steve French8a236262007-03-06 00:31:00 +00003352 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003353out:
3354 *pagep = page;
3355 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356}
3357
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303358static int cifs_release_page(struct page *page, gfp_t gfp)
3359{
3360 if (PagePrivate(page))
3361 return 0;
3362
3363 return cifs_fscache_release_page(page, gfp);
3364}
3365
3366static void cifs_invalidate_page(struct page *page, unsigned long offset)
3367{
3368 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3369
3370 if (offset == 0)
3371 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3372}
3373
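/*
 * Write a dirty page back synchronously before it is torn down, and
 * invalidate any fscache copy so a stale page cannot be read back later.
 */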
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003374static int cifs_launder_page(struct page *page)
3375{
3376 int rc = 0;
3377 loff_t range_start = page_offset(page);
3378 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3379 struct writeback_control wbc = {
3380 .sync_mode = WB_SYNC_ALL,
3381 .nr_to_write = 0,
3382 .range_start = range_start,
3383 .range_end = range_end,
3384 };
3385
3386 cFYI(1, "Launder page: %p", page);
3387
3388 if (clear_page_dirty_for_io(page))
3389 rc = cifs_writepage_locked(page, &wbc);
3390
3391 cifs_fscache_invalidate_page(page, page->mapping->host);
3392 return rc;
3393}
3394
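/*
 * Work function run when the server breaks our oplock: flush dirty pages
 * (and wait for them if we are losing the read cache), push cached
 * byte-range locks back to the server, and acknowledge the break unless
 * it was cancelled while the work was queued.
 */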
Tejun Heo9b646972010-07-20 22:09:02 +02003395void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003396{
3397 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3398 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003399 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003400 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003401 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003402 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003403
3404 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003405 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003406 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003407 else
Al Viro8737c932009-12-24 06:47:55 -05003408 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003409 rc = filemap_fdatawrite(inode->i_mapping);
3410 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003411 rc = filemap_fdatawait(inode->i_mapping);
3412 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003413 invalidate_remote_inode(inode);
3414 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003415 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003416 }
3417
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003418 rc = cifs_push_locks(cfile);
3419 if (rc)
3420 cERROR(1, "Push locks rc = %d", rc);
3421
Jeff Layton3bc303c2009-09-21 06:47:50 -04003422 /*
3423	 * Releasing a stale oplock after a recent reconnect of the SMB session
3424	 * using a now-invalid file handle is not a data integrity issue, but do
3425	 * not bother sending an oplock release if the session to the server is
3426	 * still disconnected, since the server has already released the oplock.
3427 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003428 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003429 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3430 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003431 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003432 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003433}
3434
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003435const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 .readpage = cifs_readpage,
3437 .readpages = cifs_readpages,
3438 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003439 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003440 .write_begin = cifs_write_begin,
3441 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303443 .releasepage = cifs_release_page,
3444 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003445 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003447
3448/*
3449 * cifs_readpages requires the server to support a buffer large enough to
3450 * contain the header plus one complete page of data. Otherwise, we need
3451 * to leave cifs_readpages out of the address space operations.
3452 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003453const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003454 .readpage = cifs_readpage,
3455 .writepage = cifs_writepage,
3456 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003457 .write_begin = cifs_write_begin,
3458 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003459 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303460 .releasepage = cifs_release_page,
3461 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003462 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003463};