/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

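/*
 * Map the VFS open flags in O_ACCMODE to the SMB desired-access bits
 * requested from the server on open.
 */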
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

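/*
 * Map VFS open flags to their SMB_O_* equivalents for a POSIX open on
 * servers that support the CIFS Unix extensions.
 */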
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

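/*
 * Map O_CREAT/O_EXCL/O_TRUNC combinations to an SMB create disposition;
 * see the open flag mapping table in cifs_nt_open() below.
 */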
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

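/*
 * Open a file using the Unix extensions POSIX create call and, if the
 * caller asked for inode info, instantiate or update the inode from the
 * returned FILE_UNIX_BASIC_INFO.
 */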
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

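/*
 * Allocate a cifsFileInfo for a newly opened handle, take references on
 * the dentry and tcon link, and add it to the per-tcon and per-inode
 * open file lists.
 */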
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	mutex_init(&cfile->fh_mutex);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_LIST_HEAD(&cfile->llist);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

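/*
 * ->open() for files: try a POSIX open when the Unix extensions allow
 * it, otherwise fall back to the regular NT-style open.
 */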
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means
	 * we end up here, and we can never tell if the caller already has
	 * the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh the inode by passing in a file_info buf to be
	 * returned by CIFSSMBOpen and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server's version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are already writing out data to the server and could
	 * deadlock if we tried to flush data, and since we do not know if
	 * we have data that would invalidate the current end of file on the
	 * server we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	char *tmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = get_xid();

	if (cfile) {
		struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
			cfile->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, tcon, cfile->fid.netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d", rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		tmp = cfile->srch_inf.ntwrk_buf_start;
		if (tmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			cfile->srch_inf.ntwrk_buf_start = NULL;
			if (cfile->srch_inf.smallBuf)
				cifs_small_buf_release(tmp);
			else
				cifs_buf_release(tmp);
		}
		cifs_put_tlink(cfile->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

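/*
 * Allocate and initialize a byte-range lock record; blist and block_q
 * are used to queue and wake waiters blocked on a conflicting lock.
 */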
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

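/*
 * Check one open file's lock list for a range that overlaps the given
 * one. A shared request does not conflict with another shared lock, nor
 * with a lock the same process already holds through the same fid.
 */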
static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cur,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &cfile->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & server->vals->shared_lock_type) &&
			 ((server->ops->compare_fids(cur, cfile) &&
			   current->tgid == li->pid) || type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifsFileInfo *fid, *tmp;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
		rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}
	spin_unlock(&cifs_file_list_lock);

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cfile->llist);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

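/*
 * Decode a struct file_lock into the lock type and lock/unlock/wait
 * flags used by the SMB locking calls below.
 */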
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

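/*
 * Send a mandatory-style byte-range lock or unlock request for the
 * current task (tgid) over this file's netfid.
 */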
static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
		    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
			   current->tgid, length, offset, unlock, lock,
			   (__u8)type, wait, 0);
}

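/*
 * Handle a lock test (F_GETLK): answer from the cached lock state where
 * possible; otherwise probe the server by temporarily setting and then
 * releasing the lock.
 */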
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
				 1, 0, false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				  "range during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
				 type | server->vals->shared_lock_type, 1, 0,
				 false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type | server->vals->shared_lock_type,
					 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error %d unlocking previously locked "
				  "range during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

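/* Move every entry from the source lock list onto the destination list. */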
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

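/* Wake up any waiters and free every cifsLockInfo entry on the list. */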
static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

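/*
 * Send unlock requests for all cached locks of the current thread group
 * that fall within the range described by @flock, batching as many
 * LOCKING_ANDX ranges per request as the server's maxBuf allows. Unlocked
 * entries are parked on a temporary list so they can be restored if the
 * server rejects the request.
 */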
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again
			 * to the file's list if the unlock range request
			 * fails on the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cfile->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}

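/*
 * Handle F_SETLK/F_SETLKW: use POSIX (Unix extensions) locking when
 * negotiated, otherwise record the lock locally and send a mandatory
 * byte-range lock (or an unlock of the range) to the server.
 */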
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, NULL,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0)
			kfree(lock);
		if (rc <= 0)
			goto out;

		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			goto out;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

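/* Entry point for the ->lock() file operation. */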
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

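/*
 * Write @write_size bytes from @write_data at *@offset through the given
 * open file, looping over wsize-sized chunks and retrying on -EAGAIN
 * (reopening the handle first if it was invalidated by a reconnect).
 */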
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}

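/*
 * Find a valid, readable handle for the given inode, optionally filtering
 * by fsuid. Returns the handle with an extra reference held, or NULL.
 */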
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

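/*
 * Find a writable handle for the given inode, preferring handles owned by
 * the current thread group. If only an invalidated handle is found, try to
 * reopen it (up to MAX_REOPEN_ATT attempts) before giving up.
 */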
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops
	   (due to it being zero) during stress testcases so we need to check
	   for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

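/*
 * Write the byte range [from, to) of a cached page back to the server
 * through any writable handle for the inode.
 */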
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

/*
 * Marshal up the iov array, reserving the first one for the header. Also,
 * set wdata->bytes.
 */
static void
cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	struct inode *inode = wdata->cfile->dentry->d_inode;
	loff_t size = i_size_read(inode);

	/* marshal up the pages into iov array */
	wdata->bytes = 0;
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
					 (loff_t)PAGE_CACHE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		wdata->bytes += iov[i + 1].iov_len;
	}
}

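/*
 * ->writepages() for cifs: gather runs of contiguous dirty pages (up to
 * wsize worth), wrap them in a cifs_writedata and send them via the
 * server's async_writev operation, falling back to generic_writepages()
 * when wsize is smaller than a page.
 */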
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
			     end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->marshal_iov = cifs_writepages_marshal_iov;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

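/*
 * Write a single locked page back to the server; the caller is
 * responsible for unlocking the page.
 */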
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

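/*
 * ->write_end() for cifs: if the page is up to date, just dirty it and
 * let writeback push it out; otherwise write the copied range back
 * synchronously via cifs_write().
 */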
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

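/*
 * Strict-cache fsync: flush dirty pages, invalidate the mapping if read
 * caching is not currently permitted by the oplock, then ask the server
 * to flush its copy of the file handle.
 */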
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

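/* fsync for the non-strict case: flush dirty pages and the server handle. */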
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

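/*
 * Allocate @num_pages pages into @pages, releasing anything already
 * allocated if one of the allocations fails.
 */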
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

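/*
 * Compute the number of pages needed for the next chunk of an uncached
 * write; the chunk is capped at wsize and its length is returned through
 * @cur_len.
 */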
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

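/*
 * Marshal the pages of an uncached write into the iov array (iov[0] is
 * reserved for the SMB header), consuming wdata->bytes.
 */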
static void
cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
{
	int i;
	size_t bytes = wdata->bytes;

	/* marshal up the pages into iov array */
	for (i = 0; i < wdata->nr_pages; i++) {
		iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
		iov[i + 1].iov_base = kmap(wdata->pages[i]);
		bytes -= iov[i + 1].iov_len;
	}
}

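/*
 * Completion work for an uncached write: update the server EOF and inode
 * size, signal the waiter, and release the pages (kept on -EAGAIN so the
 * request can be resent).
 */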
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

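/*
 * Core of the uncached write path: copy the user iovec into freshly
 * allocated pages, send the chunks asynchronously, then collect replies
 * in order of increasing offset, resending any chunk that fails with
 * -EAGAIN.
 */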
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

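/*
 * Uncached write: data goes straight to the server, so mark the cached
 * mapping invalid on success.
 */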
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * this extra memory-to-memory copying and use the iovec buffers
	 * directly when constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

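/*
 * Strict-cache write: go through the page cache only when the client
 * holds an oplock that allows caching both reads and writes.
 */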
ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
			   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheAll)
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to write the data to the server
	 * exactly from pos to pos+len-1 rather than flush all affected pages
	 * because it may cause an error with mandatory locks on these pages
	 * but not on the region from pos to pos+len-1.
	 */

	return cifs_user_writev(iocb, iov, nr_segs, pos);
}

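/*
 * Allocate a cifs_readdata with room for @nr_vecs kvecs and arm its
 * completion work function.
 */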
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) +
			sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
		INIT_LIST_HEAD(&rdata->pages);
	}
	return rdata;
}

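/* Drop the file handle reference (if any) and free the readdata. */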
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

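/* Allocate @npages pages onto @list, undoing all of it on failure. */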
static int
cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
{
	int rc = 0;
	struct page *page, *tpage;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		list_add(&page->lru, list);
	}

	if (rc) {
		list_for_each_entry_safe(page, tpage, list, lru) {
			list_del(&page->lru);
			put_page(page);
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct page *page, *tpage;
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		put_page(page);
	}
	cifs_readdata_release(refcount);
}

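/*
 * Issue (or reissue) an async read. If the send fails because the file
 * handle has gone stale, reopen the file and retry until the send no
 * longer returns -EAGAIN.
 */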
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	struct page *page, *tpage;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		ssize_t copy;

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}

		list_del(&page->lru);
		put_page(page);
	}

	return rc;
}

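/*
 * Completion handler for uncached reads: unmap the pages that were kmapped
 * for the receive, then wake the waiter and drop our reference.
 */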
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	/* if the result is non-zero then the pages weren't kmapped */
	if (rdata->result == 0) {
		struct page *page;

		list_for_each_entry(page, &rdata->pages, lru)
			kunmap(page);
	}

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

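/*
 * Marshal the pages of an uncached read into a kvec array: map one kvec per
 * page for "remaining" bytes of response data, zero the tail of a partial
 * page, and release any pages beyond the end of the response.
 */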
static int
cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
			       unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_SIZE;
			remaining -= PAGE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			put_page(page);
		}
	}

	return len;
}

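/*
 * Core of the uncached read path: split the request into rsize-sized async
 * reads, wait for each to complete in order of increasing offset, and copy
 * the returned data into the caller's iovec.
 */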
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			rc = -ENOMEM;
			goto error;
		}

		rc = cifs_read_allocate_pages(&rdata->pages, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_uncached_read_marshal_iov;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime changes - so we can't make a decision about invalidating the
	 * inode. Reading cached pages can also fail if there are mandatory
	 * locks on pages affected by this read but not on the region from
	 * pos to pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

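/*
 * Synchronous read helper: read read_size bytes from *offset into read_data,
 * looping over rsize-limited chunks and reopening the file handle if it has
 * been invalidated by a reconnect.
 */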
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early error return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

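/*
 * Completion handler for cached (readpages) reads: on success mark each page
 * uptodate and push it to fscache, then release the references taken when
 * the read was issued.
 */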
static void
cifs_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);
	struct page *page, *tpage;

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		lru_cache_add_file(page);

		if (rdata->result == 0) {
			kunmap(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

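/*
 * Marshal the pages of a readpages request into a kvec array, zero-filling
 * pages that lie beyond the server's end of file so the VFS doesn't keep
 * attempting to fill them.
 */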
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;
	u64 eof;
	pgoff_t eof_index;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_CACHE_SIZE;
			remaining -= PAGE_CACHE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_CACHE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			list_del(&page->lru);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return len;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Read as many pages as possible from fscache. It returns -ENOBUFS
	 * immediately if the cookie is negative.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_readpages_marshal_iov;
		list_splice_init(&tmplist, &rdata->pages);

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

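/*
 * Fill a single page, reading it from fscache if possible and otherwise via
 * a synchronous read from the server, zeroing the tail of a short read.
 */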
static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open for
 * write, to avoid races with writepage extending the file. In the future we
 * could consider allowing a refresh of the inode only on increases in the
 * file size, but this is tricky to do without racing with writebehind page
 * caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end, so leaving the page as-is is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now incorrect file handle, is not a data integrity
	 * issue. Still, do not bother sending an oplock release if the
	 * session to the server is disconnected, since the oplock has
	 * already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->fid.netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};