/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /*
                 * GENERIC_ALL is too much permission to request; it can
                 * cause an unnecessary "access denied" error on create.
                 */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}
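
/*
 * Informal example of the mapping above (editorial): an open(2) with
 * O_RDWR requests GENERIC_READ | GENERIC_WRITE on the wire rather than
 * GENERIC_ALL, so creates on shares that would deny full control can
 * still succeed.
 */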

static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}
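
/*
 * Editorial note: the order of the checks above matters. When O_CREAT,
 * O_EXCL and O_TRUNC are all set, O_EXCL wins and the request becomes
 * FILE_CREATE, failing if the file already exists.
 */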

int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}
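
/*
 * Editorial note: cifs_posix_open() is only attempted when the server
 * advertises CIFS_UNIX_POSIX_PATH_OPS_CAP; callers such as cifs_open()
 * and cifs_reopen_file() below fall back to the NT-style open when it
 * fails or is unsupported.
 */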

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;

        if (!tcon->ses->server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for the FILE_SUPERSEDE
 *      disposition (i.e. create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates an existing file
 *      rather than replacing it the way FILE_SUPERSEDE does
 *      (FILE_SUPERSEDE applies the attributes / metadata passed in on
 *      the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag, and
 *      the read/write flags match reasonably.  O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client.  The flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
                                          desired_access, create_options, fid,
                                          oplock, buf, cifs_sb);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}
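
/*
 * Rough example of how the helpers combine (illustrative only): for an
 * open(2) with O_WRONLY | O_CREAT | O_TRUNC, the ->open() call above is
 * issued with desired_access = GENERIC_WRITE and disposition =
 * FILE_OVERWRITE_IF, per the mapping table above.
 */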

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        mutex_init(&cfile->fh_mutex);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        INIT_LIST_HEAD(&cfile->llist);
        tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

        spin_lock(&cifs_file_list_lock);
        list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
        /* put readable file instances at the head of the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}
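
/*
 * Editorial note: readable instances sit at the head of
 * cinode->openFileList so that code hunting for a handle it can read
 * from finds one without walking the whole list.
 */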

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                     cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;
                int rc = -ENOSYS;

                xid = get_xid();
                if (server->ops->close)
                        rc = server->ops->close(xid, tcon, &cifs_file->fid);
                free_xid(xid);
        }

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        mutex_lock(&cifsi->lock_mutex);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        mutex_unlock(&cifsi->lock_mutex);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}
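
/*
 * Informal usage contract: every cifsFileInfo_get() is balanced by a
 * cifsFileInfo_put(); when the count drops to zero the handle is closed
 * on the server (if still valid) and any cached byte-range locks for
 * this instance are freed.
 */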

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
             inode, file->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned an "
                                          "unexpected error on SMB posix open, "
                                          "disabling posix open support. "
                                          "Check if a server update is "
                                          "available.",
                                          tcon->ses->serverName,
                                          tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                           (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fall through to retry the open the old way on
                 * network i/o or DFS errors.
                 */
        }

        if (!posix_open_ok) {
                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc)
                        goto out;
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (tcon->ses->server->ops->close)
                        tcon->ses->server->ops->close(xid, tcon, &fid);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}
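
/*
 * Flow summary (editorial): cifs_open() tries a POSIX open when the
 * server supports it, otherwise falls back to cifs_nt_open(), then wraps
 * the server handle in a cifsFileInfo via cifs_new_fileinfo() and leaves
 * it in file->private_data for the read/write/close paths.
 */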

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

        /* BB list all locks open on this file and relock */

        return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        __u16 netfid;

        xid = get_xid();
        mutex_lock(&pCifsFile->fh_mutex);
        if (!pCifsFile->invalidHandle) {
                mutex_unlock(&pCifsFile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = pCifsFile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(pCifsFile->tlink);

        /*
         * Can not grab the rename sem here because various ops, including
         * those that already have the rename sem, can end up causing
         * writepage to get called, and if the server was down that means
         * we end up here, and we can never tell if the caller already has
         * the rename_sem.
         */
        full_path = build_path_from_dentry(pCifsFile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&pCifsFile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s",
             inode, pCifsFile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
             le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = pCifsFile->f_flags &
                                      ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * especially in the reconnect path it is important to
                 * retry hard.
                 */
        }

        desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /*
         * Can not refresh inode by passing in the file_info buf to be
         * returned by SMBOpen and then calling get_inode_info with the
         * returned buf, since the file might have write behind data that
         * needs to be flushed and the server version of the file size can
         * be stale. If we knew for sure that the inode was not dirty
         * locally we could do this.
         */
        rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
                         create_options, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                mutex_unlock(&pCifsFile->fh_mutex);
                cFYI(1, "cifs_open returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        pCifsFile->fid.netfid = netfid;
        pCifsFile->invalidHandle = false;
        mutex_unlock(&pCifsFile->fh_mutex);
        pCifsInode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode,
                                full_path, inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode,
                                full_path, NULL, inode->i_sb,
                                xid, NULL);
        }
        /*
         * Else we are writing out data to the server already and could
         * deadlock if we tried to flush data, and since we do not know if
         * we have data that would invalidate the current end of file on
         * the server we can not go to the server to get the new inode
         * info.
         */

        cifs_set_oplock_level(pCifsInode, oplock);

        cifs_relock_file(pCifsFile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}
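
/*
 * Editorial note: cifs_reopen_file() runs when a handle was invalidated,
 * typically after a reconnect; can_flush is false on paths that are
 * already writing data out, which is why the inode info refresh above is
 * conditional.
 */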

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        char *tmp;

        cFYI(1, "Closedir inode = 0x%p", inode);

        xid = get_xid();

        if (cfile) {
                struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

                cFYI(1, "Freeing private data in close dir");
                spin_lock(&cifs_file_list_lock);
                if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                        cfile->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        rc = CIFSFindClose(xid, tcon, cfile->fid.netfid);
                        cFYI(1, "Closing uncompleted readdir with rc %d", rc);
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                } else
                        spin_unlock(&cifs_file_list_lock);
                tmp = cfile->srch_inf.ntwrk_buf_start;
                if (tmp) {
                        cFYI(1, "closedir free smb buf in srch struct");
                        cfile->srch_inf.ntwrk_buf_start = NULL;
                        if (cfile->srch_inf.smallBuf)
                                cifs_small_buf_release(tmp);
                        else
                                cifs_buf_release(tmp);
                }
                cifs_put_tlink(cfile->tlink);
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cur,
                            struct cifsLockInfo **conf_lock)
{
        struct cifsLockInfo *li;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &cfile->llist, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                else if ((type & server->vals->shared_lock_type) &&
                         ((server->ops->compare_fids(cur, cfile) &&
                           current->tgid == li->pid) || type == li->type))
                        continue;
                else {
                        *conf_lock = li;
                        return true;
                }
        }
        return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock)
{
        bool rc = false;
        struct cifsFileInfo *fid, *tmp;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        spin_lock(&cifs_file_list_lock);
        list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
                rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
                                                 cfile, conf_lock);
                if (rc)
                        break;
        }
        spin_unlock(&cifs_file_list_lock);

        return rc;
}
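
/*
 * Informal reading of the conflict rules above: only overlapping ranges
 * can conflict, and an overlap is tolerated when the request is for a
 * shared lock and the existing lock is also shared, or was taken by the
 * same process on the same file handle. Everything else that overlaps
 * conflicts.
 */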

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}
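
/*
 * Editorial sketch of the caller contract: on return 0 the flock
 * structure already describes the answer (the conflicting lock, or
 * F_UNLCK); on return 1 the caller must still ask the server, as
 * cifs_getlk() below does.
 */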

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        mutex_lock(&cinode->lock_mutex);
        list_add_tail(&lock->llist, &cfile->llist);
        mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist);
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                mutex_unlock(&cinode->lock_mutex);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                mutex_lock(&cinode->lock_mutex);
                list_del_init(&lock->blist);
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}
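
/*
 * Editorial note: the wait_event_interruptible() condition above tests
 * that lock->blist is self-linked again, i.e. that the holder of the
 * conflicting lock removed us from its blocker list (see
 * cifs_del_lock_waiters()) before waking us, after which we retry.
 */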

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        mutex_lock(&cinode->lock_mutex);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        mutex_unlock(&cinode->lock_mutex);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}
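
/*
 * Informal contrast of the two styles: the posix path keeps lock state
 * in the VFS via posix_lock_file() while brlocks can still be cached,
 * whereas the mandatory path tracks cifsLockInfo entries per file handle
 * and talks to the server itself.
 */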

static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        kfree(buf);
        free_xid(xid);
        return rc;
}
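
/*
 * Editorial note: the loop above makes one pass per entry of types[]
 * because a single LOCKING_ANDX request carries ranges of only one lock
 * type, and it batches at most max_num ranges per request so the buffer
 * fits within the server's advertised maxBuf.
 */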

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                free_xid(xid);
                return rc;
        }

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_mutex that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                return cifs_push_posix_locks(cfile);

        return cifs_push_mandatory_locks(cfile);
}
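
/*
 * Editorial note: which push routine runs depends on the mount and the
 * server; the posix-style push requires CIFS_UNIX_FCNTL_CAP and is
 * disabled by the CIFS_MOUNT_NOPOSIXBRL mount flag, everything else uses
 * the mandatory-lock path.
 */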

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
                bool *wait_flag, struct TCP_Server_Info *server)
{
        if (flock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
        if (flock->fl_flags & FL_FLOCK)
                cFYI(1, "Flock");
        if (flock->fl_flags & FL_SLEEP) {
                cFYI(1, "Blocking lock");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cFYI(1, "Process suspended by mandatory locking - "
                        "not implemented yet");
        if (flock->fl_flags & FL_LEASE)
                cFYI(1, "Lease on file - not implemented yet");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

        *type = server->vals->large_lock_type;
        if (flock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
                *type |= server->vals->unlock_lock_type;
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
                *type |= server->vals->exclusive_lock_type;
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
                *type |= server->vals->shared_lock_type;
                *lock = 1;
        } else
                cFYI(1, "Unknown type of lock");
}

static int
cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
                    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
        return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid,
                           current->tgid, length, offset, unlock, lock,
                           (__u8)type, wait, 0);
}
1144
1145static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001146cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001147 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001148{
1149 int rc = 0;
1150 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001151 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1152 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001153 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001154 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001156 if (posix_lck) {
1157 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001158
1159 rc = cifs_posix_lock_test(file, flock);
1160 if (!rc)
1161 return rc;
1162
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001163 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164 posix_lock_type = CIFS_RDLCK;
1165 else
1166 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001167 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001168 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001169 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 return rc;
1171 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001172
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001173 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001174 if (!rc)
1175 return rc;
1176
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001177 /* BB we could chain these into one lock request BB */
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001178 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
1179 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001180 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001181 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1182 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 flock->fl_type = F_UNLCK;
1184 if (rc != 0)
1185 			cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001186				   "range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001187 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001188 }
1189
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001190 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001191 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001192 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001193 }
1194
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001195 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1196 type | server->vals->shared_lock_type, 1, 0,
1197 false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001198 if (rc == 0) {
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001199 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1200 type | server->vals->shared_lock_type,
1201 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001202 flock->fl_type = F_RDLCK;
1203 if (rc != 0)
1204 			cERROR(1, "Error %d unlocking previously locked "
1205 			       "range during test of lock", rc);
1206 } else
1207 flock->fl_type = F_WRLCK;
1208
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001209 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001210}
1211
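/*
 * Helpers for unlock handling: cifs_move_llist() splices cached lock
 * entries onto another list head, while cifs_free_llist() wakes any
 * waiters and frees each entry outright.
 */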
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001212static void
1213cifs_move_llist(struct list_head *source, struct list_head *dest)
1214{
1215 struct list_head *li, *tmp;
1216 list_for_each_safe(li, tmp, source)
1217 list_move(li, dest);
1218}
1219
1220static void
1221cifs_free_llist(struct list_head *llist)
1222{
1223 struct cifsLockInfo *li, *tmp;
1224 list_for_each_entry_safe(li, tmp, llist, llist) {
1225 cifs_del_lock_waiters(li);
1226 list_del(&li->llist);
1227 kfree(li);
1228 }
1229}
1230
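/*
 * Unlock a byte range. Walks the file's cached lock list twice (once
 * per lock type), batching matching ranges into a LOCKING_ANDX_RANGE
 * array sized to fit the server's maxBuf, and moves the locks back to
 * the file's list if an unlock request fails on the server.
 */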
1231static int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001232cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1233 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001234{
1235 int rc = 0, stored_rc;
1236 int types[] = {LOCKING_ANDX_LARGE_FILES,
1237 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1238 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001239 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001240 LOCKING_ANDX_RANGE *buf, *cur;
1241 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1242 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1243 struct cifsLockInfo *li, *tmp;
1244 __u64 length = 1 + flock->fl_end - flock->fl_start;
1245 struct list_head tmp_llist;
1246
1247 INIT_LIST_HEAD(&tmp_llist);
1248
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001249 /*
1250 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1251 * and check it for zero before using.
1252 */
1253 max_buf = tcon->ses->server->maxBuf;
1254 if (!max_buf)
1255 return -EINVAL;
1256
1257 max_num = (max_buf - sizeof(struct smb_hdr)) /
1258 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001259 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1260 if (!buf)
1261 return -ENOMEM;
1262
1263 mutex_lock(&cinode->lock_mutex);
1264 for (i = 0; i < 2; i++) {
1265 cur = buf;
1266 num = 0;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001267 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001268 if (flock->fl_start > li->offset ||
1269 (flock->fl_start + length) <
1270 (li->offset + li->length))
1271 continue;
1272 if (current->tgid != li->pid)
1273 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001274 if (types[i] != li->type)
1275 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001276 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001277 /*
1278 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001279 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001280 */
1281 list_del(&li->llist);
1282 cifs_del_lock_waiters(li);
1283 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001284 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001285 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001286 cur->Pid = cpu_to_le16(li->pid);
1287 cur->LengthLow = cpu_to_le32((u32)li->length);
1288 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1289 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1290 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1291 /*
1292			 * We need to save the lock here so we can add it back to
1293 * the file's list if the unlock range request fails on
1294 * the server.
1295 */
1296 list_move(&li->llist, &tmp_llist);
1297 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001298 stored_rc = cifs_lockv(xid, tcon,
1299 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001300 li->type, num, 0, buf);
1301 if (stored_rc) {
1302 /*
1303 * We failed on the unlock range
1304 * request - add all locks from the tmp
1305 * list to the head of the file's list.
1306 */
1307 cifs_move_llist(&tmp_llist,
1308 &cfile->llist);
1309 rc = stored_rc;
1310 } else
1311 /*
1312				 * The unlock range request succeeded -
1313 * free the tmp list.
1314 */
1315 cifs_free_llist(&tmp_llist);
1316 cur = buf;
1317 num = 0;
1318 } else
1319 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001320 }
1321 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001322 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001323 types[i], num, 0, buf);
1324 if (stored_rc) {
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001325 cifs_move_llist(&tmp_llist, &cfile->llist);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001326 rc = stored_rc;
1327 } else
1328 cifs_free_llist(&tmp_llist);
1329 }
1330 }
1331
1332 mutex_unlock(&cinode->lock_mutex);
1333 kfree(buf);
1334 return rc;
1335}
1336
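/*
 * Handle F_SETLK/F_SETLKW. Uses a POSIX lock call when the protocol
 * extensions allow it; otherwise checks the new lock against cached
 * ones, sends a mandatory lock to the server and caches it locally,
 * or hands an unlock off to cifs_unlock_range().
 */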
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001338cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001339 bool wait_flag, bool posix_lck, int lock, int unlock,
1340 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001341{
1342 int rc = 0;
1343 __u64 length = 1 + flock->fl_end - flock->fl_start;
1344 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1345 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001346 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001347 __u16 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348
1349 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001350 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001351
1352 rc = cifs_posix_lock_set(file, flock);
1353 if (!rc || rc < 0)
1354 return rc;
1355
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001356 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001357 posix_lock_type = CIFS_RDLCK;
1358 else
1359 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001360
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001361 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001362 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001363
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001364 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001365 flock->fl_start, length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001366 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001367 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001368 }
1369
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001370 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001371 struct cifsLockInfo *lock;
1372
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001373 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001374 if (!lock)
1375 return -ENOMEM;
1376
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001377 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001378 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001379 kfree(lock);
1380 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001381 goto out;
1382
Pavel Shilovsky7f924472012-03-28 17:10:25 +04001383 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1384 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001385 if (rc) {
1386 kfree(lock);
1387 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001388 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001389
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001390 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001391 } else if (unlock)
1392 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001393
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001394out:
1395 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001396 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001397 return rc;
1398}
1399
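/*
 * VFS ->lock entry point for fcntl-style byte-range locks. For
 * illustration only, a userspace sketch like the following reaches
 * this function through fcntl(2):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);   (blocks until the lock is granted)
 *
 * The flock is translated into a server lock type plus lock/unlock
 * flags and then dispatched to cifs_getlk() or cifs_setlk().
 */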
1400int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1401{
1402 int rc, xid;
1403 int lock = 0, unlock = 0;
1404 bool wait_flag = false;
1405 bool posix_lck = false;
1406 struct cifs_sb_info *cifs_sb;
1407 struct cifs_tcon *tcon;
1408 struct cifsInodeInfo *cinode;
1409 struct cifsFileInfo *cfile;
1410 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001411 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001412
1413 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001414 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415
1416 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1417 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1418 flock->fl_start, flock->fl_end);
1419
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001420 cfile = (struct cifsFileInfo *)file->private_data;
1421 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001422
1423 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1424 tcon->ses->server);
1425
1426 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001427 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428 cinode = CIFS_I(file->f_path.dentry->d_inode);
1429
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001430 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1432 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1433 posix_lck = true;
1434 /*
1435 * BB add code here to normalize offset and length to account for
1436	 * negative length, which we cannot accept over the wire.
1437 */
1438 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001439 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001440 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001441 return rc;
1442 }
1443
1444 if (!lock && !unlock) {
1445 /*
1446		 * if this is neither a lock nor an unlock request, there is
1447		 * nothing to do since we do not know how to handle it
1448 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001449 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001450 return -EOPNOTSUPP;
1451 }
1452
1453 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1454 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001455 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return rc;
1457}
1458
Jeff Layton597b0272012-03-23 14:40:56 -04001459/*
1460 * update the file size (if needed) after a write. Should be called with
1461 * the inode->i_lock held
1462 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001463void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001464cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1465 unsigned int bytes_written)
1466{
1467 loff_t end_of_write = offset + bytes_written;
1468
1469 if (end_of_write > cifsi->server_eof)
1470 cifsi->server_eof = end_of_write;
1471}
1472
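/*
 * Write data out synchronously, wsize bytes at a time, retrying on
 * -EAGAIN (reopening the handle first if it was invalidated) and
 * updating the cached server EOF and i_size as bytes reach the server.
 */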
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001473static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001474 const char *write_data, size_t write_size,
1475 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476{
1477 int rc = 0;
1478 unsigned int bytes_written = 0;
1479 unsigned int total_written;
1480 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001481 struct cifs_tcon *pTcon;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001482 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001483 struct dentry *dentry = open_file->dentry;
1484 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001485 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486
Jeff Layton7da4b492010-10-15 15:34:00 -04001487 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488
Joe Perchesb6b38f72010-04-21 03:50:45 +00001489 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001490 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491
Jeff Layton13cfb732010-09-29 19:51:11 -04001492 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001493
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001494 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 for (total_written = 0; write_size > total_written;
1497 total_written += bytes_written) {
1498 rc = -EAGAIN;
1499 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001500 struct kvec iov[2];
1501 unsigned int len;
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 /* we could deadlock if we called
1505 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001506 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001508 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 if (rc != 0)
1510 break;
1511 }
Steve French3e844692005-10-03 13:37:24 -07001512
Jeff Laytonca83ce32011-04-12 09:13:44 -04001513 len = min((size_t)cifs_sb->wsize,
1514 write_size - total_written);
1515 /* iov[0] is reserved for smb header */
1516 iov[1].iov_base = (char *)write_data + total_written;
1517 iov[1].iov_len = len;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001518 io_parms.netfid = open_file->fid.netfid;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001519 io_parms.pid = pid;
1520 io_parms.tcon = pTcon;
1521 io_parms.offset = *poffset;
1522 io_parms.length = len;
1523 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1524 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 }
1526 if (rc || (bytes_written == 0)) {
1527 if (total_written)
1528 break;
1529 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001530 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 return rc;
1532 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001533 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001534 spin_lock(&dentry->d_inode->i_lock);
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001535 cifs_update_eof(cifsi, *poffset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001536 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001538 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 }
1540
Steve Frencha4544342005-08-24 13:59:35 -07001541 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
Jeff Layton7da4b492010-10-15 15:34:00 -04001543 if (total_written > 0) {
1544 spin_lock(&dentry->d_inode->i_lock);
1545 if (*poffset > dentry->d_inode->i_size)
1546 i_size_write(dentry->d_inode, *poffset);
1547 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001549 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001550 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 return total_written;
1552}
1553
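/*
 * Find an open handle on the inode that can be used for reading,
 * optionally restricted to handles opened by the current fsuid. The
 * returned handle carries an extra reference the caller must put.
 */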
Jeff Layton6508d902010-09-29 19:51:11 -04001554struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1555 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001556{
1557 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001558 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1559
1560 /* only filter by fsuid on multiuser mounts */
1561 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1562 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001563
Jeff Layton44772882010-10-15 15:34:03 -04001564 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001565 /* we could simply get the first_list_entry since write-only entries
1566 are always at the end of the list but since the first entry might
1567 have a close pending, we go through the whole list */
1568 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001569 if (fsuid_only && open_file->uid != current_fsuid())
1570 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001571 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001572 if (!open_file->invalidHandle) {
1573 /* found a good file */
1574 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001575 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001576 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001577 return open_file;
1578 } /* else might as well continue, and look for
1579 another, or simply have the caller reopen it
1580 again rather than trying to fix this handle */
1581 } else /* write only file */
1582 break; /* write only files are last so must be done */
1583 }
Jeff Layton44772882010-10-15 15:34:03 -04001584 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001585 return NULL;
1586}
Steve French630f3f0c2007-10-25 21:17:17 +00001587
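/*
 * As above, but for writable handles: prefer one opened by the calling
 * process, fall back to any usable handle, and as a last resort try to
 * reopen an invalidated one (up to MAX_REOPEN_ATT attempts).
 */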
Jeff Layton6508d902010-09-29 19:51:11 -04001588struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1589 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001590{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001591 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001592 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001593 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001594 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001595 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001596
Steve French60808232006-04-22 15:53:05 +00001597 /* Having a null inode here (because mapping->host was set to zero by
1598	   the VFS or MM) should not happen but we had reports of an oops (due to
1599	   it being zero) during stress test cases so we need to check for it */
1600
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001601 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001602		cERROR(1, "Null inode passed to find_writable_file");
Steve French60808232006-04-22 15:53:05 +00001603 dump_stack();
1604 return NULL;
1605 }
1606
Jeff Laytond3892292010-11-02 16:22:50 -04001607 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1608
Jeff Layton6508d902010-09-29 19:51:11 -04001609 /* only filter by fsuid on multiuser mounts */
1610 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1611 fsuid_only = false;
1612
Jeff Layton44772882010-10-15 15:34:03 -04001613 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001614refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001615 if (refind > MAX_REOPEN_ATT) {
1616 spin_unlock(&cifs_file_list_lock);
1617 return NULL;
1618 }
Steve French6148a742005-10-05 12:23:19 -07001619 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001620 if (!any_available && open_file->pid != current->tgid)
1621 continue;
1622 if (fsuid_only && open_file->uid != current_fsuid())
1623 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001624 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001625 if (!open_file->invalidHandle) {
1626 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001627 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001628 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001629 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001630 } else {
1631 if (!inv_file)
1632 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001633 }
Steve French6148a742005-10-05 12:23:19 -07001634 }
1635 }
Jeff Layton2846d382008-09-22 21:33:33 -04001636	/* couldn't find usable FH with same pid, try any available */
1637 if (!any_available) {
1638 any_available = true;
1639 goto refind_writable;
1640 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001641
1642 if (inv_file) {
1643 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001644 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001645 }
1646
Jeff Layton44772882010-10-15 15:34:03 -04001647 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001648
1649 if (inv_file) {
1650 rc = cifs_reopen_file(inv_file, false);
1651 if (!rc)
1652 return inv_file;
1653 else {
1654 spin_lock(&cifs_file_list_lock);
1655 list_move_tail(&inv_file->flist,
1656 &cifs_inode->openFileList);
1657 spin_unlock(&cifs_file_list_lock);
1658 cifsFileInfo_put(inv_file);
1659 spin_lock(&cifs_file_list_lock);
1660 ++refind;
1661 goto refind_writable;
1662 }
1663 }
1664
Steve French6148a742005-10-05 12:23:19 -07001665 return NULL;
1666}
1667
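/*
 * Write the dirty bytes [from, to) of a single page back to the server
 * through a writable handle, clamping the range so the write never
 * extends the file.
 */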
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1669{
1670 struct address_space *mapping = page->mapping;
1671 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1672 char *write_data;
1673 int rc = -EFAULT;
1674 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001676 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677
1678 if (!mapping || !mapping->host)
1679 return -EFAULT;
1680
1681 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
1683 offset += (loff_t)from;
1684 write_data = kmap(page);
1685 write_data += from;
1686
1687 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1688 kunmap(page);
1689 return -EIO;
1690 }
1691
1692 /* racing with truncate? */
1693 if (offset > mapping->host->i_size) {
1694 kunmap(page);
1695 return 0; /* don't care */
1696 }
1697
1698 /* check to make sure that we are not extending the file */
1699 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001700 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Jeff Layton6508d902010-09-29 19:51:11 -04001702 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001703 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001704 bytes_written = cifs_write(open_file, open_file->pid,
1705 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001706 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001708 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001709 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001710 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001711 else if (bytes_written < 0)
1712 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001713 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001714 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 rc = -EIO;
1716 }
1717
1718 kunmap(page);
1719 return rc;
1720}
1721
Jeff Laytone9492872012-03-23 14:40:56 -04001722/*
1723 * Marshal up the iov array, reserving the first one for the header. Also,
1724 * set wdata->bytes.
1725 */
1726static void
1727cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1728{
1729 int i;
1730 struct inode *inode = wdata->cfile->dentry->d_inode;
1731 loff_t size = i_size_read(inode);
1732
1733 /* marshal up the pages into iov array */
1734 wdata->bytes = 0;
1735 for (i = 0; i < wdata->nr_pages; i++) {
1736 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1737 (loff_t)PAGE_CACHE_SIZE);
1738 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1739 wdata->bytes += iov[i + 1].iov_len;
1740 }
1741}
1742
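/*
 * Writeback address_space operation: gathers runs of contiguous dirty
 * pages (up to wsize worth) into a cifs_writedata and sends each run
 * with one asynchronous write, falling back to generic_writepages()
 * when wsize is smaller than a page.
 */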
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001744 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001746 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1747 bool done = false, scanned = false, range_whole = false;
1748 pgoff_t end, index;
1749 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07001750 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001751 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001752
Steve French37c0eb42005-10-05 14:50:29 -07001753 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001754 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001755 * one page at a time via cifs_writepage
1756 */
1757 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1758 return generic_writepages(mapping, wbc);
1759
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001760 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001761 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001762 end = -1;
1763 } else {
1764 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1765 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1766 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001767 range_whole = true;
1768 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001769 }
1770retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001771 while (!done && index <= end) {
1772 unsigned int i, nr_pages, found_pages;
1773 pgoff_t next = 0, tofind;
1774 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001775
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001776 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1777 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001778
Jeff Laytonc2e87642012-03-23 14:40:55 -04001779 wdata = cifs_writedata_alloc((unsigned int)tofind,
1780 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001781 if (!wdata) {
1782 rc = -ENOMEM;
1783 break;
1784 }
1785
1786 /*
1787 * find_get_pages_tag seems to return a max of 256 on each
1788 * iteration, so we must call it several times in order to
1789 * fill the array or the wsize is effectively limited to
1790 * 256 * PAGE_CACHE_SIZE.
1791 */
1792 found_pages = 0;
1793 pages = wdata->pages;
1794 do {
1795 nr_pages = find_get_pages_tag(mapping, &index,
1796 PAGECACHE_TAG_DIRTY,
1797 tofind, pages);
1798 found_pages += nr_pages;
1799 tofind -= nr_pages;
1800 pages += nr_pages;
1801 } while (nr_pages && tofind && index <= end);
1802
1803 if (found_pages == 0) {
1804 kref_put(&wdata->refcount, cifs_writedata_release);
1805 break;
1806 }
1807
1808 nr_pages = 0;
1809 for (i = 0; i < found_pages; i++) {
1810 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001811 /*
1812 * At this point we hold neither mapping->tree_lock nor
1813 * lock on the page itself: the page may be truncated or
1814 * invalidated (changing page->mapping to NULL), or even
1815 * swizzled back from swapper_space to tmpfs file
1816 * mapping
1817 */
1818
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001819 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001820 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001821 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001822 break;
1823
1824 if (unlikely(page->mapping != mapping)) {
1825 unlock_page(page);
1826 break;
1827 }
1828
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001829 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001830 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001831 unlock_page(page);
1832 break;
1833 }
1834
1835 if (next && (page->index != next)) {
1836 /* Not next consecutive page */
1837 unlock_page(page);
1838 break;
1839 }
1840
1841 if (wbc->sync_mode != WB_SYNC_NONE)
1842 wait_on_page_writeback(page);
1843
1844 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001845 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001846 unlock_page(page);
1847 break;
1848 }
Steve French84d2f072005-10-12 15:32:05 -07001849
Linus Torvaldscb876f42006-12-23 16:19:07 -08001850 /*
1851 * This actually clears the dirty bit in the radix tree.
1852 * See cifs_writepage() for more commentary.
1853 */
1854 set_page_writeback(page);
1855
Steve French84d2f072005-10-12 15:32:05 -07001856 if (page_offset(page) >= mapping->host->i_size) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001857 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001858 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001859 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001860 break;
1861 }
1862
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001863 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001864 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001865 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001866 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001867
1868 /* reset index to refind any pages skipped */
1869 if (nr_pages == 0)
1870 index = wdata->pages[0]->index + 1;
1871
1872 /* put any pages we aren't going to use */
1873 for (i = nr_pages; i < found_pages; i++) {
1874 page_cache_release(wdata->pages[i]);
1875 wdata->pages[i] = NULL;
1876 }
1877
1878 /* nothing to write? */
1879 if (nr_pages == 0) {
1880 kref_put(&wdata->refcount, cifs_writedata_release);
1881 continue;
1882 }
1883
1884 wdata->sync_mode = wbc->sync_mode;
1885 wdata->nr_pages = nr_pages;
1886 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytone9492872012-03-23 14:40:56 -04001887 wdata->marshal_iov = cifs_writepages_marshal_iov;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001888
1889 do {
1890 if (wdata->cfile != NULL)
1891 cifsFileInfo_put(wdata->cfile);
1892 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1893 false);
1894 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001895 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001896 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001897 break;
Steve French37c0eb42005-10-05 14:50:29 -07001898 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001899 wdata->pid = wdata->cfile->pid;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001900 rc = cifs_async_writev(wdata);
1901 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001902
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001903 for (i = 0; i < nr_pages; ++i)
1904 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001905
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001906 /* send failure -- clean up the mess */
1907 if (rc != 0) {
1908 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001909 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001910 redirty_page_for_writepage(wbc,
1911 wdata->pages[i]);
1912 else
1913 SetPageError(wdata->pages[i]);
1914 end_page_writeback(wdata->pages[i]);
1915 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001916 }
Jeff Layton941b8532011-01-11 07:24:01 -05001917 if (rc != -EAGAIN)
1918 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001919 }
1920 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001921
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001922 wbc->nr_to_write -= nr_pages;
1923 if (wbc->nr_to_write <= 0)
1924 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001925
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001926 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001927 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001928
Steve French37c0eb42005-10-05 14:50:29 -07001929 if (!scanned && !done) {
1930 /*
1931 * We hit the last page and there is more work to be done: wrap
1932 * back to the start of the file
1933 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001934 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001935 index = 0;
1936 goto retry;
1937 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001938
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001939 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001940 mapping->writeback_index = index;
1941
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 return rc;
1943}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001945static int
1946cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001948 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001949 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001951 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952/* BB add check for wbc flags */
1953 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001954 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001955 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001956
1957 /*
1958 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1959 *
1960 * A writepage() implementation always needs to do either this,
1961 * or re-dirty the page with "redirty_page_for_writepage()" in
1962 * the case of a failure.
1963 *
1964 * Just unlocking the page will cause the radix tree tag-bits
1965 * to fail to update with the state of the page correctly.
1966 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001967 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001968retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001970 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1971 goto retry_write;
1972 else if (rc == -EAGAIN)
1973 redirty_page_for_writepage(wbc, page);
1974 else if (rc != 0)
1975 SetPageError(page);
1976 else
1977 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001978 end_page_writeback(page);
1979 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001980 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 return rc;
1982}
1983
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001984static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1985{
1986 int rc = cifs_writepage_locked(page, wbc);
1987 unlock_page(page);
1988 return rc;
1989}
1990
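/*
 * ->write_end for the address_space: push the copied bytes straight to
 * the server when the page is not uptodate, otherwise just dirty the
 * page, and extend i_size if the write went past it.
 */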
Nick Piggind9414772008-09-24 11:32:59 -04001991static int cifs_write_end(struct file *file, struct address_space *mapping,
1992 loff_t pos, unsigned len, unsigned copied,
1993 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994{
Nick Piggind9414772008-09-24 11:32:59 -04001995 int rc;
1996 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00001997 struct cifsFileInfo *cfile = file->private_data;
1998 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1999 __u32 pid;
2000
2001 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2002 pid = cfile->pid;
2003 else
2004 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
Joe Perchesb6b38f72010-04-21 03:50:45 +00002006 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2007 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002008
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002009 if (PageChecked(page)) {
2010 if (copied == len)
2011 SetPageUptodate(page);
2012 ClearPageChecked(page);
2013 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002014 SetPageUptodate(page);
2015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002017 char *page_data;
2018 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002019 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002020
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002021 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022		/* this is probably better than directly calling
2023		   cifs_partialpagewrite since here the file handle is
2024		   known, which we might as well leverage */
2025		/* BB check if anything else is missing out of ppw,
2026		   such as updating the last write time */
2027 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002028 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002029 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002031
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002032 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002033 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002034 rc = copied;
2035 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 set_page_dirty(page);
2037 }
2038
Nick Piggind9414772008-09-24 11:32:59 -04002039 if (rc > 0) {
2040 spin_lock(&inode->i_lock);
2041 if (pos > inode->i_size)
2042 i_size_write(inode, pos);
2043 spin_unlock(&inode->i_lock);
2044 }
2045
2046 unlock_page(page);
2047 page_cache_release(page);
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 return rc;
2050}
2051
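/*
 * fsync for strict cache mode: flush dirty pages, invalidate the
 * mapping when the read cache can no longer be trusted, then ask the
 * server to flush the file unless the mount disabled server syncs.
 */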
Josef Bacik02c24a82011-07-16 20:44:56 -04002052int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2053 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002055 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002057 struct cifs_tcon *tcon;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002058 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002059 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002060 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
Josef Bacik02c24a82011-07-16 20:44:56 -04002062 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2063 if (rc)
2064 return rc;
2065 mutex_lock(&inode->i_mutex);
2066
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002067 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Joe Perchesb6b38f72010-04-21 03:50:45 +00002069 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002070 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002071
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002072 if (!CIFS_I(inode)->clientCanCacheRead) {
2073 rc = cifs_invalidate_mapping(inode);
2074 if (rc) {
2075 cFYI(1, "rc: %d during invalidate phase", rc);
2076 rc = 0; /* don't care about it in fsync */
2077 }
2078 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002079
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002080 tcon = tlink_tcon(smbfile->tlink);
2081 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07002082 rc = CIFSSMBFlush(xid, tcon, smbfile->fid.netfid);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002083
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002084 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002085 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002086 return rc;
2087}
2088
Josef Bacik02c24a82011-07-16 20:44:56 -04002089int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002090{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002091 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002092 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002093 struct cifs_tcon *tcon;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002094 struct cifsFileInfo *smbfile = file->private_data;
2095 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002096 struct inode *inode = file->f_mapping->host;
2097
2098 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2099 if (rc)
2100 return rc;
2101 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002102
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002103 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002104
2105 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2106 file->f_path.dentry->d_name.name, datasync);
2107
2108 tcon = tlink_tcon(smbfile->tlink);
2109 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07002110 rc = CIFSSMBFlush(xid, tcon, smbfile->fid.netfid);
Steve Frenchb298f222009-02-21 21:17:43 +00002111
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002112 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002113 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 return rc;
2115}
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117/*
2118 * As file closes, flush all cached write data for this inode checking
2119 * for write behind errors.
2120 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002121int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002123 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 int rc = 0;
2125
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002126 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002127 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002128
Joe Perchesb6b38f72010-04-21 03:50:45 +00002129 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
2131 return rc;
2132}
2133
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002134static int
2135cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2136{
2137 int rc = 0;
2138 unsigned long i;
2139
2140 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002141 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002142 if (!pages[i]) {
2143 /*
2144 * save number of pages we have already allocated and
2145 * return with ENOMEM error
2146 */
2147 num_pages = i;
2148 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002149 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002150 }
2151 }
2152
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002153 if (rc) {
2154 for (i = 0; i < num_pages; i++)
2155 put_page(pages[i]);
2156 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002157 return rc;
2158}
2159
2160static inline
2161size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2162{
2163 size_t num_pages;
2164 size_t clen;
2165
2166 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002167 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002168
2169 if (cur_len)
2170 *cur_len = clen;
2171
2172 return num_pages;
2173}
2174
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002175static void
2176cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2177{
2178 int i;
2179 size_t bytes = wdata->bytes;
2180
2181 /* marshal up the pages into iov array */
2182 for (i = 0; i < wdata->nr_pages; i++) {
Steve Frenchc7ad42b2012-03-23 16:30:56 -05002183 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002184 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2185 bytes -= iov[i + 1].iov_len;
2186 }
2187}
2188
2189static void
2190cifs_uncached_writev_complete(struct work_struct *work)
2191{
2192 int i;
2193 struct cifs_writedata *wdata = container_of(work,
2194 struct cifs_writedata, work);
2195 struct inode *inode = wdata->cfile->dentry->d_inode;
2196 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2197
2198 spin_lock(&inode->i_lock);
2199 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2200 if (cifsi->server_eof > inode->i_size)
2201 i_size_write(inode, cifsi->server_eof);
2202 spin_unlock(&inode->i_lock);
2203
2204 complete(&wdata->done);
2205
2206 if (wdata->result != -EAGAIN) {
2207 for (i = 0; i < wdata->nr_pages; i++)
2208 put_page(wdata->pages[i]);
2209 }
2210
2211 kref_put(&wdata->refcount, cifs_writedata_release);
2212}
2213
2214/* attempt to send write to server, retry on any -EAGAIN errors */
2215static int
2216cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2217{
2218 int rc;
2219
2220 do {
2221 if (wdata->cfile->invalidHandle) {
2222 rc = cifs_reopen_file(wdata->cfile, false);
2223 if (rc != 0)
2224 continue;
2225 }
2226 rc = cifs_async_writev(wdata);
2227 } while (rc == -EAGAIN);
2228
2229 return rc;
2230}
2231
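/*
 * Uncached write path: copy the user iovec into freshly allocated
 * pages, chunk the data into wsize-sized cifs_writedata requests, send
 * them asynchronously, and then collect the results in order of
 * increasing offset.
 */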
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002232static ssize_t
2233cifs_iovec_write(struct file *file, const struct iovec *iov,
2234 unsigned long nr_segs, loff_t *poffset)
2235{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002236 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002237 size_t copied, len, cur_len;
2238 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002239 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002240 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002241 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002242 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002243 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002244 struct cifs_writedata *wdata, *tmp;
2245 struct list_head wdata_list;
2246 int rc;
2247 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002248
2249 len = iov_length(iov, nr_segs);
2250 if (!len)
2251 return 0;
2252
2253 rc = generic_write_checks(file, poffset, &len, 0);
2254 if (rc)
2255 return rc;
2256
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002257 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002258 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002259 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002260 tcon = tlink_tcon(open_file->tlink);
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002261 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002262
2263 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2264 pid = open_file->pid;
2265 else
2266 pid = current->tgid;
2267
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002268 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002269 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002270 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002271
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002272 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2273 wdata = cifs_writedata_alloc(nr_pages,
2274 cifs_uncached_writev_complete);
2275 if (!wdata) {
2276 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002277 break;
2278 }
2279
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002280 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2281 if (rc) {
2282 kfree(wdata);
2283 break;
2284 }
2285
2286 save_len = cur_len;
2287 for (i = 0; i < nr_pages; i++) {
2288 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2289 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2290 0, copied);
2291 cur_len -= copied;
2292 iov_iter_advance(&it, copied);
2293 }
2294 cur_len = save_len - cur_len;
2295
2296 wdata->sync_mode = WB_SYNC_ALL;
2297 wdata->nr_pages = nr_pages;
2298 wdata->offset = (__u64)offset;
2299 wdata->cfile = cifsFileInfo_get(open_file);
2300 wdata->pid = pid;
2301 wdata->bytes = cur_len;
2302 wdata->marshal_iov = cifs_uncached_marshal_iov;
2303 rc = cifs_uncached_retry_writev(wdata);
2304 if (rc) {
2305 kref_put(&wdata->refcount, cifs_writedata_release);
2306 break;
2307 }
2308
2309 list_add_tail(&wdata->list, &wdata_list);
2310 offset += cur_len;
2311 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002312 } while (len > 0);
2313
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002314 /*
2315 * If at least one write was successfully sent, then discard any rc
2316	 * value from the later writes. If those writes succeed, then
2317	 * we'll end up returning whatever was written. If one fails, then
2318	 * we'll get a new rc value from that.
2319 */
2320 if (!list_empty(&wdata_list))
2321 rc = 0;
2322
2323 /*
2324 * Wait for and collect replies for any successful sends in order of
2325 * increasing offset. Once an error is hit or we get a fatal signal
2326 * while waiting, then return without waiting for any more replies.
2327 */
2328restart_loop:
2329 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2330 if (!rc) {
2331 /* FIXME: freezable too? */
2332 rc = wait_for_completion_killable(&wdata->done);
2333 if (rc)
2334 rc = -EINTR;
2335 else if (wdata->result)
2336 rc = wdata->result;
2337 else
2338 total_written += wdata->bytes;
2339
2340 /* resend call if it's a retryable error */
2341 if (rc == -EAGAIN) {
2342 rc = cifs_uncached_retry_writev(wdata);
2343 goto restart_loop;
2344 }
2345 }
2346 list_del_init(&wdata->list);
2347 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002348 }
2349
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002350 if (total_written > 0)
2351 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002352
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002353 cifs_stats_bytes_written(tcon, total_written);
2354 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002355}
2356
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002357ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002358 unsigned long nr_segs, loff_t pos)
2359{
2360 ssize_t written;
2361 struct inode *inode;
2362
2363 inode = iocb->ki_filp->f_path.dentry->d_inode;
2364
2365 /*
2366	 * BB - optimize for the case when signing is disabled: we could drop
2367	 * this extra memory-to-memory copying and use the iovec buffers
2368	 * directly when constructing the write request.
2369 */
2370
2371 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2372 if (written > 0) {
2373 CIFS_I(inode)->invalid_mapping = true;
2374 iocb->ki_pos = pos;
2375 }
2376
2377 return written;
2378}
2379
2380ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2381 unsigned long nr_segs, loff_t pos)
2382{
2383 struct inode *inode;
2384
2385 inode = iocb->ki_filp->f_path.dentry->d_inode;
2386
2387 if (CIFS_I(inode)->clientCanCacheAll)
2388 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2389
2390 /*
2391 * In strict cache mode we need to write the data to the server exactly
2392	 * from pos to pos+len-1 rather than flush all affected pages
2393	 * because it may cause an error with mandatory locks on these pages but
2394	 * not on the region from pos to pos+len-1.
2395 */
2396
2397 return cifs_user_writev(iocb, iov, nr_segs, pos);
2398}
2399
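/*
 * Allocate a cifs_readdata with room for nr_vecs kvecs and set up its
 * refcount, completion, page list, and completion work item.
 */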
Jeff Layton0471ca32012-05-16 07:13:16 -04002400static struct cifs_readdata *
2401cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2402{
2403 struct cifs_readdata *rdata;
2404
2405 rdata = kzalloc(sizeof(*rdata) +
2406 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2407 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002408 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002409 INIT_LIST_HEAD(&rdata->list);
2410 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002411 INIT_WORK(&rdata->work, complete);
2412 INIT_LIST_HEAD(&rdata->pages);
2413 }
2414 return rdata;
2415}
2416
Jeff Layton6993f742012-05-16 07:13:17 -04002417void
2418cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002419{
Jeff Layton6993f742012-05-16 07:13:17 -04002420 struct cifs_readdata *rdata = container_of(refcount,
2421 struct cifs_readdata, refcount);
2422
2423 if (rdata->cfile)
2424 cifsFileInfo_put(rdata->cfile);
2425
Jeff Layton0471ca32012-05-16 07:13:16 -04002426 kfree(rdata);
2427}
2428
Jeff Layton2a1bb132012-05-16 07:13:17 -04002429static int
Jeff Layton1c892542012-05-16 07:13:17 -04002430cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2431{
2432 int rc = 0;
2433 struct page *page, *tpage;
2434 unsigned int i;
2435
2436 for (i = 0; i < npages; i++) {
2437 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2438 if (!page) {
2439 rc = -ENOMEM;
2440 break;
2441 }
2442 list_add(&page->lru, list);
2443 }
2444
2445 if (rc) {
2446 list_for_each_entry_safe(page, tpage, list, lru) {
2447 list_del(&page->lru);
2448 put_page(page);
2449 }
2450 }
2451 return rc;
2452}
2453
2454static void
2455cifs_uncached_readdata_release(struct kref *refcount)
2456{
2457 struct page *page, *tpage;
2458 struct cifs_readdata *rdata = container_of(refcount,
2459 struct cifs_readdata, refcount);
2460
2461 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2462 list_del(&page->lru);
2463 put_page(page);
2464 }
2465 cifs_readdata_release(refcount);
2466}
2467
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = cifs_async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	struct page *page, *tpage;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		ssize_t copy;

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}

		list_del(&page->lru);
		put_page(page);
	}

	return rc;
}

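/*
 * Completion work for uncached reads: unmap the pages that were kmapped
 * when the request was marshalled (only done on success), then wake the
 * waiter and drop the work item's reference to the readdata.
 */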
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	/* if the result is non-zero then the pages weren't kmapped */
	if (rdata->result == 0) {
		struct page *page;

		list_for_each_entry(page, &rdata->pages, lru)
			kunmap(page);
	}

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

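/*
 * Map the pages of an uncached read response into rdata->iov, starting at
 * iov[1] (the receive path uses iov[0] for the response header). Full
 * pages are mapped whole, the last partial page is zero-filled past
 * "remaining", and pages beyond the response length are released early.
 * Returns the number of bytes covered by the resulting iovec array.
 */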
static int
cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
			       unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_SIZE;
			remaining -= PAGE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			put_page(page);
		}
	}

	return len;
}

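/*
 * Uncached read path: split the request into rsize-sized chunks, fire off
 * an async read for each, then collect the completions in order of
 * increasing offset and copy the results into the caller's iovec.
 * Requests that complete with -EAGAIN are reissued.
 */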
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* no readdata to put a reference on yet */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(&rdata->pages, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_uncached_read_marshal_iov;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							nr_segs, *poffset,
							&copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime changes - so we can't make a decision about invalidating
	 * the inode. Page reads can also fail if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

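/*
 * Synchronous read into a kernel buffer, issued in chunks of at most rsize
 * (capped at CIFSMaxBufSize). Stale file handles are reopened and the read
 * retried on -EAGAIN. This is the slow path used by cifs_readpage_worker.
 */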
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * it negotiated since it will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
					CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->fid.netfid;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*poffset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the error path */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

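/*
 * Completion work for readpages: put each page back on the LRU and, if the
 * read succeeded, mark it uptodate and copy it to fscache, then unlock and
 * release it before dropping the work item's readdata reference.
 */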
static void
cifs_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);
	struct page *page, *tpage;

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		lru_cache_add_file(page);

		if (rdata->result == 0) {
			kunmap(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

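/*
 * Marshal the page list into rdata->iov for readpages. Like the uncached
 * variant, except that leftover pages past the server's EOF are zeroed and
 * marked uptodate (see the comment below) instead of simply released.
 */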
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;
	u64 eof;
	pgoff_t eof_index;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_CACHE_SIZE;
			remaining -= PAGE_CACHE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_CACHE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			list_del(&page->lru);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return len;
}

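/*
 * ->readpages() for cifs: let fscache satisfy what it can, then batch
 * contiguous pages from the VFS-supplied list into rsize-sized async read
 * requests.
 */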
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_readpages_marshal_iov;
		list_splice_init(&tmplist, &rdata->pages);

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

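/*
 * Read a single page at *poffset, first trying fscache and falling back to
 * a synchronous cifs_read. On success the tail of the page beyond the data
 * read is zeroed and the page is marked uptodate and copied to fscache.
 */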
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing refreshing the inode only on increases
 * in the file size, but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

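/*
 * ->write_begin() for cifs: grab (or create) the target page and, unless
 * the write covers the whole page or an oplock lets us skip it, pre-read
 * the page so that cifs_write_end copies into up-to-date contents.
 */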
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

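/*
 * ->launder_page() for cifs: write a dirty page back synchronously before
 * it is invalidated, then drop it from fscache.
 */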
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

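/*
 * Workqueue handler for oplock breaks: flush cached data (and, when read
 * caching is lost, wait for writeback and invalidate the mapping), push
 * cached byte-range locks to the server, and acknowledge the break unless
 * it was cancelled.
 */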
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session, using a now incorrect file handle, is not a data
	 * integrity issue. But do not bother sending an oplock release if
	 * the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->fid.netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};