/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

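/*
 * Convert POSIX open flags (the O_ACCMODE bits) into the NT desired
 * access bits requested on an SMB open.
 */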
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

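/*
 * Convert POSIX open flags into the SMB_O_* flags used by the POSIX
 * open/create call of the CIFS Unix extensions.
 */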
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

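/*
 * Map POSIX creation/truncation flags to the SMB create disposition,
 * e.g. O_CREAT | O_TRUNC maps to FILE_OVERWRITE_IF while O_TRUNC alone
 * maps to FILE_OVERWRITE.
 */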
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

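/*
 * Open a file using the SMB POSIX extensions. On success, optionally
 * instantiate or refresh *pinode from the returned FILE_UNIX_BASIC_INFO.
 */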
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

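/*
 * Open a file the traditional (non-POSIX) way through the per-dialect
 * server->ops->open hook, then refresh the inode metadata from the
 * server's response.
 */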
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct match for disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists);
 *      O_CREAT | O_TRUNC is similar, but it truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably. O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, fid, oplock,
                               buf, cifs_sb);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}

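/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it into the per-inode and per-tcon open file lists, and apply
 * any oplock that arrived via a pending open while the handle was
 * being set up.
 */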
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);

        spin_lock(&cifs_file_list_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        /* if readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}

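/* Take an extra reference on the file private data. */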
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                     cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because it may cause an error when we open
                 * this file again and get at least a level II oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

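/*
 * Open the file: register a pending open for lease-break tracking, try a
 * POSIX open when the server supports the Unix extensions, otherwise fall
 * back to the regular NT-style open, and hook up the resulting handle.
 */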
int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
             inode, file->f_flags, full_path);

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fallthrough to retry open the old way on network i/o
                 * or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set mode which we can not set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        /* we are going to update can_cache_brlcks here - need a write access */
        down_write(&cinode->lock_sem);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to push them */
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_write(&cinode->lock_sem);
        return rc;
}

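/*
 * Reopen a file handle that the server has invalidated (e.g. after a
 * reconnect), optionally flushing cached data first, then restore the
 * fid state and re-send any cached byte-range locks.
 */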
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_fid fid;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Can not grab the rename sem here because various ops, including
         * those that already have the rename sem, can end up causing
         * writepage to get called and if the server was down that means
         * we end up here, and we can never tell if the caller already
         * has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
             full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
             le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /*
                 * Fallthrough to retry open the old way on errors; especially
                 * in the reconnect path it is important to retry hard.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /*
         * Can not refresh inode by passing in file_info buf to be returned by
         * CIFSSMBOpen and then calling get_inode_info with returned buf since
         * file might have write behind data that needs to be flushed and server
         * version of file size can be stale. If we knew for sure that inode was
         * not dirty locally we could do this.
         */
        rc = server->ops->open(xid, tcon, full_path, disposition,
                               desired_access, create_options, &fid, &oplock,
                               NULL, cifs_sb);
        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cFYI(1, "cifs_reopen returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to server already and could deadlock if
         * we tried to flush data, and since we do not know if we have data that
         * would invalidate the current end of file on the server we can not go
         * to the server to get the new inode info.
         */

        server->ops->set_fid(cfile, &fid, oplock);
        cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

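/*
 * Release a directory handle: end any in-progress FindFirst/FindNext on
 * the server, free the cached search response buffer, and drop the
 * private data.
 */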
int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cFYI(1, "Closedir inode = 0x%p", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cFYI(1, "Freeing private data in close dir");
        spin_lock(&cifs_file_list_lock);
        if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cFYI(1, "Closing uncompleted readdir with rc %d", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cFYI(1, "closedir free smb buf in srch struct");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

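/*
 * Allocate a cifsLockInfo describing one byte-range lock owned by the
 * current thread group.
 */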
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

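/* Wake up any lock requests blocked on the given lock. */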
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check : 0 - lock op, 1 - read op, 2 - write op */
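/*
 * Check whether any lock cached on one fid overlaps and conflicts with
 * the given range and type, honouring shared-lock semantics for read
 * and write checks.
 */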
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

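/*
 * Check for a conflicting lock across every fid opened on this inode.
 */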
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

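/* Cache a granted lock on the file's per-fid lock list. */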
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

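/*
 * Push all cached mandatory byte-range locks for this fid out to the
 * server, batching as many LOCKING_ANDX ranges per request as the
 * negotiated buffer size allows.
 */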
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

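/*
 * Snapshot the inode's POSIX locks into a private list (the lock_to_push
 * entries are preallocated because we cannot sleep while holding the
 * flock lock), then send each one to the server.
 */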
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = get_xid();

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;

                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             lck->offset, lck->length, NULL,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        free_xid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

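/*
 * Push cached byte-range locks to the server (POSIX or mandatory style,
 * depending on the mount options and server capabilities) and stop
 * caching new ones locally.
 */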
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
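
/*
 * Translate the flags and type of a VFS file_lock request into the lock
 * type and lock/unlock/wait flags used by the active SMB dialect.
 */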
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
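
/*
 * Test whether a lock conflicts with existing locks (F_GETLK). First
 * checks locally cached locks, then asks the server by temporarily
 * setting and releasing a probe lock over the requested range.
 */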
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
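
/* Move all lock entries from the source list to the tail of dest. */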
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
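
/* Wake up any waiters and free every lock entry on the list. */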
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
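
/*
 * Handle an unlock request against the mandatory (non-POSIX) lock style:
 * walk the file's cached lock list twice (once per lock type), batch the
 * ranges falling inside the request into LOCKING_ANDX_RANGE arrays, and
 * send each full batch to the server in a single SMB lock request.
 */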
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
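
/*
 * Set or clear a lock (F_SETLK/F_SETLKW). POSIX-capable mounts go through
 * CIFSSMBPosixLock. For mandatory-style locks, a new cifsLockInfo is
 * allocated and, depending on what cifs_lock_add_if() decides, either
 * recorded locally or first sent to the server via the dialect's
 * mand_lock op; unlocks are handed to the dialect's unlock range op.
 */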
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
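
/*
 * Entry point for the VFS ->lock operation: classify the request, decide
 * whether POSIX lock semantics can be used, then dispatch to cifs_getlk
 * or cifs_setlk.
 */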
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length, which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * Update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held.
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
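
/*
 * Synchronously write out a buffer through an open file handle, looping
 * until all bytes are sent and retrying with a reopened handle on -EAGAIN.
 * On success the cached server EOF and the inode size are brought up to
 * date.
 */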
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
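
/*
 * Find an open handle on the inode that is usable for reading, optionally
 * restricted to handles opened by the current fsuid. Returns the handle
 * with an extra reference held, or NULL if none is available.
 */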
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/*
	 * We could simply get the first_list_entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
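
/*
 * Find an open handle on the inode that is usable for writing, preferring
 * handles owned by the current task. Invalid handles found along the way
 * are reopened (up to MAX_REOPEN_ATT attempts) before giving up.
 */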
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
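
/*
 * Write the dirty range [from, to) of a single page back to the server
 * through any writable handle on the inode; used by cifs_writepage_locked.
 */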
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
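
/*
 * Write back dirty pages for the mapping: gather runs of contiguous dirty
 * pages (up to wsize) into a cifs_writedata request and hand it to the
 * dialect's async write op, redirtying the pages if the send fails with
 * -EAGAIN.
 */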
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
			     end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
			       wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
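
/*
 * Finish a buffered write: mark the page up to date (or push the partial
 * range straight to the server if it is not), update i_size if the write
 * extended the file, and release the page.
 */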
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is probably better than directly calling
		 * partialpage_write since in this function the file handle
		 * is known, which we might as well leverage.
		 */
		/*
		 * BB check if anything else missing out of ppw
		 * such as updating last write time
		 */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		/*
		 * When we use strict cache mode and cifs_strict_writev was run
		 * with level II oplock (indicated by leave_pages_clean field of
		 * CIFS_I(inode)), we can leave pages clean - cifs_strict_writev
		 * sent the data to the server itself.
		 */
		if (!CIFS_I(inode)->leave_pages_clean ||
		    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO))
			set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
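
/*
 * Strict cache variant of fsync: write back and wait on dirty pages,
 * invalidate the mapping when the read cache can no longer be trusted,
 * then ask the server to flush the file via the dialect's flush op.
 */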
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}
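
/*
 * Allocate num_pages pages to back an uncached write request, freeing
 * everything allocated so far if any single allocation fails.
 */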
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}
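
/*
 * Uncached write path: split the user iovec into wsize-sized
 * cifs_writedata requests backed by freshly allocated pages, send them
 * asynchronously, then wait for the replies in offset order, retrying
 * any request that fails with -EAGAIN.
 */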
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If a later write succeeds, we'll end
	 * up returning whatever was written; if it fails, we'll get a new rc
	 * value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

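/*
 * Entry point for uncached writes. On success the page cache no longer
 * matches what the server holds, so mark the mapping invalid; a later
 * cached read will then refetch the data from the server.
 */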
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * this extra memory-to-memory copy and use the iovec buffers to
	 * construct the write request directly.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

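/*
 * Write through the page cache while holding lock_sem, so that no brlock
 * that conflicts with the write can be set up concurrently. When cache_ex
 * is false (read oplock only), the caller has already sent the data to the
 * server, so the pages are left clean and only serve to warm the cache.
 */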
static ssize_t
cifs_pagecache_writev(struct kiocb *iocb, const struct iovec *iov,
		      unsigned long nr_segs, loff_t pos, bool cache_ex)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		if (!cache_ex)
			cinode->leave_pages_clean = true;
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		if (!cache_ex)
			cinode->leave_pages_clean = false;
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}

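/*
 * Strict cache mode write. In short: with an exclusive oplock, write
 * through the page cache; with only a read (level II) oplock, write
 * uncached first and then store the same data in the page cache; with no
 * oplock at all, do the uncached write alone.
 */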
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written, written2;
	/*
	 * We need to store clientCanCacheAll here to prevent races - this
	 * value can change during the execution of generic_file_aio_write.
	 * For CIFS it can only change from true to false, but for SMB2 it
	 * can change in both directions. So we could end up with data stored
	 * in the cache, not marked dirty and never sent to the server, if
	 * the value flips from false to true after cifs_write_end.
	 */
	bool cache_ex = cinode->clientCanCacheAll;
	bool cache_read = cinode->clientCanCacheRead;
	int rc;
	loff_t saved_pos;

	if (cache_ex) {
		if (cap_unix(tcon->ses) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
				tcon->fsUnixInfo.Capability)))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_pagecache_writev(iocb, iov, nr_segs, pos, cache_ex);
	}

	/*
	 * For files without an exclusive oplock in strict cache mode we need
	 * to write the data to the server exactly from pos to pos+len-1
	 * rather than flush all affected pages, because flushing may cause an
	 * error with mandatory locks on those pages but not on the region
	 * from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (!cache_read || written <= 0)
		return written;

	saved_pos = iocb->ki_pos;
	iocb->ki_pos = pos;
	/* we have a read oplock - need to store the data in the page cache */
	if (cap_unix(tcon->ses) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
				tcon->fsUnixInfo.Capability)))
		written2 = generic_file_aio_write(iocb, iov, nr_segs, pos);
	else
		written2 = cifs_pagecache_writev(iocb, iov, nr_segs, pos,
						 cache_ex);
	/* errors occurred during writing - invalidate the page cache */
	if (written2 < 0) {
		rc = cifs_invalidate_mapping(inode);
		if (rc)
			written = (ssize_t)rc;
		else
			iocb->ki_pos = saved_pos;
	}
	return written;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/* unwind only the pages that were actually allocated */
		while (i--) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

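/*
 * Receive "len" bytes from the socket directly into the readdata pages,
 * one page per kvec. A short tail is zero-filled and its length recorded
 * in tailsz; pages past the end of the data are released immediately
 * instead of being held for the caller.
 */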
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

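/*
 * Uncached read path, the mirror image of cifs_iovec_write(): issue up to
 * rsize-sized async reads, then collect the completions in order of
 * increasing offset and copy each buffer into the caller's iovec.
 */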
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/*
			 * Don't jump to the error label here: rdata is NULL,
			 * and the kref_put there would dereference it.
			 */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime changes - so we can't make a decision about invalidating the
	 * inode. Page reads can also fail if there are mandatory locks on
	 * pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

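/*
 * Synchronous read helper used by the readpage paths: issue sync_read
 * calls of at most rsize bytes in a loop, reopening the file handle and
 * retrying whenever the transport returns -EAGAIN.
 */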
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * the server negotiated, since it would refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

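/*
 * Completion work for cached (readahead) reads: pages that were filled
 * successfully are flushed, marked up to date and handed to fscache;
 * every page is unlocked and released either way.
 */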
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
				'\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

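/*
 * Readahead entry point: batch contiguous pages from the VFS list into
 * rsize-sized async read requests. For example (assuming 4 KiB pages and
 * an rsize of 16384), at most four consecutive pages go into one request;
 * a gap in the page indexes or a full batch starts a new request.
 */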
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read-ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write - to avoid races with writepage extending the file. In the
 * future we could consider allowing refreshing the inode only on increases
 * in the file size, but this is tricky to do without racing with writebehind
 * page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end, so it is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

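/*
 * Work item run when the server breaks an oplock: break any matching VFS
 * lease on the inode, write back dirty data and - if caching is no longer
 * allowed - invalidate the cached pages, push cached byte-range locks to
 * the server, and finally acknowledge the break unless it was cancelled.
 */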
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session using a now incorrect file handle is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected, since the oplock has already
	 * been released by the server.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};