/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

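/*
 * Convert the open flags from the VFS into the desired access bits for
 * an NT-style SMB open.
 */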
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can cause
		 * unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

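/*
 * Map the open flags from the VFS onto the SMB_O_* flags used by the
 * POSIX open protocol extension.
 */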
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cFYI(1, "Application %s pid %d has incorrectly set O_EXCL flag "
			"but not O_CREAT on file open. Ignoring O_EXCL",
			current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

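/*
 * Pick the SMB create disposition matching the O_CREAT/O_EXCL/O_TRUNC
 * combination requested by the caller.
 */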
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

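/*
 * Open a file via the POSIX protocol extensions and, if the caller asked
 * for it, instantiate or refresh the inode from the returned metadata.
 */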
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

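/*
 * Open a file with an NT-style create request and refresh the inode
 * metadata from the server afterwards.
 */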
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

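/*
 * Return true if any open instance of the inode currently holds cached
 * byte-range locks.
 */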
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

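/*
 * Allocate the private data for a newly opened file, link it into the
 * inode and tcon open file lists, and store the server file id in it.
 */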
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
	    cifs_has_mand_locks(cinode)) {
		cFYI(1, "Reset oplock val from read to None due to mand locks");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

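/*
 * Take an extra reference on the file private data.
 */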
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

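/*
 * Open callback for regular files: prefer a POSIX open when the server
 * advertises support for it, otherwise fall back to an NT-style open.
 */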
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

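/*
 * Reopen a file handle that was invalidated, typically after the
 * connection to the server was reset. If can_flush is set, write out
 * and refresh the cached inode data first.
 */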
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem, can end up causing writepage to
	 * get called, and if the server was down that means we end up here,
	 * and we can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

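/*
 * Release callback for regular files: drop our reference to the file
 * private data; the last reference closes the handle on the server.
 */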
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

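/*
 * Release callback for directories: close any search still in progress
 * on the server and free the buffered find results.
 */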
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

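/*
 * Allocate and initialize a byte-range lock record for the given range
 * and type, owned by the current thread group.
 */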
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

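/*
 * Wake up all lock requests that are blocked waiting on this lock.
 */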
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read op, 2 - write op */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

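/*
 * Send all cached byte-range locks for this file to the server, batched
 * into as few LOCKING_ANDX requests as the server's buffer size allows.
 */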
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

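/*
 * Push all cached POSIX byte-range locks of the file to the server. The
 * locks are counted under lock_flocks() first so that the lock_to_push
 * records can be allocated with GFP_KERNEL outside of that section.
 */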
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
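
/*
 * Note on the shape of cifs_push_posix_locks(): lock_flocks() takes a
 * spinlock, so the GFP_KERNEL allocations (which may sleep) cannot happen
 * while it is held. Hence the three passes - count the FL_POSIX locks
 * under the spinlock, allocate that many lock_to_push entries with the
 * spinlock dropped, then re-take it and copy the lock data into the
 * preallocated list. The caller's hold on lock_sem is what guarantees the
 * count cannot grow between the passes.
 */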

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
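
/*
 * cifs_push_locks() is the switch point between the two locking models:
 * POSIX byte-range locks (Unix extensions negotiated and not disabled via
 * CIFS_MOUNT_NOPOSIXBRL) go through cifs_push_posix_locks(), everything
 * else through the protocol-specific push_mand_locks op. Once the cached
 * locks have been pushed, can_cache_brlcks is cleared so later lock
 * requests go to the server directly instead of being cached locally.
 */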

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}
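
/*
 * Summary of the fl_type mapping above (server->vals supplies the
 * protocol-specific bit values):
 *
 *	F_WRLCK, F_EXLCK  ->  large_lock_type | exclusive_lock_type, *lock
 *	F_RDLCK, F_SHLCK  ->  large_lock_type | shared_lock_type,    *lock
 *	F_UNLCK           ->  large_lock_type | unlock_lock_type,    *unlock
 *
 * FL_SLEEP is the only flag that changes behaviour here (it makes the
 * request blocking via *wait_flag); FL_ACCESS and FL_LEASE are merely
 * logged as unimplemented.
 */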

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
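
/*
 * The mandatory-lock branch above has no direct "test" verb in the
 * protocol, so it probes: take the requested lock non-blocking (the
 * mand_lock call with lock=1, unlock=0), and if the server grants it,
 * immediately unlock and report F_UNLCK (no conflict). If the exclusive
 * probe fails, a second probe with the shared bit distinguishes "a read
 * lock would still succeed" (report F_RDLCK) from "fully locked"
 * (report F_WRLCK).
 */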
1326
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001327void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001328cifs_move_llist(struct list_head *source, struct list_head *dest)
1329{
1330 struct list_head *li, *tmp;
1331 list_for_each_safe(li, tmp, source)
1332 list_move(li, dest);
1333}
1334
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001335void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001336cifs_free_llist(struct list_head *llist)
1337{
1338 struct cifsLockInfo *li, *tmp;
1339 list_for_each_entry_safe(li, tmp, llist, llist) {
1340 cifs_del_lock_waiters(li);
1341 list_del(&li->llist);
1342 kfree(li);
1343 }
1344}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
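
/*
 * Sizing sketch for the buffer above (numbers illustrative only): each
 * LOCKING_ANDX_RANGE in the large-file format is 20 bytes, so a typical
 * maxBuf on the order of 16 KB allows roughly
 * (16384 - sizeof(struct smb_hdr)) / 20, i.e. several hundred ranges per
 * SMB_COM_LOCKING_ANDX request. The two passes over types[] send
 * exclusive and shared ranges separately because the lock type is a
 * single per-request field, so the two kinds cannot be mixed in one
 * request.
 */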

static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * read-ahead of pages.
		 */
		if (!CIFS_I(inode)->clientCanCacheAll &&
		    CIFS_I(inode)->clientCanCacheRead) {
			cifs_invalidate_mapping(inode);
			cFYI(1, "Set no oplock for inode=%p due to mand locks",
			     inode);
			CIFS_I(inode)->clientCanCacheRead = false;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}
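
/*
 * Note that on the FL_POSIX path cifs_setlk() finishes by recording the
 * operation in the local VFS lock table via posix_lock_file_wait(), so
 * the client's view of its own locks (and their cleanup when the file is
 * closed) stays in sync with what was sent to the server.
 */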

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
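
/*
 * For reference, cifs_lock() implements the ->lock file operation and is
 * reached from userspace byte-range locking, e.g. (illustrative sketch,
 * not part of this file):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * F_SETLKW arrives with FL_SLEEP set (wait_flag = true), F_SETLK without
 * it, and F_GETLK takes the IS_GETLK() branch above.
 */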

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
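
/*
 * Worked example: if server_eof is 4096 and a 512-byte write lands at
 * offset 4096, end_of_write is 4608 and server_eof advances to 4608; a
 * write entirely below the current end of file leaves server_eof
 * untouched.
 */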

static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	     *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
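
/*
 * The loop above never sends more than wsize bytes per request, so one
 * cifs_write() call may take several round trips: with illustrative
 * numbers, a 100 KB buffer against a 64 KB wsize goes out as a 64 KB and
 * then a 36 KB write, with *offset advanced after each successful chunk.
 * A short (but non-zero) server write simply shrinks the step of the for
 * loop rather than erroring out.
 */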

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}
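
/*
 * The reopen dance above is bounded: when the only candidates are
 * invalidated handles, one of them is reopened outside the spinlock and
 * the scan restarts, with refind counting attempts up to MAX_REOPEN_ATT
 * so a handle that repeatedly fails to reopen cannot loop forever.
 */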

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
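
/*
 * Batch sizing above, with illustrative numbers: for a 64 KB wsize and
 * 4 KB pages, tofind = min(16 - 1, end - index) + 1, i.e. at most 16
 * contiguous dirty pages are gathered into one wdata and sent as a
 * single async write; pages that are not consecutive, or that are still
 * under writeback in WB_SYNC_NONE mode, end the batch early and are
 * picked up on the next iteration.
 */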

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
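
/*
 * Offset arithmetic in the !PageUptodate branch, for illustration: with
 * 4 KB pages, a write_end at pos 5000 gives offset = 5000 & 4095 = 904,
 * so only the copied bytes at page offset 904 are pushed synchronously
 * via cifs_write(); an up-to-date page is instead just marked dirty and
 * left for writeback.
 */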

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
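
/*
 * cifs_strict_fsync() differs from cifs_fsync() below only in the
 * invalidation step: when the read oplock has been lost
 * (!clientCanCacheRead) it drops the page cache so a later read goes
 * back to the server. Both variants then issue the protocol-level flush
 * unless the mount disabled it (CIFS_MOUNT_NOSSYNC).
 */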

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}
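
/*
 * get_numpages() example (illustrative numbers): for wsize = 64 KB and
 * len = 100000, clen = min(100000, 65536) = 65536 and
 * num_pages = DIV_ROUND_UP(65536, 4096) = 16, so the uncached write path
 * below allocates 16 pages per wdata chunk.
 */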
2310
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002311static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002312cifs_uncached_writev_complete(struct work_struct *work)
2313{
2314 int i;
2315 struct cifs_writedata *wdata = container_of(work,
2316 struct cifs_writedata, work);
2317 struct inode *inode = wdata->cfile->dentry->d_inode;
2318 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2319
2320 spin_lock(&inode->i_lock);
2321 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2322 if (cifsi->server_eof > inode->i_size)
2323 i_size_write(inode, cifsi->server_eof);
2324 spin_unlock(&inode->i_lock);
2325
2326 complete(&wdata->done);
2327
2328 if (wdata->result != -EAGAIN) {
2329 for (i = 0; i < wdata->nr_pages; i++)
2330 put_page(wdata->pages[i]);
2331 }
2332
2333 kref_put(&wdata->refcount, cifs_writedata_release);
2334}
2335
2336/* attempt to send write to server, retry on any -EAGAIN errors */
2337static int
2338cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2339{
2340 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002341 struct TCP_Server_Info *server;
2342
2343 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002344
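 /*
 * Note that the continue below re-tests the loop condition: a reopen
 * failure other than -EAGAIN falls out of the loop and is returned.
 */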
2345 do {
2346 if (wdata->cfile->invalidHandle) {
2347 rc = cifs_reopen_file(wdata->cfile, false);
2348 if (rc != 0)
2349 continue;
2350 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002351 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002352 } while (rc == -EAGAIN);
2353
2354 return rc;
2355}
2356
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002357static ssize_t
2358cifs_iovec_write(struct file *file, const struct iovec *iov,
2359 unsigned long nr_segs, loff_t *poffset)
2360{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002361 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002362 size_t copied, len, cur_len;
2363 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002364 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002365 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002366 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002367 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002368 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002369 struct cifs_writedata *wdata, *tmp;
2370 struct list_head wdata_list;
2371 int rc;
2372 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002373
2374 len = iov_length(iov, nr_segs);
2375 if (!len)
2376 return 0;
2377
2378 rc = generic_write_checks(file, poffset, &len, 0);
2379 if (rc)
2380 return rc;
2381
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002382 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002383 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002384 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002385 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002386
2387 if (!tcon->ses->server->ops->async_writev)
2388 return -ENOSYS;
2389
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002390 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002391
2392 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2393 pid = open_file->pid;
2394 else
2395 pid = current->tgid;
2396
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002397 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002398 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002399 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002400
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002401 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2402 wdata = cifs_writedata_alloc(nr_pages,
2403 cifs_uncached_writev_complete);
2404 if (!wdata) {
2405 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002406 break;
2407 }
2408
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002409 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2410 if (rc) {
2411 kfree(wdata);
2412 break;
2413 }
2414
2415 save_len = cur_len;
2416 for (i = 0; i < nr_pages; i++) {
2417 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2418 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2419 0, copied);
2420 cur_len -= copied;
2421 iov_iter_advance(&it, copied);
2422 }
2423 cur_len = save_len - cur_len;
2424
2425 wdata->sync_mode = WB_SYNC_ALL;
2426 wdata->nr_pages = nr_pages;
2427 wdata->offset = (__u64)offset;
2428 wdata->cfile = cifsFileInfo_get(open_file);
2429 wdata->pid = pid;
2430 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002431 wdata->pagesz = PAGE_SIZE;
2432 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002433 rc = cifs_uncached_retry_writev(wdata);
2434 if (rc) {
2435 kref_put(&wdata->refcount, cifs_writedata_release);
2436 break;
2437 }
2438
2439 list_add_tail(&wdata->list, &wdata_list);
2440 offset += cur_len;
2441 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002442 } while (len > 0);
2443
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002444 /*
2445 * If at least one write was successfully sent, then discard any rc
 2446 * value from the later writes. If the subsequent waits succeed, then
 2447 * we'll end up returning whatever was written. If they fail, then
 2448 * we'll get a new rc value from that.
2449 */
2450 if (!list_empty(&wdata_list))
2451 rc = 0;
2452
2453 /*
2454 * Wait for and collect replies for any successful sends in order of
2455 * increasing offset. Once an error is hit or we get a fatal signal
2456 * while waiting, then return without waiting for any more replies.
2457 */
2458restart_loop:
2459 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2460 if (!rc) {
2461 /* FIXME: freezable too? */
2462 rc = wait_for_completion_killable(&wdata->done);
2463 if (rc)
2464 rc = -EINTR;
2465 else if (wdata->result)
2466 rc = wdata->result;
2467 else
2468 total_written += wdata->bytes;
2469
2470 /* resend call if it's a retryable error */
2471 if (rc == -EAGAIN) {
2472 rc = cifs_uncached_retry_writev(wdata);
2473 goto restart_loop;
2474 }
2475 }
2476 list_del_init(&wdata->list);
2477 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002478 }
2479
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002480 if (total_written > 0)
2481 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002482
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002483 cifs_stats_bytes_written(tcon, total_written);
2484 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002485}
2486
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002487ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002488 unsigned long nr_segs, loff_t pos)
2489{
2490 ssize_t written;
2491 struct inode *inode;
2492
Al Viro496ad9a2013-01-23 17:07:38 -05002493 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002494
2495 /*
 2496 * BB - optimize the path when signing is disabled. We could drop this
 2497 * extra memory-to-memory copying and use the iovec buffers directly to
 2498 * construct the write request.
2499 */
2500
2501 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2502 if (written > 0) {
2503 CIFS_I(inode)->invalid_mapping = true;
2504 iocb->ki_pos = pos;
2505 }
2506
2507 return written;
2508}
2509
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002510static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002511cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2512 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002513{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002514 struct file *file = iocb->ki_filp;
2515 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2516 struct inode *inode = file->f_mapping->host;
2517 struct cifsInodeInfo *cinode = CIFS_I(inode);
2518 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2519 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002520
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002521 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002522
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002523 sb_start_write(inode->i_sb);
2524
2525 /*
 2526 * We need to hold the sem to be sure nobody modifies the lock list
2527 * with a brlock that prevents writing.
2528 */
2529 down_read(&cinode->lock_sem);
2530 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2531 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002532 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002533 mutex_lock(&inode->i_mutex);
2534 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002535 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002536 mutex_unlock(&inode->i_mutex);
2537 }
2538
2539 if (rc > 0 || rc == -EIOCBQUEUED) {
2540 ssize_t err;
2541
2542 err = generic_write_sync(file, pos, rc);
2543 if (err < 0 && rc > 0)
2544 rc = err;
2545 }
2546
2547 up_read(&cinode->lock_sem);
2548 sb_end_write(inode->i_sb);
2549 return rc;
2550}
2551
2552ssize_t
2553cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2554 unsigned long nr_segs, loff_t pos)
2555{
Al Viro496ad9a2013-01-23 17:07:38 -05002556 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002557 struct cifsInodeInfo *cinode = CIFS_I(inode);
2558 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2559 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2560 iocb->ki_filp->private_data;
2561 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002562 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002563
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002564 if (cinode->clientCanCacheAll) {
2565 if (cap_unix(tcon->ses) &&
2566 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2567 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2568 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2569 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002570 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002571 /*
2572 * For non-oplocked files in strict cache mode we need to write the data
 2573 * to the server exactly from pos to pos+len-1 rather than flush all
 2574 * affected pages because it may cause an error with mandatory locks on
 2575 * these pages but not on the region from pos to pos+len-1.
2576 */
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002577 written = cifs_user_writev(iocb, iov, nr_segs, pos);
2578 if (written > 0 && cinode->clientCanCacheRead) {
2579 /*
 2580 * A Windows 7 server can delay breaking a level2 oplock when a write
 2581 * request comes in - break it on the client to prevent reading
 2582 * stale data.
2583 */
2584 cifs_invalidate_mapping(inode);
2585 cFYI(1, "Set no oplock for inode=%p after a write operation",
2586 inode);
2587 cinode->clientCanCacheRead = false;
2588 }
2589 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002590}
2591
Jeff Layton0471ca32012-05-16 07:13:16 -04002592static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002593cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002594{
2595 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002596
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002597 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2598 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002599 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002600 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002601 INIT_LIST_HEAD(&rdata->list);
2602 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002603 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002604 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002605
Jeff Layton0471ca32012-05-16 07:13:16 -04002606 return rdata;
2607}
2608
Jeff Layton6993f742012-05-16 07:13:17 -04002609void
2610cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002611{
Jeff Layton6993f742012-05-16 07:13:17 -04002612 struct cifs_readdata *rdata = container_of(refcount,
2613 struct cifs_readdata, refcount);
2614
2615 if (rdata->cfile)
2616 cifsFileInfo_put(rdata->cfile);
2617
Jeff Layton0471ca32012-05-16 07:13:16 -04002618 kfree(rdata);
2619}
2620
Jeff Layton2a1bb132012-05-16 07:13:17 -04002621static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002622cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002623{
2624 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002625 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002626 unsigned int i;
2627
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002628 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002629 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2630 if (!page) {
2631 rc = -ENOMEM;
2632 break;
2633 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002634 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002635 }
2636
2637 if (rc) {
 /* stop at the first unallocated slot to avoid put_page(NULL) */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002638 for (i = 0; i < nr_pages && rdata->pages[i]; i++) {
2639 put_page(rdata->pages[i]);
2640 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002641 }
2642 }
2643 return rc;
2644}
2645
2646static void
2647cifs_uncached_readdata_release(struct kref *refcount)
2648{
Jeff Layton1c892542012-05-16 07:13:17 -04002649 struct cifs_readdata *rdata = container_of(refcount,
2650 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002651 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002652
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002653 for (i = 0; i < rdata->nr_pages; i++) {
2654 put_page(rdata->pages[i]);
2655 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002656 }
2657 cifs_readdata_release(refcount);
2658}
2659
2660static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002661cifs_retry_async_readv(struct cifs_readdata *rdata)
2662{
2663 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002664 struct TCP_Server_Info *server;
2665
2666 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002667
2668 do {
2669 if (rdata->cfile->invalidHandle) {
2670 rc = cifs_reopen_file(rdata->cfile, true);
2671 if (rc != 0)
2672 continue;
2673 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002674 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002675 } while (rc == -EAGAIN);
2676
2677 return rc;
2678}
2679
Jeff Layton1c892542012-05-16 07:13:17 -04002680/**
2681 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2682 * @rdata: the readdata response with list of pages holding data
2683 * @iov: vector in which we should copy the data
2684 * @nr_segs: number of segments in vector
2685 * @offset: offset into file of the first iovec
2686 * @copied: used to return the amount of data copied to the iov
2687 *
2688 * This function copies data from a list of pages in a readdata response into
2689 * an array of iovecs. It will first calculate where the data should go
2690 * based on the info in the readdata and then copy the data into that spot.
2691 */
2692static ssize_t
2693cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2694 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2695{
2696 int rc = 0;
2697 struct iov_iter ii;
2698 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002699 ssize_t remaining = rdata->bytes;
2700 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002701 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002702
2703 /* set up iov_iter and advance to the correct offset */
2704 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2705 iov_iter_advance(&ii, pos);
2706
2707 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002708 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002709 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002710 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002711
2712 /* copy a whole page or whatever's left */
2713 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2714
2715 /* ...but limit it to whatever space is left in the iov */
2716 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2717
2718 /* go while there's data to be copied and no errors */
2719 if (copy && !rc) {
2720 pdata = kmap(page);
2721 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2722 (int)copy);
2723 kunmap(page);
2724 if (!rc) {
2725 *copied += copy;
2726 remaining -= copy;
2727 iov_iter_advance(&ii, copy);
2728 }
2729 }
Jeff Layton1c892542012-05-16 07:13:17 -04002730 }
2731
2732 return rc;
2733}
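/*
 * A worked example for cifs_readdata_to_iov(), assuming 4KB pages: for a
 * 20000-byte read that started at file offset 0, the rdata covering bytes
 * 16384-19999 gives pos = 16384 - 0, so the iov_iter is advanced 16384
 * bytes into the user's iovecs before the remaining 3616 bytes are copied
 * out of that rdata's single page.
 */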
2734
2735static void
2736cifs_uncached_readv_complete(struct work_struct *work)
2737{
2738 struct cifs_readdata *rdata = container_of(work,
2739 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002740
2741 complete(&rdata->done);
2742 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2743}
2744
2745static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002746cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2747 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002748{
Jeff Layton8321fec2012-09-19 06:22:32 -07002749 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002750 unsigned int i;
2751 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002752 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002753
Jeff Layton8321fec2012-09-19 06:22:32 -07002754 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002755 for (i = 0; i < nr_pages; i++) {
2756 struct page *page = rdata->pages[i];
2757
Jeff Layton8321fec2012-09-19 06:22:32 -07002758 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002759 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002760 iov.iov_base = kmap(page);
2761 iov.iov_len = PAGE_SIZE;
2762 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2763 i, iov.iov_base, iov.iov_len);
2764 len -= PAGE_SIZE;
2765 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002766 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002767 iov.iov_base = kmap(page);
2768 iov.iov_len = len;
2769 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2770 i, iov.iov_base, iov.iov_len);
2771 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2772 rdata->tailsz = len;
2773 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002774 } else {
2775 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002776 rdata->pages[i] = NULL;
2777 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002778 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002779 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002780 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002781
2782 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2783 kunmap(page);
2784 if (result < 0)
2785 break;
2786
2787 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002788 }
2789
Jeff Layton8321fec2012-09-19 06:22:32 -07002790 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002791}
2792
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002793static ssize_t
2794cifs_iovec_read(struct file *file, const struct iovec *iov,
2795 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796{
Jeff Layton1c892542012-05-16 07:13:17 -04002797 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002798 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002799 ssize_t total_read = 0;
2800 loff_t offset = *poffset;
2801 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002803 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002805 struct cifs_readdata *rdata, *tmp;
2806 struct list_head rdata_list;
2807 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002808
2809 if (!nr_segs)
2810 return 0;
2811
2812 len = iov_length(iov, nr_segs);
2813 if (!len)
2814 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815
Jeff Layton1c892542012-05-16 07:13:17 -04002816 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002817 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002818 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002819 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002821 if (!tcon->ses->server->ops->async_readv)
2822 return -ENOSYS;
2823
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002824 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2825 pid = open_file->pid;
2826 else
2827 pid = current->tgid;
2828
Steve Frenchad7a2922008-02-07 23:25:02 +00002829 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002830 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002831
Jeff Layton1c892542012-05-16 07:13:17 -04002832 do {
2833 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2834 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002835
Jeff Layton1c892542012-05-16 07:13:17 -04002836 /* allocate a readdata struct */
2837 rdata = cifs_readdata_alloc(npages,
2838 cifs_uncached_readv_complete);
2839 if (!rdata) {
2840 rc = -ENOMEM;
 2841 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002843
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002844 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002845 if (rc)
2846 goto error;
2847
2848 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002849 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002850 rdata->offset = offset;
2851 rdata->bytes = cur_len;
2852 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002853 rdata->pagesz = PAGE_SIZE;
2854 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002855
2856 rc = cifs_retry_async_readv(rdata);
2857error:
2858 if (rc) {
2859 kref_put(&rdata->refcount,
2860 cifs_uncached_readdata_release);
2861 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 }
Jeff Layton1c892542012-05-16 07:13:17 -04002863
2864 list_add_tail(&rdata->list, &rdata_list);
2865 offset += cur_len;
2866 len -= cur_len;
2867 } while (len > 0);
2868
 2869 /* if at least one read request was successfully sent, then reset rc */
2870 if (!list_empty(&rdata_list))
2871 rc = 0;
2872
2873 /* the loop below should proceed in the order of increasing offsets */
2874restart_loop:
2875 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2876 if (!rc) {
2877 ssize_t copied;
2878
2879 /* FIXME: freezable sleep too? */
2880 rc = wait_for_completion_killable(&rdata->done);
2881 if (rc)
2882 rc = -EINTR;
2883 else if (rdata->result)
2884 rc = rdata->result;
2885 else {
2886 rc = cifs_readdata_to_iov(rdata, iov,
2887 nr_segs, *poffset,
2888 &copied);
2889 total_read += copied;
2890 }
2891
2892 /* resend call if it's a retryable error */
2893 if (rc == -EAGAIN) {
2894 rc = cifs_retry_async_readv(rdata);
2895 goto restart_loop;
2896 }
2897 }
2898 list_del_init(&rdata->list);
2899 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002901
Jeff Layton1c892542012-05-16 07:13:17 -04002902 cifs_stats_bytes_read(tcon, total_read);
2903 *poffset += total_read;
2904
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002905 /* mask nodata case */
2906 if (rc == -ENODATA)
2907 rc = 0;
2908
Jeff Layton1c892542012-05-16 07:13:17 -04002909 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910}
2911
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002912ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002913 unsigned long nr_segs, loff_t pos)
2914{
2915 ssize_t read;
2916
2917 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2918 if (read > 0)
2919 iocb->ki_pos = pos;
2920
2921 return read;
2922}
2923
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002924ssize_t
2925cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2926 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002927{
Al Viro496ad9a2013-01-23 17:07:38 -05002928 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002929 struct cifsInodeInfo *cinode = CIFS_I(inode);
2930 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2931 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2932 iocb->ki_filp->private_data;
2933 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2934 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002935
2936 /*
 2937 * In strict cache mode we need to read from the server every time
 2938 * if we don't have a level II oplock because the server can delay the
 2939 * mtime change - so we can't make a decision about invalidating the
 2940 * inode. We can also fail reading from the page cache if there are
 2941 * mandatory locks on the pages affected by this read but not on the
 2942 * region from pos to pos+len-1.
2943 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002944 if (!cinode->clientCanCacheRead)
2945 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002946
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002947 if (cap_unix(tcon->ses) &&
2948 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2949 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2950 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2951
2952 /*
 2953 * We need to hold the sem to be sure nobody modifies the lock list
2954 * with a brlock that prevents reading.
2955 */
2956 down_read(&cinode->lock_sem);
2957 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2958 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002959 NULL, CIFS_READ_OP))
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002960 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2961 up_read(&cinode->lock_sem);
2962 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002963}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002965static ssize_t
2966cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967{
2968 int rc = -EACCES;
2969 unsigned int bytes_read = 0;
2970 unsigned int total_read;
2971 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002972 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002974 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002975 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002976 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002977 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002979 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002980 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002981 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002983 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002984 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002986 /* FIXME: set up handlers for larger reads and/or convert to async */
2987 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2988
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302990 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002991 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302992 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002994 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002995 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002996 server = tcon->ses->server;
2997
2998 if (!server->ops->sync_read) {
2999 free_xid(xid);
3000 return -ENOSYS;
3001 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003003 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3004 pid = open_file->pid;
3005 else
3006 pid = current->tgid;
3007
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00003009 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003011 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3012 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003013 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003014 /*
 3015 * For Windows ME and 9x we do not want to request more than the server
 3016 * negotiated since it will refuse the read then.
3017 */
3018 if ((tcon->ses) && !(tcon->ses->capabilities &
3019 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03003020 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04003021 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07003022 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 rc = -EAGAIN;
3024 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00003025 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003026 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 if (rc != 0)
3028 break;
3029 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003030 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003031 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003032 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003033 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003034 rc = server->ops->sync_read(xid, open_file, &io_parms,
3035 &bytes_read, &cur_offset,
3036 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 }
3038 if (rc || (bytes_read == 0)) {
3039 if (total_read) {
3040 break;
3041 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003042 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 return rc;
3044 }
3045 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003046 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003047 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 }
3049 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003050 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 return total_read;
3052}
3053
Jeff Laytonca83ce32011-04-12 09:13:44 -04003054/*
3055 * If the page is mmap'ed into a process' page tables, then we need to make
3056 * sure that it doesn't change while being written back.
3057 */
3058static int
3059cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3060{
3061 struct page *page = vmf->page;
3062
3063 lock_page(page);
3064 return VM_FAULT_LOCKED;
3065}
3066
3067static struct vm_operations_struct cifs_file_vm_ops = {
3068 .fault = filemap_fault,
3069 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003070 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003071};
3072
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003073int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3074{
3075 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003076 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003077
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003078 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003079
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003080 if (!CIFS_I(inode)->clientCanCacheRead) {
3081 rc = cifs_invalidate_mapping(inode);
 3082 if (rc) {
 free_xid(xid);
 3083 return rc;
 }
3084 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003085
3086 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003087 if (rc == 0)
3088 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003089 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003090 return rc;
3091}
3092
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3094{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 int rc, xid;
3096
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003097 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003098 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003100 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003101 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 return rc;
3103 }
3104 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003105 if (rc == 0)
3106 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003107 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108 return rc;
3109}
3110
Jeff Layton0471ca32012-05-16 07:13:16 -04003111static void
3112cifs_readv_complete(struct work_struct *work)
3113{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003114 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003115 struct cifs_readdata *rdata = container_of(work,
3116 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003117
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003118 for (i = 0; i < rdata->nr_pages; i++) {
3119 struct page *page = rdata->pages[i];
3120
Jeff Layton0471ca32012-05-16 07:13:16 -04003121 lru_cache_add_file(page);
3122
3123 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003124 flush_dcache_page(page);
3125 SetPageUptodate(page);
3126 }
3127
3128 unlock_page(page);
3129
3130 if (rdata->result == 0)
3131 cifs_readpage_to_fscache(rdata->mapping->host, page);
3132
3133 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003134 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003135 }
Jeff Layton6993f742012-05-16 07:13:17 -04003136 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003137}
3138
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003139static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003140cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3141 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003142{
Jeff Layton8321fec2012-09-19 06:22:32 -07003143 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003144 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003145 u64 eof;
3146 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003147 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003148 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003149
3150 /* determine the eof that the server (probably) has */
3151 eof = CIFS_I(rdata->mapping->host)->server_eof;
3152 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3153 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3154
Jeff Layton8321fec2012-09-19 06:22:32 -07003155 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003156 for (i = 0; i < nr_pages; i++) {
3157 struct page *page = rdata->pages[i];
3158
Jeff Layton8321fec2012-09-19 06:22:32 -07003159 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003160 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003161 iov.iov_base = kmap(page);
3162 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003163 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003164 i, page->index, iov.iov_base, iov.iov_len);
3165 len -= PAGE_CACHE_SIZE;
3166 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003167 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003168 iov.iov_base = kmap(page);
3169 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003170 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003171 i, page->index, iov.iov_base, iov.iov_len);
3172 memset(iov.iov_base + len,
3173 '\0', PAGE_CACHE_SIZE - len);
3174 rdata->tailsz = len;
3175 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003176 } else if (page->index > eof_index) {
3177 /*
3178 * The VFS will not try to do readahead past the
3179 * i_size, but it's possible that we have outstanding
3180 * writes with gaps in the middle and the i_size hasn't
3181 * caught up yet. Populate those with zeroed out pages
3182 * to prevent the VFS from repeatedly attempting to
3183 * fill them until the writes are flushed.
3184 */
3185 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003186 lru_cache_add_file(page);
3187 flush_dcache_page(page);
3188 SetPageUptodate(page);
3189 unlock_page(page);
3190 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003191 rdata->pages[i] = NULL;
3192 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003193 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003194 } else {
3195 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003196 lru_cache_add_file(page);
3197 unlock_page(page);
3198 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003199 rdata->pages[i] = NULL;
3200 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003201 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003202 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003203
3204 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3205 kunmap(page);
3206 if (result < 0)
3207 break;
3208
3209 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003210 }
3211
Jeff Layton8321fec2012-09-19 06:22:32 -07003212 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003213}
3214
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215static int cifs_readpages(struct file *file, struct address_space *mapping,
3216 struct list_head *page_list, unsigned num_pages)
3217{
Jeff Layton690c5e32011-10-19 15:30:16 -04003218 int rc;
3219 struct list_head tmplist;
3220 struct cifsFileInfo *open_file = file->private_data;
3221 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3222 unsigned int rsize = cifs_sb->rsize;
3223 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224
Jeff Layton690c5e32011-10-19 15:30:16 -04003225 /*
3226 * Give up immediately if rsize is too small to read an entire page.
3227 * The VFS will fall back to readpage. We should never reach this
3228 * point however since we set ra_pages to 0 when the rsize is smaller
3229 * than a cache page.
3230 */
3231 if (unlikely(rsize < PAGE_CACHE_SIZE))
3232 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003233
Suresh Jayaraman56698232010-07-05 18:13:25 +05303234 /*
 3235 * Read as many pages as possible from fscache; returns -ENOBUFS
 3236 * immediately if the cookie is negative.
3237 */
3238 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3239 &num_pages);
3240 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003241 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303242
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003243 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3244 pid = open_file->pid;
3245 else
3246 pid = current->tgid;
3247
Jeff Layton690c5e32011-10-19 15:30:16 -04003248 rc = 0;
3249 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250
Jeff Layton690c5e32011-10-19 15:30:16 -04003251 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3252 mapping, num_pages);
3253
3254 /*
3255 * Start with the page at end of list and move it to private
3256 * list. Do the same with any following pages until we hit
3257 * the rsize limit, hit an index discontinuity, or run out of
3258 * pages. Issue the async read and then start the loop again
3259 * until the list is empty.
3260 *
3261 * Note that list order is important. The page_list is in
3262 * the order of declining indexes. When we put the pages in
3263 * the rdata->pages, then we want them in increasing order.
3264 */
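 /*
 * For example (a sketch, assuming 4KB pages and rsize = 16384): a
 * page_list arriving with indexes 7, 6, 5, 4 is consumed from its
 * tail, so pages 4, 5, 6 and 7 move to tmplist in increasing order
 * and go out as a single 16384-byte async read at offset 16384.
 */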
3265 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003266 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003267 unsigned int bytes = PAGE_CACHE_SIZE;
3268 unsigned int expected_index;
3269 unsigned int nr_pages = 1;
3270 loff_t offset;
3271 struct page *page, *tpage;
3272 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273
3274 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275
Jeff Layton690c5e32011-10-19 15:30:16 -04003276 /*
3277 * Lock the page and put it in the cache. Since no one else
3278 * should have access to this page, we're safe to simply set
3279 * PG_locked without checking it first.
3280 */
3281 __set_page_locked(page);
3282 rc = add_to_page_cache_locked(page, mapping,
3283 page->index, GFP_KERNEL);
3284
3285 /* give up if we can't stick it in the cache */
3286 if (rc) {
3287 __clear_page_locked(page);
3288 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
Jeff Layton690c5e32011-10-19 15:30:16 -04003291 /* move first page to the tmplist */
3292 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3293 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294
Jeff Layton690c5e32011-10-19 15:30:16 -04003295 /* now try and add more pages onto the request */
3296 expected_index = page->index + 1;
3297 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3298 /* discontinuity ? */
3299 if (page->index != expected_index)
3300 break;
3301
3302 /* would this page push the read over the rsize? */
3303 if (bytes + PAGE_CACHE_SIZE > rsize)
3304 break;
3305
3306 __set_page_locked(page);
3307 if (add_to_page_cache_locked(page, mapping,
3308 page->index, GFP_KERNEL)) {
3309 __clear_page_locked(page);
3310 break;
3311 }
3312 list_move_tail(&page->lru, &tmplist);
3313 bytes += PAGE_CACHE_SIZE;
3314 expected_index++;
3315 nr_pages++;
3316 }
3317
Jeff Layton0471ca32012-05-16 07:13:16 -04003318 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003319 if (!rdata) {
3320 /* best to give up if we're out of mem */
3321 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3322 list_del(&page->lru);
3323 lru_cache_add_file(page);
3324 unlock_page(page);
3325 page_cache_release(page);
3326 }
3327 rc = -ENOMEM;
3328 break;
3329 }
3330
Jeff Layton6993f742012-05-16 07:13:17 -04003331 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003332 rdata->mapping = mapping;
3333 rdata->offset = offset;
3334 rdata->bytes = bytes;
3335 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003336 rdata->pagesz = PAGE_CACHE_SIZE;
3337 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003338
3339 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3340 list_del(&page->lru);
3341 rdata->pages[rdata->nr_pages++] = page;
3342 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003343
Jeff Layton2a1bb132012-05-16 07:13:17 -04003344 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003345 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003346 for (i = 0; i < rdata->nr_pages; i++) {
3347 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003348 lru_cache_add_file(page);
3349 unlock_page(page);
3350 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 }
Jeff Layton6993f742012-05-16 07:13:17 -04003352 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 break;
3354 }
Jeff Layton6993f742012-05-16 07:13:17 -04003355
3356 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 }
3358
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 return rc;
3360}
3361
3362static int cifs_readpage_worker(struct file *file, struct page *page,
3363 loff_t *poffset)
3364{
3365 char *read_data;
3366 int rc;
3367
Suresh Jayaraman56698232010-07-05 18:13:25 +05303368 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003369 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303370 if (rc == 0)
3371 goto read_complete;
3372
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 page_cache_get(page);
3374 read_data = kmap(page);
 3375 /* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003378
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 if (rc < 0)
3380 goto io_error;
3381 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003382 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003383
Al Viro496ad9a2013-01-23 17:07:38 -05003384 file_inode(file)->i_atime =
3385 current_fs_time(file_inode(file)->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003386
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 if (PAGE_CACHE_SIZE > rc)
3388 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3389
3390 flush_dcache_page(page);
3391 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303392
3393 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003394 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303395
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003397
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003399 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303401
3402read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 return rc;
3404}
3405
3406static int cifs_readpage(struct file *file, struct page *page)
3407{
3408 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3409 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003410 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003412 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413
3414 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303415 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003416 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303417 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 }
3419
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003420 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003421 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422
3423 rc = cifs_readpage_worker(file, page, &offset);
3424
3425 unlock_page(page);
3426
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003427 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 return rc;
3429}
3430
Steve Frencha403a0a2007-07-26 15:54:16 +00003431static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3432{
3433 struct cifsFileInfo *open_file;
3434
Jeff Layton44772882010-10-15 15:34:03 -04003435 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003436 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003437 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003438 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003439 return 1;
3440 }
3441 }
Jeff Layton44772882010-10-15 15:34:03 -04003442 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003443 return 0;
3444}
3445
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446/* We do not want to update the file size from the server for inodes
 3447 open for write, to avoid races with writepage extending
 3448 the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003449 refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 but this is tricky to do without racing with writebehind
 3451 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003452bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453{
Steve Frencha403a0a2007-07-26 15:54:16 +00003454 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003455 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003456
Steve Frencha403a0a2007-07-26 15:54:16 +00003457 if (is_inode_writable(cifsInode)) {
3458 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003459 struct cifs_sb_info *cifs_sb;
3460
Steve Frenchc32a0b62006-01-12 14:41:28 -08003461 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003462 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003463 /* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003464 we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003465 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003466 }
3467
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003468 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003469 return true;
Steve French7ba52632007-02-08 18:14:13 +00003470
Steve French4b18f2a2008-04-29 00:06:05 +00003471 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003472 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003473 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474}
3475
Nick Piggind9414772008-09-24 11:32:59 -04003476static int cifs_write_begin(struct file *file, struct address_space *mapping,
3477 loff_t pos, unsigned len, unsigned flags,
3478 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479{
Nick Piggind9414772008-09-24 11:32:59 -04003480 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3481 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003482 loff_t page_start = pos & PAGE_MASK;
3483 loff_t i_size;
3484 struct page *page;
3485 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486
Joe Perchesb6b38f72010-04-21 03:50:45 +00003487 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003488
Nick Piggin54566b22009-01-04 12:00:53 -08003489 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003490 if (!page) {
3491 rc = -ENOMEM;
3492 goto out;
3493 }
Nick Piggind9414772008-09-24 11:32:59 -04003494
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003495 if (PageUptodate(page))
3496 goto out;
Steve French8a236262007-03-06 00:31:00 +00003497
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003498 /*
3499 * If we write a full page it will be up to date, no need to read from
3500 * the server. If the write is short, we'll end up doing a sync write
3501 * instead.
3502 */
3503 if (len == PAGE_CACHE_SIZE)
3504 goto out;
3505
3506 /*
3507 * optimize away the read when we have an oplock, and we're not
3508 * expecting to use any of the data we'd be reading in. That
3509 * is, when the page lies beyond the EOF, or straddles the EOF
3510 * and the write will cover all of the existing data.
3511 */
3512 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3513 i_size = i_size_read(mapping->host);
3514 if (page_start >= i_size ||
3515 (offset == 0 && (pos + len) >= i_size)) {
3516 zero_user_segments(page, 0, offset,
3517 offset + len,
3518 PAGE_CACHE_SIZE);
3519 /*
3520 * PageChecked means that the parts of the page
3521 * to which we're not writing are considered up
3522 * to date. Once the data is copied to the
3523 * page, it can be set uptodate.
3524 */
3525 SetPageChecked(page);
3526 goto out;
3527 }
3528 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529
Nick Piggind9414772008-09-24 11:32:59 -04003530 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003531 /*
3532 * might as well read a page, it is fast enough. If we get
3533 * an error, we don't need to return it. cifs_write_end will
3534 * do a sync write instead since PG_uptodate isn't set.
3535 */
3536 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003537 } else {
3538 /* we could try using another file handle if there is one -
3539 but how would we lock it to prevent close of that handle
3540 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003541 this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003542 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003543out:
3544 *pagep = page;
3545 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546}
3547
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303548static int cifs_release_page(struct page *page, gfp_t gfp)
3549{
3550 if (PagePrivate(page))
3551 return 0;
3552
3553 return cifs_fscache_release_page(page, gfp);
3554}
3555
3556static void cifs_invalidate_page(struct page *page, unsigned long offset)
3557{
3558 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3559
3560 if (offset == 0)
3561 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3562}
3563
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003564static int cifs_launder_page(struct page *page)
3565{
3566 int rc = 0;
3567 loff_t range_start = page_offset(page);
3568 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3569 struct writeback_control wbc = {
3570 .sync_mode = WB_SYNC_ALL,
3571 .nr_to_write = 0,
3572 .range_start = range_start,
3573 .range_end = range_end,
3574 };
3575
3576 cFYI(1, "Launder page: %p", page);
3577
3578 if (clear_page_dirty_for_io(page))
3579 rc = cifs_writepage_locked(page, &wbc);
3580
3581 cifs_fscache_invalidate_page(page, page->mapping->host);
3582 return rc;
3583}
3584
Tejun Heo9b646972010-07-20 22:09:02 +02003585void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003586{
3587 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3588 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003589 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003590 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003591 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003592 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003593
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003594 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
3595 cifs_has_mand_locks(cinode)) {
3596 cFYI(1, "Reset oplock to None for inode=%p due to mand locks",
3597 inode);
3598 cinode->clientCanCacheRead = false;
3599 }
3600
Jeff Layton3bc303c2009-09-21 06:47:50 -04003601 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003602 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003603 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003604 else
Al Viro8737c932009-12-24 06:47:55 -05003605 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003606 rc = filemap_fdatawrite(inode->i_mapping);
3607 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003608 rc = filemap_fdatawait(inode->i_mapping);
3609 mapping_set_error(inode->i_mapping, rc);
Pavel Shilovsky03eca702012-12-06 21:24:33 +04003610 cifs_invalidate_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003611 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003612 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003613 }
3614
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003615 rc = cifs_push_locks(cfile);
3616 if (rc)
3617 cERROR(1, "Push locks rc = %d", rc);
3618
Jeff Layton3bc303c2009-09-21 06:47:50 -04003619 /*
 3620 * Releasing a stale oplock after a recent reconnect of the smb session
 3621 * using a now incorrect file handle is not a data integrity issue, but
 3622 * do not bother sending an oplock release if the session to the server
 3623 * is still disconnected, since the oplock has already been released by
 3624 * the server.
3624 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003625 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003626 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3627 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003628 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003629 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003630}
3631
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003632const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 .readpage = cifs_readpage,
3634 .readpages = cifs_readpages,
3635 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003636 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003637 .write_begin = cifs_write_begin,
3638 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303640 .releasepage = cifs_release_page,
3641 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003642 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003644
3645/*
3646 * cifs_readpages requires the server to support a buffer large enough to
3647 * contain the header plus one complete page of data. Otherwise, we need
3648 * to leave cifs_readpages out of the address space operations.
3649 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003650const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003651 .readpage = cifs_readpage,
3652 .writepage = cifs_writepage,
3653 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003654 .write_begin = cifs_write_begin,
3655 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003656 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303657 .releasepage = cifs_release_page,
3658 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003659 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003660};