/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

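/* Map POSIX open flags to the NT desired-access bits requested on the wire. */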
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause an unnecessary access-denied error on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

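/* Map POSIX open flags to the SMB_O_* flags used by the POSIX open call. */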
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cFYI(1, "Application %s pid %d has incorrectly set O_EXCL flag "
			"but not O_CREAT on file open. Ignoring O_EXCL",
			current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

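/* Map O_CREAT/O_EXCL/O_TRUNC combinations to a CIFS create disposition. */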
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

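/*
 * Open a file using the POSIX create operation of the CIFS Unix extensions
 * and, if the caller asked for it, instantiate or refresh the inode from the
 * returned FILE_UNIX_BASIC_INFO.
 */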
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

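/*
 * Open a file the traditional (NT-style) way via server->ops->open and then
 * refresh the cached inode info from the server.
 */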
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE also uses the attributes / metadata passed
 *	in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

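/* Return true if any open fid on the inode holds cached byte-range locks. */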
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

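/*
 * Allocate the private data for an opened file and link it into the
 * per-inode and per-tcon lists of open files.
 */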
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
	    cifs_has_mand_locks(cinode)) {
		cFYI(1, "Reset oplock val from read to None due to mand locks");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if it is a readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

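/* Take an extra reference on the file private data. */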
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

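/*
 * The ->open op for regular files: prefer a POSIX open when the server
 * supports the Unix extensions, otherwise fall back to an NT-style open.
 */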
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned "
					  "unexpected error on SMB posix open"
					  ", disabling posix open support. "
					  "Check if server update available.",
					  tcon->ses->serverName,
					  tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

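/*
 * Reopen a file whose handle was invalidated, e.g. by reconnecting after a
 * session loss. If @can_flush is set, flush dirty pages and refresh the
 * inode info once the new handle is obtained.
 */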
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means we
	 * end up here, and we can never tell if the caller already has the
	 * rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
				      ~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fallthrough to retry open the old way on errors; especially
		 * in the reconnect path it is important to retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh the inode by passing in a file_info buf to be
	 * returned by CIFSSMBOpen and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data; and since we do not know if
	 * we have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

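/*
 * Close a directory opened for a search: invalidate and close any pending
 * search handle on the server and free the cached search buffer.
 */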
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

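/* Allocate and initialize a byte-range lock record for the current task. */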
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

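/* Wake up every task blocked waiting on the given byte-range lock. */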
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/*
 * @rw_check : 0 - no op (CIFS_LOCK_OP), 1 - read op (CIFS_READ_OP),
 *	       2 - write op (CIFS_WRITE_OP)
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

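/* Check every open fid on the inode for a lock that conflicts with ours. */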
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

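/* Append a lock to the fid's lock list under the inode's lock_sem. */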
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

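/*
 * Send all cached mandatory byte-range locks for the file to the server,
 * batching them into LOCKING_ANDX_RANGE arrays that fit within maxBuf.
 */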
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

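/*
 * Send all cached POSIX byte-range locks for the file to the server. The
 * lock_to_push records are preallocated because memory can not be allocated
 * while holding the flock spinlock.
 */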
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001096static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001097cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001098{
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1100 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001101 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001102 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001103 struct list_head locks_to_send, *el;
1104 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001105 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001106
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001107 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001108
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001109 lock_flocks();
1110 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001111 if ((*before)->fl_flags & FL_POSIX)
1112 count++;
1113 }
1114 unlock_flocks();
1115
1116 INIT_LIST_HEAD(&locks_to_send);
1117
1118 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001119 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001120 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001121 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001122 */
1123 for (; i < count; i++) {
1124 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1125 if (!lck) {
1126 rc = -ENOMEM;
1127 goto err_out;
1128 }
1129 list_add_tail(&lck->llist, &locks_to_send);
1130 }
1131
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001132 el = locks_to_send.next;
1133 lock_flocks();
1134 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001135 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001136 if ((flock->fl_flags & FL_POSIX) == 0)
1137 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001138 if (el == &locks_to_send) {
1139 /*
1140 * The list ended. We don't have enough allocated
1141 * structures - something is really wrong.
1142 */
1143 cERROR(1, "Can't push all brlocks!");
1144 break;
1145 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001146 length = 1 + flock->fl_end - flock->fl_start;
1147 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1148 type = CIFS_RDLCK;
1149 else
1150 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001151 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001152 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001153 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001154 lck->length = length;
1155 lck->type = type;
1156 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001157 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001158 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001159 unlock_flocks();
1160
1161 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001162 int stored_rc;
1163
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001164 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001165 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001166 lck->type, 0);
1167 if (stored_rc)
1168 rc = stored_rc;
1169 list_del(&lck->llist);
1170 kfree(lck);
1171 }
1172
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001173out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001174 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001175 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001176err_out:
1177 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1178 list_del(&lck->llist);
1179 kfree(lck);
1180 }
1181 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001182}
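/*
 * A note on the two-pass shape of cifs_push_posix_locks() above: the
 * lock_flocks()/unlock_flocks() section is spinlocked, so the code must
 * not kmalloc(GFP_KERNEL) while walking inode->i_flock. It therefore
 * counts the FL_POSIX locks first, drops the lock, preallocates that
 * many lock_to_push structures, and only then walks the list again to
 * fill them in. The count cannot go stale in between because the caller
 * holds cinode->lock_sem for write, which serializes brlock changes on
 * this inode.
 */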
1183
1184static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001185cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001186{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001187 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001188 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001189 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001190 int rc = 0;
1191
 1192	/* we are going to update can_cache_brlcks here - need write access */
1193 down_write(&cinode->lock_sem);
1194 if (!cinode->can_cache_brlcks) {
1195 up_write(&cinode->lock_sem);
1196 return rc;
1197 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001198
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001199 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001200 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1201 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001202 rc = cifs_push_posix_locks(cfile);
1203 else
1204 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001205
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001206 cinode->can_cache_brlcks = false;
1207 up_write(&cinode->lock_sem);
1208 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001209}
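/*
 * Summary of the dispatch above: cached byte-range locks are pushed to
 * the server exactly once, at the moment can_cache_brlcks flips to
 * false. The POSIX-style push is chosen only when the server advertises
 * the Unix FCNTL capability and the mount did not set
 * CIFS_MOUNT_NOPOSIXBRL; everything else goes through the per-dialect
 * push_mand_locks() server operation.
 */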
1210
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001211static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001212cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001213 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001215 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001216 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001217 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001218 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001219 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001220 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001221 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001223 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001224 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001225 "not implemented yet");
1226 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001227 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001228 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001229 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1230 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001231 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001233 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001234 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001235 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001236 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001237 *lock = 1;
1238 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001239 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001240 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001241 *unlock = 1;
1242 /* Check if unlock includes more than one lock range */
1243 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001244 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001245 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001246 *lock = 1;
1247 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001248 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001249 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001250 *lock = 1;
1251 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001252 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001253 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001254 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001256 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001257}
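/*
 * The decoding above reduces to a small table; every result is OR'ed
 * onto the dialect's large_lock_type base value:
 *
 *	F_WRLCK, F_EXLCK  -> exclusive_lock_type, set a lock
 *	F_RDLCK, F_SHLCK  -> shared_lock_type,    set a lock
 *	F_UNLCK           -> unlock_lock_type,    release a lock
 *
 * FL_SLEEP is the only flag that changes behaviour here (it makes the
 * later lock request blocking); FL_ACCESS and FL_LEASE are only logged.
 */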
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001259static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001260cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001261 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001262{
1263 int rc = 0;
1264 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001265 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1266 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001267 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001268 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001270 if (posix_lck) {
1271 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001272
1273 rc = cifs_posix_lock_test(file, flock);
1274 if (!rc)
1275 return rc;
1276
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001277 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001278 posix_lock_type = CIFS_RDLCK;
1279 else
1280 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001281 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001282 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001283 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 return rc;
1285 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001286
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001287 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001288 if (!rc)
1289 return rc;
1290
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001291 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001292 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1293 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001294 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001295 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1296 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001297 flock->fl_type = F_UNLCK;
1298 if (rc != 0)
1299 cERROR(1, "Error unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001300 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001301 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001302 }
1303
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001304 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001305 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001306 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001307 }
1308
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001309 type &= ~server->vals->exclusive_lock_type;
1310
1311 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1312 type | server->vals->shared_lock_type,
1313 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001314 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001315 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1316 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001317 flock->fl_type = F_RDLCK;
1318 if (rc != 0)
 1319			cERROR(1, "Error %d unlocking previously locked "
 1320				  "range during test of lock", rc);
1321 } else
1322 flock->fl_type = F_WRLCK;
1323
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001324 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001325}
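/*
 * How the lock test above works: SMB offers no test-only byte-range
 * lock, so once the local checks are inconclusive the code probes the
 * server - it tries to take a non-blocking lock of the requested type
 * and, if that succeeds, immediately unlocks and reports F_UNLCK (no
 * conflict). When an exclusive probe fails, a second shared probe
 * distinguishes the conflicting lock: if shared succeeds, a read lock
 * is in the way (F_RDLCK), otherwise a write lock (F_WRLCK).
 */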
1326
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001327void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001328cifs_move_llist(struct list_head *source, struct list_head *dest)
1329{
1330 struct list_head *li, *tmp;
1331 list_for_each_safe(li, tmp, source)
1332 list_move(li, dest);
1333}
1334
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001335void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001336cifs_free_llist(struct list_head *llist)
1337{
1338 struct cifsLockInfo *li, *tmp;
1339 list_for_each_entry_safe(li, tmp, llist, llist) {
1340 cifs_del_lock_waiters(li);
1341 list_del(&li->llist);
1342 kfree(li);
1343 }
1344}
1345
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001346int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001347cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1348 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001349{
1350 int rc = 0, stored_rc;
1351 int types[] = {LOCKING_ANDX_LARGE_FILES,
1352 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1353 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001354 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001355 LOCKING_ANDX_RANGE *buf, *cur;
1356 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1357 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1358 struct cifsLockInfo *li, *tmp;
1359 __u64 length = 1 + flock->fl_end - flock->fl_start;
1360 struct list_head tmp_llist;
1361
1362 INIT_LIST_HEAD(&tmp_llist);
1363
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001364 /*
1365 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1366 * and check it for zero before using.
1367 */
1368 max_buf = tcon->ses->server->maxBuf;
1369 if (!max_buf)
1370 return -EINVAL;
1371
1372 max_num = (max_buf - sizeof(struct smb_hdr)) /
1373 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001374 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1375 if (!buf)
1376 return -ENOMEM;
1377
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001378 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001379 for (i = 0; i < 2; i++) {
1380 cur = buf;
1381 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001382 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001383 if (flock->fl_start > li->offset ||
1384 (flock->fl_start + length) <
1385 (li->offset + li->length))
1386 continue;
1387 if (current->tgid != li->pid)
1388 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001389 if (types[i] != li->type)
1390 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001391 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001392 /*
1393 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001394 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001395 */
1396 list_del(&li->llist);
1397 cifs_del_lock_waiters(li);
1398 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001399 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001400 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001401 cur->Pid = cpu_to_le16(li->pid);
1402 cur->LengthLow = cpu_to_le32((u32)li->length);
1403 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1404 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1405 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1406 /*
 1407			 * We need to save the lock here so that we can add it
 1408			 * back to the file's list if the unlock range request
1409 * the server.
1410 */
1411 list_move(&li->llist, &tmp_llist);
1412 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001413 stored_rc = cifs_lockv(xid, tcon,
1414 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001415 li->type, num, 0, buf);
1416 if (stored_rc) {
1417 /*
1418 * We failed on the unlock range
1419 * request - add all locks from the tmp
1420 * list to the head of the file's list.
1421 */
1422 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001423 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001424 rc = stored_rc;
1425 } else
1426 /*
 1427				 * The unlock range request succeeded -
1428 * free the tmp list.
1429 */
1430 cifs_free_llist(&tmp_llist);
1431 cur = buf;
1432 num = 0;
1433 } else
1434 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001435 }
1436 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001437 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001438 types[i], num, 0, buf);
1439 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001440 cifs_move_llist(&tmp_llist,
1441 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001442 rc = stored_rc;
1443 } else
1444 cifs_free_llist(&tmp_llist);
1445 }
1446 }
1447
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001448 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001449 kfree(buf);
1450 return rc;
1451}
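/*
 * Batching sketch for the unlock path above: matching cached ranges are
 * packed into a single LOCKING_ANDX request until max_num is reached,
 * so most unlocks complete in one round trip per lock type. As a rough,
 * illustrative calculation only (the 16384-byte maxBuf, 32-byte header
 * and 20-byte range size are assumed values - check the real sizes):
 *
 *	max_num = (16384 - 32) / 20 = 817 ranges per request
 */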
1452
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001453static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001454cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001455 bool wait_flag, bool posix_lck, int lock, int unlock,
1456 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001457{
1458 int rc = 0;
1459 __u64 length = 1 + flock->fl_end - flock->fl_start;
1460 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1461 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001462 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001463 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001464
1465 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001466 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001467
1468 rc = cifs_posix_lock_set(file, flock);
1469 if (!rc || rc < 0)
1470 return rc;
1471
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001472 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001473 posix_lock_type = CIFS_RDLCK;
1474 else
1475 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001476
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001477 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001478 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001479
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001480 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1481 current->tgid, flock->fl_start, length,
1482 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001483 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001484 }
1485
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001486 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001487 struct cifsLockInfo *lock;
1488
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001489 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001490 if (!lock)
1491 return -ENOMEM;
1492
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001493 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001494 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001495 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001496 return rc;
1497 }
1498 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001499 goto out;
1500
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001501 /*
1502 * Windows 7 server can delay breaking lease from read to None
1503 * if we set a byte-range lock on a file - break it explicitly
1504 * before sending the lock to the server to be sure the next
 1505		 * read won't conflict with non-overlapping locks due to
 1506		 * page-granularity reads.
1507 */
1508 if (!CIFS_I(inode)->clientCanCacheAll &&
1509 CIFS_I(inode)->clientCanCacheRead) {
1510 cifs_invalidate_mapping(inode);
1511 cFYI(1, "Set no oplock for inode=%p due to mand locks",
1512 inode);
1513 CIFS_I(inode)->clientCanCacheRead = false;
1514 }
1515
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001516 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1517 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001518 if (rc) {
1519 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001520 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001521 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001522
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001523 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001524 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001525 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001526
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001527out:
1528 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001529 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001530 return rc;
1531}
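/*
 * A note on the tail of cifs_setlk(): any request carrying FL_POSIX is
 * also run through posix_lock_file_wait() before returning, so the
 * local VFS lock table mirrors what was requested of the server and
 * other processes on this client observe a consistent lock state.
 */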
1532
1533int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1534{
1535 int rc, xid;
1536 int lock = 0, unlock = 0;
1537 bool wait_flag = false;
1538 bool posix_lck = false;
1539 struct cifs_sb_info *cifs_sb;
1540 struct cifs_tcon *tcon;
1541 struct cifsInodeInfo *cinode;
1542 struct cifsFileInfo *cfile;
1543 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001544 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001545
1546 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001547 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001548
1549 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1550 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1551 flock->fl_start, flock->fl_end);
1552
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001553 cfile = (struct cifsFileInfo *)file->private_data;
1554 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001555
1556 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1557 tcon->ses->server);
1558
1559 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001560 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001561 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001562
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001563 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001564 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1565 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1566 posix_lck = true;
1567 /*
1568 * BB add code here to normalize offset and length to account for
 1569	 * negative length, which we cannot accept over the wire.
1570 */
1571 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001572 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001573 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001574 return rc;
1575 }
1576
1577 if (!lock && !unlock) {
1578 /*
1579 * if no lock or unlock then nothing to do since we do not
1580 * know what it is
1581 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001582 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001583 return -EOPNOTSUPP;
1584 }
1585
1586 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1587 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001588 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 return rc;
1590}
1591
Jeff Layton597b0272012-03-23 14:40:56 -04001592/*
1593 * update the file size (if needed) after a write. Should be called with
1594 * the inode->i_lock held
1595 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001596void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001597cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1598 unsigned int bytes_written)
1599{
1600 loff_t end_of_write = offset + bytes_written;
1601
1602 if (end_of_write > cifsi->server_eof)
1603 cifsi->server_eof = end_of_write;
1604}
1605
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001606static ssize_t
1607cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1608 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609{
1610 int rc = 0;
1611 unsigned int bytes_written = 0;
1612 unsigned int total_written;
1613 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001614 struct cifs_tcon *tcon;
1615 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001616 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001617 struct dentry *dentry = open_file->dentry;
1618 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001619 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Jeff Layton7da4b492010-10-15 15:34:00 -04001621 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Joe Perchesb6b38f72010-04-21 03:50:45 +00001623 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001624 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001626 tcon = tlink_tcon(open_file->tlink);
1627 server = tcon->ses->server;
1628
1629 if (!server->ops->sync_write)
1630 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001631
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001632 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 for (total_written = 0; write_size > total_written;
1635 total_written += bytes_written) {
1636 rc = -EAGAIN;
1637 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001638 struct kvec iov[2];
1639 unsigned int len;
1640
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642				/* we could deadlock if we called
 1643				   filemap_fdatawait from here, so tell
 1644				   reopen_file not to flush data to the
 1645				   server now */
Jeff Layton15886172010-10-15 15:33:59 -04001646 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 if (rc != 0)
1648 break;
1649 }
Steve French3e844692005-10-03 13:37:24 -07001650
Jeff Laytonca83ce32011-04-12 09:13:44 -04001651 len = min((size_t)cifs_sb->wsize,
1652 write_size - total_written);
1653 /* iov[0] is reserved for smb header */
1654 iov[1].iov_base = (char *)write_data + total_written;
1655 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001656 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001657 io_parms.tcon = tcon;
1658 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001659 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001660 rc = server->ops->sync_write(xid, open_file, &io_parms,
1661 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 }
1663 if (rc || (bytes_written == 0)) {
1664 if (total_written)
1665 break;
1666 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001667 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 return rc;
1669 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001670 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001671 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001672 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001673 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001674 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 }
1677
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001678 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679
Jeff Layton7da4b492010-10-15 15:34:00 -04001680 if (total_written > 0) {
1681 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001682 if (*offset > dentry->d_inode->i_size)
1683 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001684 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001686 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001687 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 return total_written;
1689}
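/*
 * cifs_write() retry behaviour in brief: each loop iteration sends at
 * most wsize bytes; on -EAGAIN the send is retried, first reopening the
 * handle if reconnect invalidated it (without flushing, to avoid
 * deadlocking in filemap_fdatawait). Partial progress accumulates in
 * total_written, and the inode size and server_eof are only updated
 * while holding inode->i_lock.
 */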
1690
Jeff Layton6508d902010-09-29 19:51:11 -04001691struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1692 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001693{
1694 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001695 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1696
1697 /* only filter by fsuid on multiuser mounts */
1698 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1699 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001700
Jeff Layton44772882010-10-15 15:34:03 -04001701 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001702	/* we could simply get the first list entry since write-only entries
 1703	   are always at the end of the list, but since the first entry might
 1704	   have a close pending, we go through the whole list */
1705 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001706 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001707 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001708 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001709 if (!open_file->invalidHandle) {
1710 /* found a good file */
1711 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001712 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001713 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001714 return open_file;
1715 } /* else might as well continue, and look for
1716 another, or simply have the caller reopen it
1717 again rather than trying to fix this handle */
1718 } else /* write only file */
1719 break; /* write only files are last so must be done */
1720 }
Jeff Layton44772882010-10-15 15:34:03 -04001721 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001722 return NULL;
1723}
Steve French630f3f0c2007-10-25 21:17:17 +00001724
Jeff Layton6508d902010-09-29 19:51:11 -04001725struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1726 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001727{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001728 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001729 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001730 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001731 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001732 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001733
Steve French60808232006-04-22 15:53:05 +00001734 /* Having a null inode here (because mapping->host was set to zero by
 1735	   the VFS or MM) should not happen but we had reports of an oops (due to
1736 it being zero) during stress testcases so we need to check for it */
1737
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001738 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001739		cERROR(1, "Null inode passed to find_writable_file");
Steve French60808232006-04-22 15:53:05 +00001740 dump_stack();
1741 return NULL;
1742 }
1743
Jeff Laytond3892292010-11-02 16:22:50 -04001744 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1745
Jeff Layton6508d902010-09-29 19:51:11 -04001746 /* only filter by fsuid on multiuser mounts */
1747 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1748 fsuid_only = false;
1749
Jeff Layton44772882010-10-15 15:34:03 -04001750 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001751refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001752 if (refind > MAX_REOPEN_ATT) {
1753 spin_unlock(&cifs_file_list_lock);
1754 return NULL;
1755 }
Steve French6148a742005-10-05 12:23:19 -07001756 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001757 if (!any_available && open_file->pid != current->tgid)
1758 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001759 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001760 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001761 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001762 if (!open_file->invalidHandle) {
1763 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001764 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001765 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001766 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001767 } else {
1768 if (!inv_file)
1769 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001770 }
Steve French6148a742005-10-05 12:23:19 -07001771 }
1772 }
Jeff Layton2846d382008-09-22 21:33:33 -04001773	/* couldn't find a usable FH with the same pid, try any available */
1774 if (!any_available) {
1775 any_available = true;
1776 goto refind_writable;
1777 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001778
1779 if (inv_file) {
1780 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001781 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001782 }
1783
Jeff Layton44772882010-10-15 15:34:03 -04001784 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001785
1786 if (inv_file) {
1787 rc = cifs_reopen_file(inv_file, false);
1788 if (!rc)
1789 return inv_file;
1790 else {
1791 spin_lock(&cifs_file_list_lock);
1792 list_move_tail(&inv_file->flist,
1793 &cifs_inode->openFileList);
1794 spin_unlock(&cifs_file_list_lock);
1795 cifsFileInfo_put(inv_file);
1796 spin_lock(&cifs_file_list_lock);
1797 ++refind;
1798 goto refind_writable;
1799 }
1800 }
1801
Steve French6148a742005-10-05 12:23:19 -07001802 return NULL;
1803}
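/*
 * Search order used above: a writable handle owned by the calling
 * process (same tgid) is preferred, then any writable handle. Invalid
 * handles are remembered in inv_file and reopened as a last resort, at
 * most MAX_REOPEN_ATT times; a handle that fails to reopen is moved to
 * the tail of openFileList so the next scan tries a different one.
 */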
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1806{
1807 struct address_space *mapping = page->mapping;
1808 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1809 char *write_data;
1810 int rc = -EFAULT;
1811 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001813 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815 if (!mapping || !mapping->host)
1816 return -EFAULT;
1817
1818 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
1820 offset += (loff_t)from;
1821 write_data = kmap(page);
1822 write_data += from;
1823
1824 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1825 kunmap(page);
1826 return -EIO;
1827 }
1828
1829 /* racing with truncate? */
1830 if (offset > mapping->host->i_size) {
1831 kunmap(page);
1832 return 0; /* don't care */
1833 }
1834
1835 /* check to make sure that we are not extending the file */
1836 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001837 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
Jeff Layton6508d902010-09-29 19:51:11 -04001839 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001840 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001841 bytes_written = cifs_write(open_file, open_file->pid,
1842 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001843 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001845 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001846 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001847 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001848 else if (bytes_written < 0)
1849 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001850 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001851 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 rc = -EIO;
1853 }
1854
1855 kunmap(page);
1856 return rc;
1857}
1858
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001860 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001862 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1863 bool done = false, scanned = false, range_whole = false;
1864 pgoff_t end, index;
1865 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001866 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001867 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001868 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001869
Steve French37c0eb42005-10-05 14:50:29 -07001870 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001871 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001872 * one page at a time via cifs_writepage
1873 */
1874 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1875 return generic_writepages(mapping, wbc);
1876
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001877 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001878 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001879 end = -1;
1880 } else {
1881 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1882 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1883 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001884 range_whole = true;
1885 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001886 }
1887retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001888 while (!done && index <= end) {
1889 unsigned int i, nr_pages, found_pages;
1890 pgoff_t next = 0, tofind;
1891 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001892
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001893 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1894 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001895
Jeff Laytonc2e87642012-03-23 14:40:55 -04001896 wdata = cifs_writedata_alloc((unsigned int)tofind,
1897 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001898 if (!wdata) {
1899 rc = -ENOMEM;
1900 break;
1901 }
1902
1903 /*
1904 * find_get_pages_tag seems to return a max of 256 on each
1905 * iteration, so we must call it several times in order to
1906 * fill the array or the wsize is effectively limited to
1907 * 256 * PAGE_CACHE_SIZE.
1908 */
1909 found_pages = 0;
1910 pages = wdata->pages;
1911 do {
1912 nr_pages = find_get_pages_tag(mapping, &index,
1913 PAGECACHE_TAG_DIRTY,
1914 tofind, pages);
1915 found_pages += nr_pages;
1916 tofind -= nr_pages;
1917 pages += nr_pages;
1918 } while (nr_pages && tofind && index <= end);
1919
1920 if (found_pages == 0) {
1921 kref_put(&wdata->refcount, cifs_writedata_release);
1922 break;
1923 }
1924
1925 nr_pages = 0;
1926 for (i = 0; i < found_pages; i++) {
1927 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001928 /*
1929 * At this point we hold neither mapping->tree_lock nor
1930 * lock on the page itself: the page may be truncated or
1931 * invalidated (changing page->mapping to NULL), or even
1932 * swizzled back from swapper_space to tmpfs file
1933 * mapping
1934 */
1935
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001936 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001937 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001938 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001939 break;
1940
1941 if (unlikely(page->mapping != mapping)) {
1942 unlock_page(page);
1943 break;
1944 }
1945
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001946 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001947 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001948 unlock_page(page);
1949 break;
1950 }
1951
1952 if (next && (page->index != next)) {
1953 /* Not next consecutive page */
1954 unlock_page(page);
1955 break;
1956 }
1957
1958 if (wbc->sync_mode != WB_SYNC_NONE)
1959 wait_on_page_writeback(page);
1960
1961 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001962 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001963 unlock_page(page);
1964 break;
1965 }
Steve French84d2f072005-10-12 15:32:05 -07001966
Linus Torvaldscb876f42006-12-23 16:19:07 -08001967 /*
1968 * This actually clears the dirty bit in the radix tree.
1969 * See cifs_writepage() for more commentary.
1970 */
1971 set_page_writeback(page);
1972
Jeff Layton3a98b862012-11-26 09:48:41 -05001973 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001974 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001975 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001976 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001977 break;
1978 }
1979
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001980 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001981 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001982 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001983 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001984
1985 /* reset index to refind any pages skipped */
1986 if (nr_pages == 0)
1987 index = wdata->pages[0]->index + 1;
1988
1989 /* put any pages we aren't going to use */
1990 for (i = nr_pages; i < found_pages; i++) {
1991 page_cache_release(wdata->pages[i]);
1992 wdata->pages[i] = NULL;
1993 }
1994
1995 /* nothing to write? */
1996 if (nr_pages == 0) {
1997 kref_put(&wdata->refcount, cifs_writedata_release);
1998 continue;
1999 }
2000
2001 wdata->sync_mode = wbc->sync_mode;
2002 wdata->nr_pages = nr_pages;
2003 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07002004 wdata->pagesz = PAGE_CACHE_SIZE;
2005 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05002006 min(i_size_read(mapping->host) -
2007 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07002008 (loff_t)PAGE_CACHE_SIZE);
2009 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2010 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002011
2012 do {
2013 if (wdata->cfile != NULL)
2014 cifsFileInfo_put(wdata->cfile);
2015 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2016 false);
2017 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002018 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07002019 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002020 break;
Steve French37c0eb42005-10-05 14:50:29 -07002021 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002022 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002023 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2024 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002025 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002026
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002027 for (i = 0; i < nr_pages; ++i)
2028 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002029
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002030 /* send failure -- clean up the mess */
2031 if (rc != 0) {
2032 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002033 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002034 redirty_page_for_writepage(wbc,
2035 wdata->pages[i]);
2036 else
2037 SetPageError(wdata->pages[i]);
2038 end_page_writeback(wdata->pages[i]);
2039 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002040 }
Jeff Layton941b8532011-01-11 07:24:01 -05002041 if (rc != -EAGAIN)
2042 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002043 }
2044 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002045
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002046 wbc->nr_to_write -= nr_pages;
2047 if (wbc->nr_to_write <= 0)
2048 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002049
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002050 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002051 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002052
Steve French37c0eb42005-10-05 14:50:29 -07002053 if (!scanned && !done) {
2054 /*
2055 * We hit the last page and there is more work to be done: wrap
2056 * back to the start of the file
2057 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002058 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002059 index = 0;
2060 goto retry;
2061 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002062
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002063 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002064 mapping->writeback_index = index;
2065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 return rc;
2067}
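/*
 * Outline of the writeback pipeline implemented above:
 *
 *   1. Size each batch from wsize (at most tofind pages), since the
 *      negotiated write size - not the VM - bounds one WRITE request.
 *   2. Gather contiguous dirty pages with find_get_pages_tag(), lock
 *      each, mark it writeback, and stop at the first discontinuity.
 *   3. Hand the batch to the dialect's async_writev(); under
 *      WB_SYNC_ALL an -EAGAIN result is retried with a freshly found
 *      writable handle, otherwise failed pages are redirtied (-EAGAIN)
 *      or flagged with SetPageError().
 */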
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002069static int
2070cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002072 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002073 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002075 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076/* BB add check for wbc flags */
2077 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002078 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002079 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002080
2081 /*
2082 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2083 *
2084 * A writepage() implementation always needs to do either this,
2085 * or re-dirty the page with "redirty_page_for_writepage()" in
2086 * the case of a failure.
2087 *
2088 * Just unlocking the page will cause the radix tree tag-bits
2089 * to fail to update with the state of the page correctly.
2090 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002091 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002092retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002094 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2095 goto retry_write;
2096 else if (rc == -EAGAIN)
2097 redirty_page_for_writepage(wbc, page);
2098 else if (rc != 0)
2099 SetPageError(page);
2100 else
2101 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002102 end_page_writeback(page);
2103 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002104 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 return rc;
2106}
2107
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002108static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2109{
2110 int rc = cifs_writepage_locked(page, wbc);
2111 unlock_page(page);
2112 return rc;
2113}
2114
Nick Piggind9414772008-09-24 11:32:59 -04002115static int cifs_write_end(struct file *file, struct address_space *mapping,
2116 loff_t pos, unsigned len, unsigned copied,
2117 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118{
Nick Piggind9414772008-09-24 11:32:59 -04002119 int rc;
2120 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002121 struct cifsFileInfo *cfile = file->private_data;
2122 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2123 __u32 pid;
2124
2125 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2126 pid = cfile->pid;
2127 else
2128 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
Joe Perchesb6b38f72010-04-21 03:50:45 +00002130 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2131 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002132
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002133 if (PageChecked(page)) {
2134 if (copied == len)
2135 SetPageUptodate(page);
2136 ClearPageChecked(page);
2137 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002138 SetPageUptodate(page);
2139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002141 char *page_data;
2142 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002143 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002144
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002145 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146		/* this is probably better than directly calling
 2147		   partialpage_write since in this function the file handle is
 2148		   known, which we might as well leverage */
2149 /* BB check if anything else missing out of ppw
2150 such as updating last write time */
2151 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002152 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002153 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002155
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002156 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002157 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002158 rc = copied;
2159 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002160 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 }
2162
Nick Piggind9414772008-09-24 11:32:59 -04002163 if (rc > 0) {
2164 spin_lock(&inode->i_lock);
2165 if (pos > inode->i_size)
2166 i_size_write(inode, pos);
2167 spin_unlock(&inode->i_lock);
2168 }
2169
2170 unlock_page(page);
2171 page_cache_release(page);
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 return rc;
2174}
2175
Josef Bacik02c24a82011-07-16 20:44:56 -04002176int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2177 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002179 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002181 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002182 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002183 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002184 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002185 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
Josef Bacik02c24a82011-07-16 20:44:56 -04002187 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2188 if (rc)
2189 return rc;
2190 mutex_lock(&inode->i_mutex);
2191
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002192 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193
Joe Perchesb6b38f72010-04-21 03:50:45 +00002194 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002195 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002196
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002197 if (!CIFS_I(inode)->clientCanCacheRead) {
2198 rc = cifs_invalidate_mapping(inode);
2199 if (rc) {
2200 cFYI(1, "rc: %d during invalidate phase", rc);
2201 rc = 0; /* don't care about it in fsync */
2202 }
2203 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002204
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002205 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002206 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2207 server = tcon->ses->server;
2208 if (server->ops->flush)
2209 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2210 else
2211 rc = -ENOSYS;
2212 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002213
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002214 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002215 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002216 return rc;
2217}
2218
Josef Bacik02c24a82011-07-16 20:44:56 -04002219int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002220{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002221 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002222 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002223 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002224 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002225 struct cifsFileInfo *smbfile = file->private_data;
2226 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002227 struct inode *inode = file->f_mapping->host;
2228
2229 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2230 if (rc)
2231 return rc;
2232 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002233
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002234 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002235
2236 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2237 file->f_path.dentry->d_name.name, datasync);
2238
2239 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002240 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2241 server = tcon->ses->server;
2242 if (server->ops->flush)
2243 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2244 else
2245 rc = -ENOSYS;
2246 }
Steve Frenchb298f222009-02-21 21:17:43 +00002247
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002248 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002249 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 return rc;
2251}
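/*
 * The two fsync flavours above share one skeleton - flush dirty pages,
 * then (unless the mount set NOSSYNC) issue the dialect's flush() on
 * the open handle. The "strict" variant, wired up for strict cache
 * mounts, additionally invalidates the page cache when the read oplock
 * has been lost, so the next read is served by the server instead of
 * possibly stale cached pages.
 */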
2252
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253/*
2254 * As file closes, flush all cached write data for this inode checking
2255 * for write behind errors.
2256 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002257int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258{
Al Viro496ad9a2013-01-23 17:07:38 -05002259 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 int rc = 0;
2261
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002262 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002263 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002264
Joe Perchesb6b38f72010-04-21 03:50:45 +00002265 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266
2267 return rc;
2268}
2269
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002270static int
2271cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2272{
2273 int rc = 0;
2274 unsigned long i;
2275
2276 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002277 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002278 if (!pages[i]) {
2279 /*
2280 * save number of pages we have already allocated and
2281 * return with ENOMEM error
2282 */
2283 num_pages = i;
2284 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002285 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002286 }
2287 }
2288
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002289 if (rc) {
2290 for (i = 0; i < num_pages; i++)
2291 put_page(pages[i]);
2292 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002293 return rc;
2294}
2295
2296static inline
2297size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2298{
2299 size_t num_pages;
2300 size_t clen;
2301
2302 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002303 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002304
2305 if (cur_len)
2306 *cur_len = clen;
2307
2308 return num_pages;
2309}
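/*
 * Worked example for get_numpages(), assuming 4 KiB pages: with
 * wsize = 65536 and len = 200000, clen = min(200000, 65536) = 65536
 * and num_pages = DIV_ROUND_UP(65536, 4096) = 16 - one full wsize
 * chunk; the remaining bytes are picked up by later iterations of the
 * caller's loop.
 */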
2310
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002311static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002312cifs_uncached_writev_complete(struct work_struct *work)
2313{
2314 int i;
2315 struct cifs_writedata *wdata = container_of(work,
2316 struct cifs_writedata, work);
2317 struct inode *inode = wdata->cfile->dentry->d_inode;
2318 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2319
2320 spin_lock(&inode->i_lock);
2321 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2322 if (cifsi->server_eof > inode->i_size)
2323 i_size_write(inode, cifsi->server_eof);
2324 spin_unlock(&inode->i_lock);
2325
2326 complete(&wdata->done);
2327
2328 if (wdata->result != -EAGAIN) {
2329 for (i = 0; i < wdata->nr_pages; i++)
2330 put_page(wdata->pages[i]);
2331 }
2332
2333 kref_put(&wdata->refcount, cifs_writedata_release);
2334}
2335
2336/* attempt to send write to server, retry on any -EAGAIN errors */
2337static int
2338cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2339{
2340 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002341 struct TCP_Server_Info *server;
2342
2343 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002344
2345 do {
2346 if (wdata->cfile->invalidHandle) {
2347 rc = cifs_reopen_file(wdata->cfile, false);
2348 if (rc != 0)
2349 continue;
2350 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002351 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002352 } while (rc == -EAGAIN);
2353
2354 return rc;
2355}
2356
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002357static ssize_t
2358cifs_iovec_write(struct file *file, const struct iovec *iov,
2359 unsigned long nr_segs, loff_t *poffset)
2360{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002361 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002362 size_t copied, len, cur_len;
2363 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002364 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002365 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002366 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002367 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002368 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002369 struct cifs_writedata *wdata, *tmp;
2370 struct list_head wdata_list;
2371 int rc;
2372 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002373
2374 len = iov_length(iov, nr_segs);
2375 if (!len)
2376 return 0;
2377
2378 rc = generic_write_checks(file, poffset, &len, 0);
2379 if (rc)
2380 return rc;
2381
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002382 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002383 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002384 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002385 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002386
2387 if (!tcon->ses->server->ops->async_writev)
2388 return -ENOSYS;
2389
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002390 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002391
2392 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2393 pid = open_file->pid;
2394 else
2395 pid = current->tgid;
2396
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002397 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002398 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002399 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002400
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002401 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2402 wdata = cifs_writedata_alloc(nr_pages,
2403 cifs_uncached_writev_complete);
2404 if (!wdata) {
2405 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002406 break;
2407 }
2408
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002409 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2410 if (rc) {
2411 kfree(wdata);
2412 break;
2413 }
2414
2415 save_len = cur_len;
2416 for (i = 0; i < nr_pages; i++) {
2417 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2418 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2419 0, copied);
2420 cur_len -= copied;
2421 iov_iter_advance(&it, copied);
2422 }
2423 cur_len = save_len - cur_len;
2424
2425 wdata->sync_mode = WB_SYNC_ALL;
2426 wdata->nr_pages = nr_pages;
2427 wdata->offset = (__u64)offset;
2428 wdata->cfile = cifsFileInfo_get(open_file);
2429 wdata->pid = pid;
2430 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002431 wdata->pagesz = PAGE_SIZE;
2432 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
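 /*
 * Illustrative numbers (assuming PAGE_SIZE = 4096 and that the copy from
 * userspace above was not short): if cur_len is 10000, nr_pages is 3 and
 * tailsz = 10000 - 2 * 4096 = 1808, i.e. only the last page is partially
 * filled on the wire.
 */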
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002433 rc = cifs_uncached_retry_writev(wdata);
2434 if (rc) {
2435 kref_put(&wdata->refcount, cifs_writedata_release);
2436 break;
2437 }
2438
2439 list_add_tail(&wdata->list, &wdata_list);
2440 offset += cur_len;
2441 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002442 } while (len > 0);
2443
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002444 /*
2445 * If at least one write was successfully sent, then discard any rc
2446 * value from the later writes. If the other write succeeds, then
2447 * we'll end up returning whatever was written. If it fails, then
2448 * we'll get a new rc value from that.
2449 */
2450 if (!list_empty(&wdata_list))
2451 rc = 0;
2452
2453 /*
2454 * Wait for and collect replies for any successful sends in order of
2455 * increasing offset. Once an error is hit or we get a fatal signal
2456 * while waiting, then return without waiting for any more replies.
2457 */
2458restart_loop:
2459 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2460 if (!rc) {
2461 /* FIXME: freezable too? */
2462 rc = wait_for_completion_killable(&wdata->done);
2463 if (rc)
2464 rc = -EINTR;
2465 else if (wdata->result)
2466 rc = wdata->result;
2467 else
2468 total_written += wdata->bytes;
2469
2470 /* resend call if it's a retryable error */
2471 if (rc == -EAGAIN) {
2472 rc = cifs_uncached_retry_writev(wdata);
2473 goto restart_loop;
2474 }
2475 }
2476 list_del_init(&wdata->list);
2477 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002478 }
2479
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002480 if (total_written > 0)
2481 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002482
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002483 cifs_stats_bytes_written(tcon, total_written);
2484 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002485}
2486
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002487ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002488 unsigned long nr_segs, loff_t pos)
2489{
2490 ssize_t written;
2491 struct inode *inode;
2492
Al Viro496ad9a2013-01-23 17:07:38 -05002493 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002494
2495 /*
2496 * BB - optimize the path when signing is disabled. We can drop this
2497 * extra memory-to-memory copying and use iovec buffers for constructing
2498 * the write request.
2499 */
2500
2501 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2502 if (written > 0) {
2503 CIFS_I(inode)->invalid_mapping = true;
2504 iocb->ki_pos = pos;
2505 }
2506
2507 return written;
2508}
2509
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002510static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002511cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2512 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002513{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002514 struct file *file = iocb->ki_filp;
2515 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2516 struct inode *inode = file->f_mapping->host;
2517 struct cifsInodeInfo *cinode = CIFS_I(inode);
2518 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2519 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002520
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002521 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002522
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002523 /*
2524 * We need to hold the sem to be sure nobody modifies the lock list
2525 * with a brlock that prevents writing.
2526 */
2527 down_read(&cinode->lock_sem);
2528 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2529 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002530 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002531 mutex_lock(&inode->i_mutex);
2532 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002533 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002534 mutex_unlock(&inode->i_mutex);
2535 }
2536
2537 if (rc > 0 || rc == -EIOCBQUEUED) {
2538 ssize_t err;
2539
2540 err = generic_write_sync(file, pos, rc);
2541 if (err < 0 && rc > 0)
2542 rc = err;
2543 }
2544
2545 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002546 return rc;
2547}
2548
2549ssize_t
2550cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2551 unsigned long nr_segs, loff_t pos)
2552{
Al Viro496ad9a2013-01-23 17:07:38 -05002553 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002554 struct cifsInodeInfo *cinode = CIFS_I(inode);
2555 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2556 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2557 iocb->ki_filp->private_data;
2558 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002559 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002560
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002561 if (cinode->clientCanCacheAll) {
2562 if (cap_unix(tcon->ses) &&
2563 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2564 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2565 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2566 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002567 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002568 /*
2569 * For non-oplocked files in strict cache mode we need to write the data
2570 * to the server exactly from pos to pos+len-1 rather than flush all
2571 * affected pages, because flushing may cause an error with mandatory
2572 * locks on these pages but not on the region from pos to pos+len-1.
2573 */
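 /*
 * A concrete (illustrative) case: with a mandatory byte-range lock held
 * on bytes 0-511 of a page and this write targeting bytes 512-1023,
 * flushing the whole page could fail against the lock, while writing
 * exactly pos..pos+len-1 never touches the locked range.
 */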
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002574 written = cifs_user_writev(iocb, iov, nr_segs, pos);
2575 if (written > 0 && cinode->clientCanCacheRead) {
2576 /*
2577 * A Windows 7 server can delay breaking a level2 oplock when a write
2578 * request comes in - break it on the client to prevent reading
2579 * stale data.
2580 */
2581 cifs_invalidate_mapping(inode);
2582 cFYI(1, "Set no oplock for inode=%p after a write operation",
2583 inode);
2584 cinode->clientCanCacheRead = false;
2585 }
2586 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002587}
2588
Jeff Layton0471ca32012-05-16 07:13:16 -04002589static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002590cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002591{
2592 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002593
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002594 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2595 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002596 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002597 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002598 INIT_LIST_HEAD(&rdata->list);
2599 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002600 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002601 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002602
Jeff Layton0471ca32012-05-16 07:13:16 -04002603 return rdata;
2604}
2605
Jeff Layton6993f742012-05-16 07:13:17 -04002606void
2607cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002608{
Jeff Layton6993f742012-05-16 07:13:17 -04002609 struct cifs_readdata *rdata = container_of(refcount,
2610 struct cifs_readdata, refcount);
2611
2612 if (rdata->cfile)
2613 cifsFileInfo_put(rdata->cfile);
2614
Jeff Layton0471ca32012-05-16 07:13:16 -04002615 kfree(rdata);
2616}
2617
Jeff Layton2a1bb132012-05-16 07:13:17 -04002618static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002619cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002620{
2621 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002622 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002623 unsigned int i;
2624
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002625 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002626 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2627 if (!page) {
2628 rc = -ENOMEM;
2629 break;
2630 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002631 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002632 }
2633
2634 if (rc) {
 /* stop at the first empty slot: pages past a failed allocation
 * were never allocated, so they must not be released */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002635 for (i = 0; i < nr_pages && rdata->pages[i]; i++) {
2636 put_page(rdata->pages[i]);
2637 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002638 }
2639 }
2640 return rc;
2641}
2642
2643static void
2644cifs_uncached_readdata_release(struct kref *refcount)
2645{
Jeff Layton1c892542012-05-16 07:13:17 -04002646 struct cifs_readdata *rdata = container_of(refcount,
2647 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002648 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002649
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002650 for (i = 0; i < rdata->nr_pages; i++) {
2651 put_page(rdata->pages[i]);
2652 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002653 }
2654 cifs_readdata_release(refcount);
2655}
2656
2657static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002658cifs_retry_async_readv(struct cifs_readdata *rdata)
2659{
2660 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002661 struct TCP_Server_Info *server;
2662
2663 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002664
2665 do {
2666 if (rdata->cfile->invalidHandle) {
2667 rc = cifs_reopen_file(rdata->cfile, true);
2668 if (rc != 0)
2669 continue;
2670 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002671 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002672 } while (rc == -EAGAIN);
2673
2674 return rc;
2675}
2676
Jeff Layton1c892542012-05-16 07:13:17 -04002677/**
2678 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2679 * @rdata: the readdata response with list of pages holding data
2680 * @iov: vector in which we should copy the data
2681 * @nr_segs: number of segments in vector
2682 * @offset: file offset corresponding to the start of @iov
2683 * @copied: used to return the amount of data copied to the iov
2684 *
2685 * This function copies data from a list of pages in a readdata response into
2686 * an array of iovecs. It will first calculate where the data should go
2687 * based on the info in the readdata and then copy the data into that spot.
2688 */
2689static ssize_t
2690cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2691 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2692{
2693 int rc = 0;
2694 struct iov_iter ii;
2695 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002696 ssize_t remaining = rdata->bytes;
2697 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002698 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002699
2700 /* set up iov_iter and advance to the correct offset */
2701 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2702 iov_iter_advance(&ii, pos);
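 /*
 * Example (illustrative): if the original request started at file
 * offset 4096 and this rdata covers file offset 12288, pos is 8192,
 * so we skip the first 8192 bytes of iovec space before copying.
 */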
2703
2704 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002705 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002706 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002707 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002708
2709 /* copy a whole page or whatever's left */
2710 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2711
2712 /* ...but limit it to whatever space is left in the iov */
2713 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2714
2715 /* go while there's data to be copied and no errors */
2716 if (copy && !rc) {
2717 pdata = kmap(page);
2718 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2719 (int)copy);
2720 kunmap(page);
2721 if (!rc) {
2722 *copied += copy;
2723 remaining -= copy;
2724 iov_iter_advance(&ii, copy);
2725 }
2726 }
Jeff Layton1c892542012-05-16 07:13:17 -04002727 }
2728
2729 return rc;
2730}
2731
2732static void
2733cifs_uncached_readv_complete(struct work_struct *work)
2734{
2735 struct cifs_readdata *rdata = container_of(work,
2736 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002737
2738 complete(&rdata->done);
2739 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2740}
2741
2742static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002743cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2744 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002745{
Jeff Layton8321fec2012-09-19 06:22:32 -07002746 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002747 unsigned int i;
2748 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002749 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002750
Jeff Layton8321fec2012-09-19 06:22:32 -07002751 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002752 for (i = 0; i < nr_pages; i++) {
2753 struct page *page = rdata->pages[i];
2754
Jeff Layton8321fec2012-09-19 06:22:32 -07002755 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002756 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002757 iov.iov_base = kmap(page);
2758 iov.iov_len = PAGE_SIZE;
2759 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2760 i, iov.iov_base, iov.iov_len);
2761 len -= PAGE_SIZE;
2762 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002763 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002764 iov.iov_base = kmap(page);
2765 iov.iov_len = len;
2766 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2767 i, iov.iov_base, iov.iov_len);
2768 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2769 rdata->tailsz = len;
2770 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002771 } else {
2772 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002773 rdata->pages[i] = NULL;
2774 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002775 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002776 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002777 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002778
2779 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2780 kunmap(page);
2781 if (result < 0)
2782 break;
2783
2784 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002785 }
2786
Jeff Layton8321fec2012-09-19 06:22:32 -07002787 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002788}
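/*
 * Worked example for the loop above (assuming PAGE_SIZE = 4096): a server
 * response of len = 6000 read into a 2-page rdata fills page 0 completely
 * (4096 bytes), places the remaining 1904 bytes at the start of page 1 with
 * the rest of that page zeroed, and leaves rdata->tailsz = 1904.
 */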
2789
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002790static ssize_t
2791cifs_iovec_read(struct file *file, const struct iovec *iov,
2792 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793{
Jeff Layton1c892542012-05-16 07:13:17 -04002794 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002795 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002796 ssize_t total_read = 0;
2797 loff_t offset = *poffset;
2798 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002800 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002802 struct cifs_readdata *rdata, *tmp;
2803 struct list_head rdata_list;
2804 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002805
2806 if (!nr_segs)
2807 return 0;
2808
2809 len = iov_length(iov, nr_segs);
2810 if (!len)
2811 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
Jeff Layton1c892542012-05-16 07:13:17 -04002813 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002814 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002815 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002816 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002818 if (!tcon->ses->server->ops->async_readv)
2819 return -ENOSYS;
2820
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002821 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2822 pid = open_file->pid;
2823 else
2824 pid = current->tgid;
2825
Steve Frenchad7a2922008-02-07 23:25:02 +00002826 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002827 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002828
Jeff Layton1c892542012-05-16 07:13:17 -04002829 do {
2830 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2831 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002832
Jeff Layton1c892542012-05-16 07:13:17 -04002833 /* allocate a readdata struct */
2834 rdata = cifs_readdata_alloc(npages,
2835 cifs_uncached_readv_complete);
2836 if (!rdata) {
2837 rc = -ENOMEM;
2838 break; /* no rdata to put yet; the error label would kref_put a NULL pointer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002840
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002841 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002842 if (rc)
2843 goto error;
2844
2845 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002846 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002847 rdata->offset = offset;
2848 rdata->bytes = cur_len;
2849 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002850 rdata->pagesz = PAGE_SIZE;
2851 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002852
2853 rc = cifs_retry_async_readv(rdata);
2854error:
2855 if (rc) {
2856 kref_put(&rdata->refcount,
2857 cifs_uncached_readdata_release);
2858 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 }
Jeff Layton1c892542012-05-16 07:13:17 -04002860
2861 list_add_tail(&rdata->list, &rdata_list);
2862 offset += cur_len;
2863 len -= cur_len;
2864 } while (len > 0);
2865
2866 /* if at least one read request was sent successfully, then reset rc */
2867 if (!list_empty(&rdata_list))
2868 rc = 0;
2869
2870 /* the loop below should proceed in the order of increasing offsets */
2871restart_loop:
2872 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2873 if (!rc) {
2874 ssize_t copied;
2875
2876 /* FIXME: freezable sleep too? */
2877 rc = wait_for_completion_killable(&rdata->done);
2878 if (rc)
2879 rc = -EINTR;
2880 else if (rdata->result)
2881 rc = rdata->result;
2882 else {
2883 rc = cifs_readdata_to_iov(rdata, iov,
2884 nr_segs, *poffset,
2885 &copied);
2886 total_read += copied;
2887 }
2888
2889 /* resend call if it's a retryable error */
2890 if (rc == -EAGAIN) {
2891 rc = cifs_retry_async_readv(rdata);
2892 goto restart_loop;
2893 }
2894 }
2895 list_del_init(&rdata->list);
2896 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002898
Jeff Layton1c892542012-05-16 07:13:17 -04002899 cifs_stats_bytes_read(tcon, total_read);
2900 *poffset += total_read;
2901
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002902 /* mask nodata case */
2903 if (rc == -ENODATA)
2904 rc = 0;
2905
Jeff Layton1c892542012-05-16 07:13:17 -04002906 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907}
2908
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002909ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002910 unsigned long nr_segs, loff_t pos)
2911{
2912 ssize_t read;
2913
2914 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2915 if (read > 0)
2916 iocb->ki_pos = pos;
2917
2918 return read;
2919}
2920
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002921ssize_t
2922cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2923 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002924{
Al Viro496ad9a2013-01-23 17:07:38 -05002925 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002926 struct cifsInodeInfo *cinode = CIFS_I(inode);
2927 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2928 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2929 iocb->ki_filp->private_data;
2930 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2931 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002932
2933 /*
2934 * In strict cache mode we need to read from the server every time
2935 * if we don't have a level II oplock, because the server can delay the
2936 * mtime change - so we can't make a decision about invalidating the
2937 * inode. We can also fail at reading pages if there are mandatory
2938 * locks on pages affected by this read but not on the region from
2939 * pos to pos+len-1.
2940 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002941 if (!cinode->clientCanCacheRead)
2942 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002943
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002944 if (cap_unix(tcon->ses) &&
2945 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2946 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2947 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2948
2949 /*
2950 * We need to hold the sem to be sure nobody modifies lock list
2951 * with a brlock that prevents reading.
2952 */
2953 down_read(&cinode->lock_sem);
2954 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2955 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002956 NULL, CIFS_READ_OP))
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002957 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2958 up_read(&cinode->lock_sem);
2959 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002960}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002962static ssize_t
2963cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964{
2965 int rc = -EACCES;
2966 unsigned int bytes_read = 0;
2967 unsigned int total_read;
2968 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002969 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002971 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002972 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002973 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002974 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002976 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002977 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002978 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002980 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002981 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002983 /* FIXME: set up handlers for larger reads and/or convert to async */
2984 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2985
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302987 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002988 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302989 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002991 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002992 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002993 server = tcon->ses->server;
2994
2995 if (!server->ops->sync_read) {
2996 free_xid(xid);
2997 return -ENOSYS;
2998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003000 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3001 pid = open_file->pid;
3002 else
3003 pid = current->tgid;
3004
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00003006 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003008 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3009 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003010 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003011 /*
3012 * For Windows ME and 9x we do not want to request more than was
3013 * negotiated, since the server will then refuse the read.
3014 */
3015 if ((tcon->ses) && !(tcon->ses->capabilities &
3016 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03003017 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04003018 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07003019 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 rc = -EAGAIN;
3021 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00003022 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003023 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 if (rc != 0)
3025 break;
3026 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003027 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003028 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003029 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003030 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003031 rc = server->ops->sync_read(xid, open_file, &io_parms,
3032 &bytes_read, &cur_offset,
3033 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 }
3035 if (rc || (bytes_read == 0)) {
3036 if (total_read) {
3037 break;
3038 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003039 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 return rc;
3041 }
3042 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003043 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003044 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 }
3046 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003047 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 return total_read;
3049}
3050
Jeff Laytonca83ce32011-04-12 09:13:44 -04003051/*
3052 * If the page is mmap'ed into a process' page tables, then we need to make
3053 * sure that it doesn't change while being written back.
3054 */
3055static int
3056cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3057{
3058 struct page *page = vmf->page;
3059
3060 lock_page(page);
3061 return VM_FAULT_LOCKED;
3062}
3063
3064static struct vm_operations_struct cifs_file_vm_ops = {
3065 .fault = filemap_fault,
3066 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003067 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003068};
3069
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003070int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3071{
3072 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003073 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003074
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003075 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003076
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003077 if (!CIFS_I(inode)->clientCanCacheRead) {
3078 rc = cifs_invalidate_mapping(inode);
3079 if (rc) {
 free_xid(xid); /* release the xid on this early-return path too */
 return rc;
 }
3081 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003082
3083 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003084 if (rc == 0)
3085 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003086 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003087 return rc;
3088}
3089
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3091{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 int rc, xid;
3093
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003094 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003095 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00003097 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003098 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 return rc;
3100 }
3101 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003102 if (rc == 0)
3103 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003104 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 return rc;
3106}
3107
Jeff Layton0471ca32012-05-16 07:13:16 -04003108static void
3109cifs_readv_complete(struct work_struct *work)
3110{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003111 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003112 struct cifs_readdata *rdata = container_of(work,
3113 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003114
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003115 for (i = 0; i < rdata->nr_pages; i++) {
3116 struct page *page = rdata->pages[i];
3117
Jeff Layton0471ca32012-05-16 07:13:16 -04003118 lru_cache_add_file(page);
3119
3120 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003121 flush_dcache_page(page);
3122 SetPageUptodate(page);
3123 }
3124
3125 unlock_page(page);
3126
3127 if (rdata->result == 0)
3128 cifs_readpage_to_fscache(rdata->mapping->host, page);
3129
3130 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003131 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003132 }
Jeff Layton6993f742012-05-16 07:13:17 -04003133 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003134}
3135
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003136static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003137cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3138 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003139{
Jeff Layton8321fec2012-09-19 06:22:32 -07003140 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003141 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003142 u64 eof;
3143 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003144 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003145 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003146
3147 /* determine the eof that the server (probably) has */
3148 eof = CIFS_I(rdata->mapping->host)->server_eof;
3149 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
3150 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
3151
Jeff Layton8321fec2012-09-19 06:22:32 -07003152 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003153 for (i = 0; i < nr_pages; i++) {
3154 struct page *page = rdata->pages[i];
3155
Jeff Layton8321fec2012-09-19 06:22:32 -07003156 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003157 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003158 iov.iov_base = kmap(page);
3159 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003160 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003161 i, page->index, iov.iov_base, iov.iov_len);
3162 len -= PAGE_CACHE_SIZE;
3163 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003164 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003165 iov.iov_base = kmap(page);
3166 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003167 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07003168 i, page->index, iov.iov_base, iov.iov_len);
3169 memset(iov.iov_base + len,
3170 '\0', PAGE_CACHE_SIZE - len);
3171 rdata->tailsz = len;
3172 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003173 } else if (page->index > eof_index) {
3174 /*
3175 * The VFS will not try to do readahead past the
3176 * i_size, but it's possible that we have outstanding
3177 * writes with gaps in the middle and the i_size hasn't
3178 * caught up yet. Populate those with zeroed out pages
3179 * to prevent the VFS from repeatedly attempting to
3180 * fill them until the writes are flushed.
3181 */
3182 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003183 lru_cache_add_file(page);
3184 flush_dcache_page(page);
3185 SetPageUptodate(page);
3186 unlock_page(page);
3187 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003188 rdata->pages[i] = NULL;
3189 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003190 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003191 } else {
3192 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003193 lru_cache_add_file(page);
3194 unlock_page(page);
3195 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003196 rdata->pages[i] = NULL;
3197 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003198 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003199 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003200
3201 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3202 kunmap(page);
3203 if (result < 0)
3204 break;
3205
3206 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003207 }
3208
Jeff Layton8321fec2012-09-19 06:22:32 -07003209 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003210}
3211
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212static int cifs_readpages(struct file *file, struct address_space *mapping,
3213 struct list_head *page_list, unsigned num_pages)
3214{
Jeff Layton690c5e32011-10-19 15:30:16 -04003215 int rc;
3216 struct list_head tmplist;
3217 struct cifsFileInfo *open_file = file->private_data;
3218 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3219 unsigned int rsize = cifs_sb->rsize;
3220 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221
Jeff Layton690c5e32011-10-19 15:30:16 -04003222 /*
3223 * Give up immediately if rsize is too small to read an entire page.
3224 * The VFS will fall back to readpage. We should never reach this
3225 * point however since we set ra_pages to 0 when the rsize is smaller
3226 * than a cache page.
3227 */
3228 if (unlikely(rsize < PAGE_CACHE_SIZE))
3229 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003230
Suresh Jayaraman56698232010-07-05 18:13:25 +05303231 /*
3232 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3233 * immediately if the cookie is negative
3234 */
3235 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3236 &num_pages);
3237 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003238 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303239
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003240 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3241 pid = open_file->pid;
3242 else
3243 pid = current->tgid;
3244
Jeff Layton690c5e32011-10-19 15:30:16 -04003245 rc = 0;
3246 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
Jeff Layton690c5e32011-10-19 15:30:16 -04003248 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3249 mapping, num_pages);
3250
3251 /*
3252 * Start with the page at end of list and move it to private
3253 * list. Do the same with any following pages until we hit
3254 * the rsize limit, hit an index discontinuity, or run out of
3255 * pages. Issue the async read and then start the loop again
3256 * until the list is empty.
3257 *
3258 * Note that list order is important. The page_list is in
3259 * the order of declining indexes. When we put the pages in
3260 * the rdata->pages, then we want them in increasing order.
3261 */
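 /*
 * Illustrative walk-through: if page_list holds indexes 7, 6, 5, 4 (in
 * declining order), we take index 4 from the tail first, then pull 5, 6
 * and 7 onto tmplist, so rdata->pages ends up in increasing index order,
 * matching the byte order of data returned by the wire read.
 */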
3262 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003263 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003264 unsigned int bytes = PAGE_CACHE_SIZE;
3265 unsigned int expected_index;
3266 unsigned int nr_pages = 1;
3267 loff_t offset;
3268 struct page *page, *tpage;
3269 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270
3271 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272
Jeff Layton690c5e32011-10-19 15:30:16 -04003273 /*
3274 * Lock the page and put it in the cache. Since no one else
3275 * should have access to this page, we're safe to simply set
3276 * PG_locked without checking it first.
3277 */
3278 __set_page_locked(page);
3279 rc = add_to_page_cache_locked(page, mapping,
3280 page->index, GFP_KERNEL);
3281
3282 /* give up if we can't stick it in the cache */
3283 if (rc) {
3284 __clear_page_locked(page);
3285 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287
Jeff Layton690c5e32011-10-19 15:30:16 -04003288 /* move first page to the tmplist */
3289 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3290 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
Jeff Layton690c5e32011-10-19 15:30:16 -04003292 /* now try and add more pages onto the request */
3293 expected_index = page->index + 1;
3294 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3295 /* discontinuity ? */
3296 if (page->index != expected_index)
3297 break;
3298
3299 /* would this page push the read over the rsize? */
3300 if (bytes + PAGE_CACHE_SIZE > rsize)
3301 break;
3302
3303 __set_page_locked(page);
3304 if (add_to_page_cache_locked(page, mapping,
3305 page->index, GFP_KERNEL)) {
3306 __clear_page_locked(page);
3307 break;
3308 }
3309 list_move_tail(&page->lru, &tmplist);
3310 bytes += PAGE_CACHE_SIZE;
3311 expected_index++;
3312 nr_pages++;
3313 }
3314
Jeff Layton0471ca32012-05-16 07:13:16 -04003315 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003316 if (!rdata) {
3317 /* best to give up if we're out of mem */
3318 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3319 list_del(&page->lru);
3320 lru_cache_add_file(page);
3321 unlock_page(page);
3322 page_cache_release(page);
3323 }
3324 rc = -ENOMEM;
3325 break;
3326 }
3327
Jeff Layton6993f742012-05-16 07:13:17 -04003328 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003329 rdata->mapping = mapping;
3330 rdata->offset = offset;
3331 rdata->bytes = bytes;
3332 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003333 rdata->pagesz = PAGE_CACHE_SIZE;
3334 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003335
3336 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3337 list_del(&page->lru);
3338 rdata->pages[rdata->nr_pages++] = page;
3339 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003340
Jeff Layton2a1bb132012-05-16 07:13:17 -04003341 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003342 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003343 for (i = 0; i < rdata->nr_pages; i++) {
3344 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003345 lru_cache_add_file(page);
3346 unlock_page(page);
3347 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 }
Jeff Layton6993f742012-05-16 07:13:17 -04003349 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 break;
3351 }
Jeff Layton6993f742012-05-16 07:13:17 -04003352
3353 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 }
3355
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 return rc;
3357}
3358
3359static int cifs_readpage_worker(struct file *file, struct page *page,
3360 loff_t *poffset)
3361{
3362 char *read_data;
3363 int rc;
3364
Suresh Jayaraman56698232010-07-05 18:13:25 +05303365 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003366 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303367 if (rc == 0)
3368 goto read_complete;
3369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 page_cache_get(page);
3371 read_data = kmap(page);
3372 /* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 if (rc < 0)
3377 goto io_error;
3378 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003379 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003380
Al Viro496ad9a2013-01-23 17:07:38 -05003381 file_inode(file)->i_atime =
3382 current_fs_time(file_inode(file)->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003383
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 if (PAGE_CACHE_SIZE > rc)
3385 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3386
3387 flush_dcache_page(page);
3388 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303389
3390 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003391 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303392
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003394
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003396 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303398
3399read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400 return rc;
3401}
3402
3403static int cifs_readpage(struct file *file, struct page *page)
3404{
3405 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
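 /* e.g. page->index = 3 maps to file offset 3 << 12 = 12288, assuming a
 4K page cache size */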
3406 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003407 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003409 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410
3411 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303412 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003413 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303414 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 }
3416
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003417 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003418 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
3420 rc = cifs_readpage_worker(file, page, &offset);
3421
3422 unlock_page(page);
3423
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003424 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 return rc;
3426}
3427
Steve Frencha403a0a2007-07-26 15:54:16 +00003428static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3429{
3430 struct cifsFileInfo *open_file;
3431
Jeff Layton44772882010-10-15 15:34:03 -04003432 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003433 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003434 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003435 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003436 return 1;
3437 }
3438 }
Jeff Layton44772882010-10-15 15:34:03 -04003439 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003440 return 0;
3441}
3442
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443/* We do not want to update the file size from the server for inodes
3444 open for write - to avoid races with writepage extending
3445 the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003446 refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 but this is tricky to do without racing with writebehind
3448 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003449bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450{
Steve Frencha403a0a2007-07-26 15:54:16 +00003451 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003452 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003453
Steve Frencha403a0a2007-07-26 15:54:16 +00003454 if (is_inode_writable(cifsInode)) {
3455 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003456 struct cifs_sb_info *cifs_sb;
3457
Steve Frenchc32a0b62006-01-12 14:41:28 -08003458 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003460 /* since there is no page cache to corrupt on directio,
Steve Frenchc32a0b62006-01-12 14:41:28 -08003461 we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003462 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003463 }
3464
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003465 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003466 return true;
Steve French7ba52632007-02-08 18:14:13 +00003467
Steve French4b18f2a2008-04-29 00:06:05 +00003468 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003469 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003470 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471}
3472
Nick Piggind9414772008-09-24 11:32:59 -04003473static int cifs_write_begin(struct file *file, struct address_space *mapping,
3474 loff_t pos, unsigned len, unsigned flags,
3475 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476{
Nick Piggind9414772008-09-24 11:32:59 -04003477 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3478 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003479 loff_t page_start = pos & PAGE_MASK;
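 /*
 * Example (illustrative, 4K pages): pos = 5000 yields index = 1,
 * offset = 904 and page_start = 4096.
 */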
3480 loff_t i_size;
3481 struct page *page;
3482 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483
Joe Perchesb6b38f72010-04-21 03:50:45 +00003484 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003485
Nick Piggin54566b22009-01-04 12:00:53 -08003486 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003487 if (!page) {
3488 rc = -ENOMEM;
3489 goto out;
3490 }
Nick Piggind9414772008-09-24 11:32:59 -04003491
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003492 if (PageUptodate(page))
3493 goto out;
Steve French8a236262007-03-06 00:31:00 +00003494
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003495 /*
3496 * If we write a full page it will be up to date, no need to read from
3497 * the server. If the write is short, we'll end up doing a sync write
3498 * instead.
3499 */
3500 if (len == PAGE_CACHE_SIZE)
3501 goto out;
3502
3503 /*
3504 * optimize away the read when we have an oplock, and we're not
3505 * expecting to use any of the data we'd be reading in. That
3506 * is, when the page lies beyond the EOF, or straddles the EOF
3507 * and the write will cover all of the existing data.
3508 */
3509 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3510 i_size = i_size_read(mapping->host);
3511 if (page_start >= i_size ||
3512 (offset == 0 && (pos + len) >= i_size)) {
3513 zero_user_segments(page, 0, offset,
3514 offset + len,
3515 PAGE_CACHE_SIZE);
3516 /*
3517 * PageChecked means that the parts of the page
3518 * to which we're not writing are considered up
3519 * to date. Once the data is copied to the
3520 * page, it can be set uptodate.
3521 */
3522 SetPageChecked(page);
3523 goto out;
3524 }
3525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526
Nick Piggind9414772008-09-24 11:32:59 -04003527 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003528 /*
3529 * might as well read a page, it is fast enough. If we get
3530 * an error, we don't need to return it. cifs_write_end will
3531 * do a sync write instead since PG_uptodate isn't set.
3532 */
3533 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003534 } else {
3535 /* we could try using another file handle if there is one -
3536 but how would we lock it to prevent close of that handle
3537 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003538 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00003539 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003540out:
3541 *pagep = page;
3542 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543}
3544
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303545static int cifs_release_page(struct page *page, gfp_t gfp)
3546{
3547 if (PagePrivate(page))
3548 return 0;
3549
3550 return cifs_fscache_release_page(page, gfp);
3551}
3552
3553static void cifs_invalidate_page(struct page *page, unsigned long offset)
3554{
3555 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3556
3557 if (offset == 0)
3558 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3559}
3560
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003561static int cifs_launder_page(struct page *page)
3562{
3563 int rc = 0;
3564 loff_t range_start = page_offset(page);
3565 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3566 struct writeback_control wbc = {
3567 .sync_mode = WB_SYNC_ALL,
3568 .nr_to_write = 0,
3569 .range_start = range_start,
3570 .range_end = range_end,
3571 };
3572
3573 cFYI(1, "Launder page: %p", page);
3574
3575 if (clear_page_dirty_for_io(page))
3576 rc = cifs_writepage_locked(page, &wbc);
3577
3578 cifs_fscache_invalidate_page(page, page->mapping->host);
3579 return rc;
3580}
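/*
 * Illustrative numbers for the wbc range above (assuming 4K pages): for a
 * page at index 2, range_start = 8192 and range_end = 12287, so writeback
 * targets exactly that one page.
 */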
3581
Tejun Heo9b646972010-07-20 22:09:02 +02003582void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003583{
3584 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3585 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003586 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003587 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003588 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003589 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003590
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003591 if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
3592 cifs_has_mand_locks(cinode)) {
3593 cFYI(1, "Reset oplock to None for inode=%p due to mand locks",
3594 inode);
3595 cinode->clientCanCacheRead = false;
3596 }
3597
Jeff Layton3bc303c2009-09-21 06:47:50 -04003598 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003599 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003600 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003601 else
Al Viro8737c932009-12-24 06:47:55 -05003602 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003603 rc = filemap_fdatawrite(inode->i_mapping);
3604 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003605 rc = filemap_fdatawait(inode->i_mapping);
3606 mapping_set_error(inode->i_mapping, rc);
Pavel Shilovsky03eca702012-12-06 21:24:33 +04003607 cifs_invalidate_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003608 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003609 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003610 }
3611
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003612 rc = cifs_push_locks(cfile);
3613 if (rc)
3614 cERROR(1, "Push locks rc = %d", rc);
3615
Jeff Layton3bc303c2009-09-21 06:47:50 -04003616 /*
3617 * Releasing a stale oplock after a recent reconnect of the smb session,
3618 * using a now-incorrect file handle, is not a data integrity issue, but do
3619 * not bother sending an oplock release if the session to the server is
3620 * still disconnected, since the oplock was already released by the server.
3621 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003622 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003623 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3624 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003625 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003626 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003627}
3628
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003629const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630 .readpage = cifs_readpage,
3631 .readpages = cifs_readpages,
3632 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003633 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003634 .write_begin = cifs_write_begin,
3635 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303637 .releasepage = cifs_release_page,
3638 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003639 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003641
3642/*
3643 * cifs_readpages requires the server to support a buffer large enough to
3644 * contain the header plus one complete page of data. Otherwise, we need
3645 * to leave cifs_readpages out of the address space operations.
3646 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003647const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003648 .readpage = cifs_readpage,
3649 .writepage = cifs_writepage,
3650 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003651 .write_begin = cifs_write_begin,
3652 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003653 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303654 .releasepage = cifs_release_page,
3655 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003656 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003657};