/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

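/*
 * Map the O_ACCMODE bits of the POSIX open flags onto the desired-access
 * mask requested in an NT-style SMB open.
 */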
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

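/*
 * Translate POSIX open flags into the SMB_O_* flags used by the POSIX
 * open/create call on servers supporting the CIFS unix extensions.
 */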
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

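/*
 * Derive the NT create disposition (FILE_CREATE, FILE_OVERWRITE_IF, ...)
 * from the O_CREAT/O_EXCL/O_TRUNC combination in the open flags.
 */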
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}

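/*
 * Allocate the cifsFileInfo for a freshly opened handle and link it into
 * the per-tcon and per-inode open file lists, readable instances first.
 */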
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
	INIT_LIST_HEAD(&pCifsFile->llist);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need to invalidate the mapping on
		   the last close because it may cause an error when we open
		   this file again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

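/*
 * VFS ->open() entry point: attempts a POSIX open when the server
 * advertises POSIX path operations, falling back to the NT-style open
 * otherwise, then attaches the resulting cifsFileInfo to the struct file.
 */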
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

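/* Allocate and initialize a byte-range lock record owned by the current task. */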
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

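/* Wake up all tasks queued on the blocked-lock list of the given lock. */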
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

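/*
 * Look for a lock on this open file that overlaps [offset, offset + length).
 * For a shared request, an overlapping lock taken through the same handle by
 * the same task, or one of the same shared type, is not treated as a conflict.
 */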
static bool
cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cur,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &cfile->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & server->vals->shared_lock_type) &&
			 ((server->ops->compare_fids(cur, cfile) &&
			   current->tgid == li->pid) || type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifsFileInfo *fid, *tmp;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
		rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}
	spin_unlock(&cifs_file_list_lock);

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&lock->llist, &cfile->llist);
	mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		mutex_lock(&cinode->lock_mutex);
		list_del_init(&lock->blist);
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if we need to send a request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	mutex_unlock(&cinode->lock_mutex);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

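/*
 * Send all cached byte-range locks for this handle to the server, packing
 * as many LOCKING_ANDX_RANGE entries per request as maxBuf allows.
 */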
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	FreeXid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_mutex that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

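/*
 * Use the POSIX brlock push when the server supports the unix extensions
 * FCNTL capability and CIFS_MOUNT_NOPOSIXBRL is not set for this mount;
 * fall back to pushing mandatory-style locks otherwise.
 */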
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

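/*
 * Decode a VFS file_lock into the server's lock type bits and the
 * lock/unlock/wait flags, logging any flags this client does not handle.
 */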
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

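/* Thin wrapper that sends a byte-range LOCKING_ANDX request for this handle. */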
static int
cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
		    __u64 length, __u32 type, int lock, int unlock, bool wait)
{
	return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
			   current->tgid, length, offset, unlock, lock,
			   (__u8)type, wait, 0);
}

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
				 1, 0, false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
				 type | server->vals->shared_lock_type, 1, 0,
				 false);
	if (rc == 0) {
		rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
					 type | server->vals->shared_lock_type,
					 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

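/* Helpers for bulk-moving and freeing lists of cached byte-range locks. */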
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

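/*
 * Unlock all cached locks of the current task that fall within the given
 * flock range. Locks are staged on a temporary list so they can be restored
 * to the file's list if the batched unlock request fails on the server.
 */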
1223static int
1224cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1225{
1226 int rc = 0, stored_rc;
1227 int types[] = {LOCKING_ANDX_LARGE_FILES,
1228 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1229 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001230 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001231 LOCKING_ANDX_RANGE *buf, *cur;
1232 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1233 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1234 struct cifsLockInfo *li, *tmp;
1235 __u64 length = 1 + flock->fl_end - flock->fl_start;
1236 struct list_head tmp_llist;
1237
1238 INIT_LIST_HEAD(&tmp_llist);
1239
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001240 /*
1241 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1242 * and check it for zero before using.
1243 */
1244 max_buf = tcon->ses->server->maxBuf;
1245 if (!max_buf)
1246 return -EINVAL;
1247
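	/* how many LOCKING_ANDX_RANGE entries fit into a single request */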
1248 max_num = (max_buf - sizeof(struct smb_hdr)) /
1249 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001250 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1251 if (!buf)
1252 return -ENOMEM;
1253
1254 mutex_lock(&cinode->lock_mutex);
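	/*
	 * Two passes, one per entry in types[]: every LOCKING_ANDX request
	 * built below carries a single lock type for all of its ranges.
	 */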
1255 for (i = 0; i < 2; i++) {
1256 cur = buf;
1257 num = 0;
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001258 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001259 if (flock->fl_start > li->offset ||
1260 (flock->fl_start + length) <
1261 (li->offset + li->length))
1262 continue;
1263 if (current->tgid != li->pid)
1264 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001265 if (types[i] != li->type)
1266 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001267 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001268 /*
1269 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001270 				 * the lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001271 */
1272 list_del(&li->llist);
1273 cifs_del_lock_waiters(li);
1274 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001275 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001276 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001277 cur->Pid = cpu_to_le16(li->pid);
1278 cur->LengthLow = cpu_to_le32((u32)li->length);
1279 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1280 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1281 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1282 /*
1283 			 * We need to save the lock here so we can add it back to
1284 * the file's list if the unlock range request fails on
1285 * the server.
1286 */
1287 list_move(&li->llist, &tmp_llist);
1288 if (++num == max_num) {
1289 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1290 li->type, num, 0, buf);
1291 if (stored_rc) {
1292 /*
1293 * We failed on the unlock range
1294 * request - add all locks from the tmp
1295 * list to the head of the file's list.
1296 */
1297 cifs_move_llist(&tmp_llist,
1298 &cfile->llist);
1299 rc = stored_rc;
1300 } else
1301 /*
1302 * The unlock range request succeed -
1303 * free the tmp list.
1304 */
1305 cifs_free_llist(&tmp_llist);
1306 cur = buf;
1307 num = 0;
1308 } else
1309 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001310 }
1311 if (num) {
1312 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1313 types[i], num, 0, buf);
1314 if (stored_rc) {
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001315 cifs_move_llist(&tmp_llist, &cfile->llist);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001316 rc = stored_rc;
1317 } else
1318 cifs_free_llist(&tmp_llist);
1319 }
1320 }
1321
1322 mutex_unlock(&cinode->lock_mutex);
1323 kfree(buf);
1324 return rc;
1325}
1326
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001328cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1330{
1331 int rc = 0;
1332 __u64 length = 1 + flock->fl_end - flock->fl_start;
1333 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1334 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001335 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001336 __u16 netfid = cfile->netfid;
1337
1338 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001339 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001340
1341 rc = cifs_posix_lock_set(file, flock);
1342 		if (rc <= 0)
1343 return rc;
1344
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001345 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001346 posix_lock_type = CIFS_RDLCK;
1347 else
1348 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001349
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001350 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001351 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001352
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001353 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001354 flock->fl_start, length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001355 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001357 }
1358
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001359 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001360 struct cifsLockInfo *lock;
1361
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001362 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001363 if (!lock)
1364 return -ENOMEM;
1365
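		/*
		 * rc == 0 means the lock was recorded locally and nothing
		 * has to be sent; rc < 0 is an error or a conflict; only a
		 * positive rc falls through to the server request below.
		 */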
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001366 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001367 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001368 kfree(lock);
1369 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001370 goto out;
1371
Pavel Shilovsky7f924472012-03-28 17:10:25 +04001372 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1373 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001374 if (rc) {
1375 kfree(lock);
1376 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001377 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001378
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001379 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001380 } else if (unlock)
1381 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001382
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001383out:
1384 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001385 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386 return rc;
1387}
1388
1389int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1390{
1391 int rc, xid;
1392 int lock = 0, unlock = 0;
1393 bool wait_flag = false;
1394 bool posix_lck = false;
1395 struct cifs_sb_info *cifs_sb;
1396 struct cifs_tcon *tcon;
1397 struct cifsInodeInfo *cinode;
1398 struct cifsFileInfo *cfile;
1399 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001400 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001401
1402 rc = -EACCES;
1403 xid = GetXid();
1404
1405 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1406 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1407 flock->fl_start, flock->fl_end);
1408
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409 cfile = (struct cifsFileInfo *)file->private_data;
1410 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001411
1412 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1413 tcon->ses->server);
1414
1415 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001416 netfid = cfile->netfid;
1417 cinode = CIFS_I(file->f_path.dentry->d_inode);
1418
1419 if ((tcon->ses->capabilities & CAP_UNIX) &&
1420 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1421 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1422 posix_lck = true;
1423 /*
1424 * BB add code here to normalize offset and length to account for
1425 	 * negative length which we cannot accept over the wire.
1426 */
1427 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001428 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 FreeXid(xid);
1430 return rc;
1431 }
1432
1433 if (!lock && !unlock) {
1434 /*
1435 		 * neither a lock nor an unlock was requested - nothing to do,
1436 		 * since we do not know what the operation is
1437 */
1438 FreeXid(xid);
1439 return -EOPNOTSUPP;
1440 }
1441
1442 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1443 xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 FreeXid(xid);
1445 return rc;
1446}
1447
Jeff Layton597b0272012-03-23 14:40:56 -04001448/*
1449 * update the file size (if needed) after a write. Should be called with
1450 * the inode->i_lock held
1451 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001452void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001453cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1454 unsigned int bytes_written)
1455{
1456 loff_t end_of_write = offset + bytes_written;
1457
1458 if (end_of_write > cifsi->server_eof)
1459 cifsi->server_eof = end_of_write;
1460}
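
/*
 * A typical caller pattern, as used by cifs_write() below:
 *
 *	spin_lock(&dentry->d_inode->i_lock);
 *	cifs_update_eof(cifsi, *poffset, bytes_written);
 *	spin_unlock(&dentry->d_inode->i_lock);
 */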
1461
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001462static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001463 const char *write_data, size_t write_size,
1464 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465{
1466 int rc = 0;
1467 unsigned int bytes_written = 0;
1468 unsigned int total_written;
1469 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001470 struct cifs_tcon *pTcon;
Jeff Layton77499812011-01-11 07:24:23 -05001471 int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001472 struct dentry *dentry = open_file->dentry;
1473 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001474 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
Jeff Layton7da4b492010-10-15 15:34:00 -04001476 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
Joe Perchesb6b38f72010-04-21 03:50:45 +00001478 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001479 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480
Jeff Layton13cfb732010-09-29 19:51:11 -04001481 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001482
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 for (total_written = 0; write_size > total_written;
1486 total_written += bytes_written) {
1487 rc = -EAGAIN;
1488 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001489 struct kvec iov[2];
1490 unsigned int len;
1491
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 /* we could deadlock if we called
1494 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001495 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001497 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 if (rc != 0)
1499 break;
1500 }
Steve French3e844692005-10-03 13:37:24 -07001501
Jeff Laytonca83ce32011-04-12 09:13:44 -04001502 len = min((size_t)cifs_sb->wsize,
1503 write_size - total_written);
1504 /* iov[0] is reserved for smb header */
1505 iov[1].iov_base = (char *)write_data + total_written;
1506 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001507 io_parms.netfid = open_file->netfid;
1508 io_parms.pid = pid;
1509 io_parms.tcon = pTcon;
1510 io_parms.offset = *poffset;
1511 io_parms.length = len;
1512 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1513 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 }
1515 if (rc || (bytes_written == 0)) {
1516 if (total_written)
1517 break;
1518 else {
1519 FreeXid(xid);
1520 return rc;
1521 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001522 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001523 spin_lock(&dentry->d_inode->i_lock);
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001524 cifs_update_eof(cifsi, *poffset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001525 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 }
1529
Steve Frencha4544342005-08-24 13:59:35 -07001530 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
Jeff Layton7da4b492010-10-15 15:34:00 -04001532 if (total_written > 0) {
1533 spin_lock(&dentry->d_inode->i_lock);
1534 if (*poffset > dentry->d_inode->i_size)
1535 i_size_write(dentry->d_inode, *poffset);
1536 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001538 mark_inode_dirty_sync(dentry->d_inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 FreeXid(xid);
1540 return total_written;
1541}
1542
Jeff Layton6508d902010-09-29 19:51:11 -04001543struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1544 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001545{
1546 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001547 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1548
1549 /* only filter by fsuid on multiuser mounts */
1550 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1551 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001552
Jeff Layton44772882010-10-15 15:34:03 -04001553 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001554 	/* we could simply take the first list entry since write-only entries
1555 	   are always at the end of the list, but because the first entry might
1556 	   have a close pending, we go through the whole list */
1557 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001558 if (fsuid_only && open_file->uid != current_fsuid())
1559 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001560 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001561 if (!open_file->invalidHandle) {
1562 /* found a good file */
1563 /* lock it so it will not be closed on us */
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001564 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001565 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001566 return open_file;
1567 } /* else might as well continue, and look for
1568 another, or simply have the caller reopen it
1569 again rather than trying to fix this handle */
1570 } else /* write only file */
1571 break; /* write only files are last so must be done */
1572 }
Jeff Layton44772882010-10-15 15:34:03 -04001573 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001574 return NULL;
1575}
Steve French630f3f0c2007-10-25 21:17:17 +00001576
Jeff Layton6508d902010-09-29 19:51:11 -04001577struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1578 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001579{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001580 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001581 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001582 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001583 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001584 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001585
Steve French60808232006-04-22 15:53:05 +00001586 /* Having a null inode here (because mapping->host was set to zero by
1587 	   the VFS or MM) should not happen but we had reports of an oops (due to
1588 it being zero) during stress testcases so we need to check for it */
1589
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001590 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001591 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001592 dump_stack();
1593 return NULL;
1594 }
1595
Jeff Laytond3892292010-11-02 16:22:50 -04001596 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1597
Jeff Layton6508d902010-09-29 19:51:11 -04001598 /* only filter by fsuid on multiuser mounts */
1599 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1600 fsuid_only = false;
1601
Jeff Layton44772882010-10-15 15:34:03 -04001602 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001603refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001604 if (refind > MAX_REOPEN_ATT) {
1605 spin_unlock(&cifs_file_list_lock);
1606 return NULL;
1607 }
Steve French6148a742005-10-05 12:23:19 -07001608 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001609 if (!any_available && open_file->pid != current->tgid)
1610 continue;
1611 if (fsuid_only && open_file->uid != current_fsuid())
1612 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001613 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001614 if (!open_file->invalidHandle) {
1615 /* found a good writable file */
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001616 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001617 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001618 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001619 } else {
1620 if (!inv_file)
1621 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001622 }
Steve French6148a742005-10-05 12:23:19 -07001623 }
1624 }
Jeff Layton2846d382008-09-22 21:33:33 -04001625 	/* couldn't find usable FH with same pid, try any available */
1626 if (!any_available) {
1627 any_available = true;
1628 goto refind_writable;
1629 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001630
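	/*
	 * No valid writable handle was found: fall back to an invalidated
	 * one and try to reopen it below, bounded to MAX_REOPEN_ATT passes
	 * by the refind counter above.
	 */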
1631 if (inv_file) {
1632 any_available = false;
1633 cifsFileInfo_get(inv_file);
1634 }
1635
Jeff Layton44772882010-10-15 15:34:03 -04001636 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001637
1638 if (inv_file) {
1639 rc = cifs_reopen_file(inv_file, false);
1640 if (!rc)
1641 return inv_file;
1642 else {
1643 spin_lock(&cifs_file_list_lock);
1644 list_move_tail(&inv_file->flist,
1645 &cifs_inode->openFileList);
1646 spin_unlock(&cifs_file_list_lock);
1647 cifsFileInfo_put(inv_file);
1648 spin_lock(&cifs_file_list_lock);
1649 ++refind;
1650 goto refind_writable;
1651 }
1652 }
1653
Steve French6148a742005-10-05 12:23:19 -07001654 return NULL;
1655}
1656
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1658{
1659 struct address_space *mapping = page->mapping;
1660 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1661 char *write_data;
1662 int rc = -EFAULT;
1663 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001665 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
1667 if (!mapping || !mapping->host)
1668 return -EFAULT;
1669
1670 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
1672 offset += (loff_t)from;
1673 write_data = kmap(page);
1674 write_data += from;
1675
1676 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1677 kunmap(page);
1678 return -EIO;
1679 }
1680
1681 /* racing with truncate? */
1682 if (offset > mapping->host->i_size) {
1683 kunmap(page);
1684 return 0; /* don't care */
1685 }
1686
1687 /* check to make sure that we are not extending the file */
1688 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001689 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
Jeff Layton6508d902010-09-29 19:51:11 -04001691 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001692 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001693 bytes_written = cifs_write(open_file, open_file->pid,
1694 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001695 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001697 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001698 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001699 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001700 else if (bytes_written < 0)
1701 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001702 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001703 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 rc = -EIO;
1705 }
1706
1707 kunmap(page);
1708 return rc;
1709}
1710
Jeff Laytone9492872012-03-23 14:40:56 -04001711/*
1712 * Marshal up the iov array, reserving the first one for the header. Also,
1713 * set wdata->bytes.
1714 */
1715static void
1716cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1717{
1718 int i;
1719 struct inode *inode = wdata->cfile->dentry->d_inode;
1720 loff_t size = i_size_read(inode);
1721
1722 /* marshal up the pages into iov array */
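	/* the last page may extend past EOF, so cap its length at i_size */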
1723 wdata->bytes = 0;
1724 for (i = 0; i < wdata->nr_pages; i++) {
1725 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1726 (loff_t)PAGE_CACHE_SIZE);
1727 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1728 wdata->bytes += iov[i + 1].iov_len;
1729 }
1730}
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001733 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001735 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1736 bool done = false, scanned = false, range_whole = false;
1737 pgoff_t end, index;
1738 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07001739 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001740 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001741
Steve French37c0eb42005-10-05 14:50:29 -07001742 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001743 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001744 * one page at a time via cifs_writepage
1745 */
1746 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1747 return generic_writepages(mapping, wbc);
1748
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001749 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001750 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001751 end = -1;
1752 } else {
1753 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1754 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1755 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001756 range_whole = true;
1757 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001758 }
1759retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001760 while (!done && index <= end) {
1761 unsigned int i, nr_pages, found_pages;
1762 pgoff_t next = 0, tofind;
1763 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001764
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001765 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1766 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001767
Jeff Laytonc2e87642012-03-23 14:40:55 -04001768 wdata = cifs_writedata_alloc((unsigned int)tofind,
1769 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001770 if (!wdata) {
1771 rc = -ENOMEM;
1772 break;
1773 }
1774
1775 /*
1776 * find_get_pages_tag seems to return a max of 256 on each
1777 * iteration, so we must call it several times in order to
1778 * fill the array or the wsize is effectively limited to
1779 * 256 * PAGE_CACHE_SIZE.
1780 */
1781 found_pages = 0;
1782 pages = wdata->pages;
1783 do {
1784 nr_pages = find_get_pages_tag(mapping, &index,
1785 PAGECACHE_TAG_DIRTY,
1786 tofind, pages);
1787 found_pages += nr_pages;
1788 tofind -= nr_pages;
1789 pages += nr_pages;
1790 } while (nr_pages && tofind && index <= end);
1791
1792 if (found_pages == 0) {
1793 kref_put(&wdata->refcount, cifs_writedata_release);
1794 break;
1795 }
1796
1797 nr_pages = 0;
1798 for (i = 0; i < found_pages; i++) {
1799 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001800 /*
1801 * At this point we hold neither mapping->tree_lock nor
1802 * lock on the page itself: the page may be truncated or
1803 * invalidated (changing page->mapping to NULL), or even
1804 * swizzled back from swapper_space to tmpfs file
1805 * mapping
1806 */
1807
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001808 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001809 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001810 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001811 break;
1812
1813 if (unlikely(page->mapping != mapping)) {
1814 unlock_page(page);
1815 break;
1816 }
1817
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001818 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001819 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001820 unlock_page(page);
1821 break;
1822 }
1823
1824 if (next && (page->index != next)) {
1825 /* Not next consecutive page */
1826 unlock_page(page);
1827 break;
1828 }
1829
1830 if (wbc->sync_mode != WB_SYNC_NONE)
1831 wait_on_page_writeback(page);
1832
1833 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001834 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001835 unlock_page(page);
1836 break;
1837 }
Steve French84d2f072005-10-12 15:32:05 -07001838
Linus Torvaldscb876f42006-12-23 16:19:07 -08001839 /*
1840 * This actually clears the dirty bit in the radix tree.
1841 * See cifs_writepage() for more commentary.
1842 */
1843 set_page_writeback(page);
1844
Steve French84d2f072005-10-12 15:32:05 -07001845 if (page_offset(page) >= mapping->host->i_size) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001846 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001847 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001848 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001849 break;
1850 }
1851
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001852 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001853 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001854 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001855 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001856
1857 /* reset index to refind any pages skipped */
1858 if (nr_pages == 0)
1859 index = wdata->pages[0]->index + 1;
1860
1861 /* put any pages we aren't going to use */
1862 for (i = nr_pages; i < found_pages; i++) {
1863 page_cache_release(wdata->pages[i]);
1864 wdata->pages[i] = NULL;
1865 }
1866
1867 /* nothing to write? */
1868 if (nr_pages == 0) {
1869 kref_put(&wdata->refcount, cifs_writedata_release);
1870 continue;
1871 }
1872
1873 wdata->sync_mode = wbc->sync_mode;
1874 wdata->nr_pages = nr_pages;
1875 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytone9492872012-03-23 14:40:56 -04001876 wdata->marshal_iov = cifs_writepages_marshal_iov;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001877
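		/*
		 * Grab a fresh writable handle for every attempt, since the
		 * old one may have been invalidated, and keep retrying the
		 * async send while WB_SYNC_ALL writeback gets -EAGAIN.
		 */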
1878 do {
1879 if (wdata->cfile != NULL)
1880 cifsFileInfo_put(wdata->cfile);
1881 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1882 false);
1883 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001884 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001885 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001886 break;
Steve French37c0eb42005-10-05 14:50:29 -07001887 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001888 wdata->pid = wdata->cfile->pid;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001889 rc = cifs_async_writev(wdata);
1890 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001891
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001892 for (i = 0; i < nr_pages; ++i)
1893 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001894
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001895 /* send failure -- clean up the mess */
1896 if (rc != 0) {
1897 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001898 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001899 redirty_page_for_writepage(wbc,
1900 wdata->pages[i]);
1901 else
1902 SetPageError(wdata->pages[i]);
1903 end_page_writeback(wdata->pages[i]);
1904 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001905 }
Jeff Layton941b8532011-01-11 07:24:01 -05001906 if (rc != -EAGAIN)
1907 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001908 }
1909 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001910
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001911 wbc->nr_to_write -= nr_pages;
1912 if (wbc->nr_to_write <= 0)
1913 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001914
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001915 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001916 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001917
Steve French37c0eb42005-10-05 14:50:29 -07001918 if (!scanned && !done) {
1919 /*
1920 * We hit the last page and there is more work to be done: wrap
1921 * back to the start of the file
1922 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001923 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001924 index = 0;
1925 goto retry;
1926 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001927
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001928 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001929 mapping->writeback_index = index;
1930
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 return rc;
1932}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001934static int
1935cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001937 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 int xid;
1939
1940 xid = GetXid();
1941/* BB add check for wbc flags */
1942 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001943 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001944 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001945
1946 /*
1947 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1948 *
1949 * A writepage() implementation always needs to do either this,
1950 * or re-dirty the page with "redirty_page_for_writepage()" in
1951 * the case of a failure.
1952 *
1953 * Just unlocking the page will cause the radix tree tag-bits
1954 * to fail to update with the state of the page correctly.
1955 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001956 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001957retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001959 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1960 goto retry_write;
1961 else if (rc == -EAGAIN)
1962 redirty_page_for_writepage(wbc, page);
1963 else if (rc != 0)
1964 SetPageError(page);
1965 else
1966 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001967 end_page_writeback(page);
1968 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 FreeXid(xid);
1970 return rc;
1971}
1972
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001973static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1974{
1975 int rc = cifs_writepage_locked(page, wbc);
1976 unlock_page(page);
1977 return rc;
1978}
1979
Nick Piggind9414772008-09-24 11:32:59 -04001980static int cifs_write_end(struct file *file, struct address_space *mapping,
1981 loff_t pos, unsigned len, unsigned copied,
1982 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983{
Nick Piggind9414772008-09-24 11:32:59 -04001984 int rc;
1985 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00001986 struct cifsFileInfo *cfile = file->private_data;
1987 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1988 __u32 pid;
1989
1990 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1991 pid = cfile->pid;
1992 else
1993 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
Joe Perchesb6b38f72010-04-21 03:50:45 +00001995 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1996 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00001997
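	/*
	 * PageChecked is presumably set by cifs_write_begin when it skips
	 * reading the page in from the server; a copy covering the whole
	 * write is then enough to make the page uptodate without that read.
	 */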
Jeff Laytona98ee8c2008-11-26 19:32:33 +00001998 if (PageChecked(page)) {
1999 if (copied == len)
2000 SetPageUptodate(page);
2001 ClearPageChecked(page);
2002 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002003 SetPageUptodate(page);
2004
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002006 char *page_data;
2007 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
2008 int xid;
2009
2010 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 		/* this is probably better than directly calling
2012 		   partialpage_write, since here the file handle is
2013 		   already known, which we might as well leverage */
2014 		/* BB check if anything else is missing out of ppw,
2015 		   such as updating the last write time */
2016 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002017 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002018 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002020
2021 FreeXid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002022 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002023 rc = copied;
2024 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 set_page_dirty(page);
2026 }
2027
Nick Piggind9414772008-09-24 11:32:59 -04002028 if (rc > 0) {
2029 spin_lock(&inode->i_lock);
2030 if (pos > inode->i_size)
2031 i_size_write(inode, pos);
2032 spin_unlock(&inode->i_lock);
2033 }
2034
2035 unlock_page(page);
2036 page_cache_release(page);
2037
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 return rc;
2039}
2040
Josef Bacik02c24a82011-07-16 20:44:56 -04002041int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2042 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043{
2044 int xid;
2045 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002046 struct cifs_tcon *tcon;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002047 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002048 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002049 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050
Josef Bacik02c24a82011-07-16 20:44:56 -04002051 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2052 if (rc)
2053 return rc;
2054 mutex_lock(&inode->i_mutex);
2055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 xid = GetXid();
2057
Joe Perchesb6b38f72010-04-21 03:50:45 +00002058 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002059 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002060
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002061 if (!CIFS_I(inode)->clientCanCacheRead) {
2062 rc = cifs_invalidate_mapping(inode);
2063 if (rc) {
2064 cFYI(1, "rc: %d during invalidate phase", rc);
2065 rc = 0; /* don't care about it in fsync */
2066 }
2067 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002068
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002069 tcon = tlink_tcon(smbfile->tlink);
2070 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2071 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2072
2073 FreeXid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002074 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002075 return rc;
2076}
2077
Josef Bacik02c24a82011-07-16 20:44:56 -04002078int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002079{
2080 int xid;
2081 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002082 struct cifs_tcon *tcon;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002083 struct cifsFileInfo *smbfile = file->private_data;
2084 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002085 struct inode *inode = file->f_mapping->host;
2086
2087 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2088 if (rc)
2089 return rc;
2090 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002091
2092 xid = GetXid();
2093
2094 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2095 file->f_path.dentry->d_name.name, datasync);
2096
2097 tcon = tlink_tcon(smbfile->tlink);
2098 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2099 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
Steve Frenchb298f222009-02-21 21:17:43 +00002100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 FreeXid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002102 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 return rc;
2104}
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106/*
2107 * As file closes, flush all cached write data for this inode checking
2108 * for write behind errors.
2109 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002110int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002112 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 int rc = 0;
2114
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002115 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002116 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002117
Joe Perchesb6b38f72010-04-21 03:50:45 +00002118 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 return rc;
2121}
2122
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002123static int
2124cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2125{
2126 int rc = 0;
2127 unsigned long i;
2128
2129 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002130 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002131 if (!pages[i]) {
2132 /*
2133 * save number of pages we have already allocated and
2134 * return with ENOMEM error
2135 */
2136 num_pages = i;
2137 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002138 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002139 }
2140 }
2141
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002142 if (rc) {
2143 for (i = 0; i < num_pages; i++)
2144 put_page(pages[i]);
2145 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002146 return rc;
2147}
2148
2149static inline
2150size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2151{
2152 size_t num_pages;
2153 size_t clen;
2154
2155 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002156 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002157
2158 if (cur_len)
2159 *cur_len = clen;
2160
2161 return num_pages;
2162}
2163
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002164static void
2165cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2166{
2167 int i;
2168 size_t bytes = wdata->bytes;
2169
2170 /* marshal up the pages into iov array */
2171 for (i = 0; i < wdata->nr_pages; i++) {
Steve Frenchc7ad42b2012-03-23 16:30:56 -05002172 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002173 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2174 bytes -= iov[i + 1].iov_len;
2175 }
2176}
2177
2178static void
2179cifs_uncached_writev_complete(struct work_struct *work)
2180{
2181 int i;
2182 struct cifs_writedata *wdata = container_of(work,
2183 struct cifs_writedata, work);
2184 struct inode *inode = wdata->cfile->dentry->d_inode;
2185 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2186
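	/*
	 * The write may have extended the file: update the cached server
	 * EOF and, if it grew past the cached size, i_size as well.
	 */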
2187 spin_lock(&inode->i_lock);
2188 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2189 if (cifsi->server_eof > inode->i_size)
2190 i_size_write(inode, cifsi->server_eof);
2191 spin_unlock(&inode->i_lock);
2192
2193 complete(&wdata->done);
2194
2195 if (wdata->result != -EAGAIN) {
2196 for (i = 0; i < wdata->nr_pages; i++)
2197 put_page(wdata->pages[i]);
2198 }
2199
2200 kref_put(&wdata->refcount, cifs_writedata_release);
2201}
2202
2203/* attempt to send write to server, retry on any -EAGAIN errors */
2204static int
2205cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2206{
2207 int rc;
2208
2209 do {
2210 if (wdata->cfile->invalidHandle) {
2211 rc = cifs_reopen_file(wdata->cfile, false);
2212 if (rc != 0)
2213 continue;
2214 }
2215 rc = cifs_async_writev(wdata);
2216 } while (rc == -EAGAIN);
2217
2218 return rc;
2219}
2220
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002221static ssize_t
2222cifs_iovec_write(struct file *file, const struct iovec *iov,
2223 unsigned long nr_segs, loff_t *poffset)
2224{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002225 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002226 size_t copied, len, cur_len;
2227 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002228 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002229 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002230 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002231 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002232 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002233 struct cifs_writedata *wdata, *tmp;
2234 struct list_head wdata_list;
2235 int rc;
2236 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002237
2238 len = iov_length(iov, nr_segs);
2239 if (!len)
2240 return 0;
2241
2242 rc = generic_write_checks(file, poffset, &len, 0);
2243 if (rc)
2244 return rc;
2245
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002246 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002247 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002248 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002249 tcon = tlink_tcon(open_file->tlink);
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002250 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002251
2252 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2253 pid = open_file->pid;
2254 else
2255 pid = current->tgid;
2256
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002257 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002258 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002259 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002260
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002261 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2262 wdata = cifs_writedata_alloc(nr_pages,
2263 cifs_uncached_writev_complete);
2264 if (!wdata) {
2265 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002266 break;
2267 }
2268
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002269 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2270 if (rc) {
2271 kfree(wdata);
2272 break;
2273 }
2274
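		/*
		 * Copy the user data into the pages; cur_len counts down the
		 * bytes still to be copied, so save_len - cur_len below is
		 * the number of bytes actually copied.
		 */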
2275 save_len = cur_len;
2276 for (i = 0; i < nr_pages; i++) {
2277 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2278 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2279 0, copied);
2280 cur_len -= copied;
2281 iov_iter_advance(&it, copied);
2282 }
2283 cur_len = save_len - cur_len;
2284
2285 wdata->sync_mode = WB_SYNC_ALL;
2286 wdata->nr_pages = nr_pages;
2287 wdata->offset = (__u64)offset;
2288 wdata->cfile = cifsFileInfo_get(open_file);
2289 wdata->pid = pid;
2290 wdata->bytes = cur_len;
2291 wdata->marshal_iov = cifs_uncached_marshal_iov;
2292 rc = cifs_uncached_retry_writev(wdata);
2293 if (rc) {
2294 kref_put(&wdata->refcount, cifs_writedata_release);
2295 break;
2296 }
2297
2298 list_add_tail(&wdata->list, &wdata_list);
2299 offset += cur_len;
2300 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002301 } while (len > 0);
2302
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002303 /*
2304 	 * If at least one write was successfully sent, then discard the rc
2305 	 * value from the failed send attempt. If the remaining writes
2306 	 * succeed, we'll end up returning whatever was written; if one
2307 	 * fails, we'll pick up a new rc value from it.
2308 */
2309 if (!list_empty(&wdata_list))
2310 rc = 0;
2311
2312 /*
2313 * Wait for and collect replies for any successful sends in order of
2314 * increasing offset. Once an error is hit or we get a fatal signal
2315 * while waiting, then return without waiting for any more replies.
2316 */
2317restart_loop:
2318 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2319 if (!rc) {
2320 /* FIXME: freezable too? */
2321 rc = wait_for_completion_killable(&wdata->done);
2322 if (rc)
2323 rc = -EINTR;
2324 else if (wdata->result)
2325 rc = wdata->result;
2326 else
2327 total_written += wdata->bytes;
2328
2329 /* resend call if it's a retryable error */
2330 if (rc == -EAGAIN) {
2331 rc = cifs_uncached_retry_writev(wdata);
2332 goto restart_loop;
2333 }
2334 }
2335 list_del_init(&wdata->list);
2336 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002337 }
2338
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002339 if (total_written > 0)
2340 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002341
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002342 cifs_stats_bytes_written(tcon, total_written);
2343 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002344}
2345
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002346ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002347 unsigned long nr_segs, loff_t pos)
2348{
2349 ssize_t written;
2350 struct inode *inode;
2351
2352 inode = iocb->ki_filp->f_path.dentry->d_inode;
2353
2354 /*
2355 	 * BB - optimize for the case when signing is disabled: we could drop
2356 	 * this extra memory-to-memory copying and use iovec buffers to
2357 	 * construct the write request.
2358 */
2359
2360 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2361 if (written > 0) {
2362 CIFS_I(inode)->invalid_mapping = true;
2363 iocb->ki_pos = pos;
2364 }
2365
2366 return written;
2367}
2368
2369ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2370 unsigned long nr_segs, loff_t pos)
2371{
2372 struct inode *inode;
2373
2374 inode = iocb->ki_filp->f_path.dentry->d_inode;
2375
2376 if (CIFS_I(inode)->clientCanCacheAll)
2377 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2378
2379 /*
2380 * In strict cache mode we need to write the data to the server exactly
2381 	 * from pos to pos+len-1 rather than flush all affected pages
2382 	 * because it may cause an error with mandatory locks on these pages but
2383 	 * not on the region from pos to pos+len-1.
2384 */
2385
2386 return cifs_user_writev(iocb, iov, nr_segs, pos);
2387}
2388
Jeff Layton0471ca32012-05-16 07:13:16 -04002389static struct cifs_readdata *
2390cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2391{
2392 struct cifs_readdata *rdata;
2393
2394 rdata = kzalloc(sizeof(*rdata) +
2395 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2396 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002397 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002398 INIT_LIST_HEAD(&rdata->list);
2399 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002400 INIT_WORK(&rdata->work, complete);
2401 INIT_LIST_HEAD(&rdata->pages);
2402 }
2403 return rdata;
2404}
2405
Jeff Layton6993f742012-05-16 07:13:17 -04002406void
2407cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002408{
Jeff Layton6993f742012-05-16 07:13:17 -04002409 struct cifs_readdata *rdata = container_of(refcount,
2410 struct cifs_readdata, refcount);
2411
2412 if (rdata->cfile)
2413 cifsFileInfo_put(rdata->cfile);
2414
Jeff Layton0471ca32012-05-16 07:13:16 -04002415 kfree(rdata);
2416}
2417
Jeff Layton2a1bb132012-05-16 07:13:17 -04002418static int
Jeff Layton1c892542012-05-16 07:13:17 -04002419cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2420{
2421 int rc = 0;
2422 struct page *page, *tpage;
2423 unsigned int i;
2424
2425 for (i = 0; i < npages; i++) {
2426 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2427 if (!page) {
2428 rc = -ENOMEM;
2429 break;
2430 }
2431 list_add(&page->lru, list);
2432 }
2433
2434 if (rc) {
2435 list_for_each_entry_safe(page, tpage, list, lru) {
2436 list_del(&page->lru);
2437 put_page(page);
2438 }
2439 }
2440 return rc;
2441}
2442
2443static void
2444cifs_uncached_readdata_release(struct kref *refcount)
2445{
2446 struct page *page, *tpage;
2447 struct cifs_readdata *rdata = container_of(refcount,
2448 struct cifs_readdata, refcount);
2449
2450 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2451 list_del(&page->lru);
2452 put_page(page);
2453 }
2454 cifs_readdata_release(refcount);
2455}
2456
2457static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002458cifs_retry_async_readv(struct cifs_readdata *rdata)
2459{
2460 int rc;
2461
2462 do {
2463 if (rdata->cfile->invalidHandle) {
2464 rc = cifs_reopen_file(rdata->cfile, true);
2465 if (rc != 0)
2466 continue;
2467 }
2468 rc = cifs_async_readv(rdata);
2469 } while (rc == -EAGAIN);
2470
2471 return rc;
2472}
2473
Jeff Layton1c892542012-05-16 07:13:17 -04002474/**
2475 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2476 * @rdata: the readdata response with list of pages holding data
2477 * @iov: vector in which we should copy the data
2478 * @nr_segs: number of segments in vector
2479 * @offset: offset into file of the first iovec
2480 * @copied: used to return the amount of data copied to the iov
2481 *
2482 * This function copies data from a list of pages in a readdata response into
2483 * an array of iovecs. It will first calculate where the data should go
2484 * based on the info in the readdata and then copy the data into that spot.
2485 */
2486static ssize_t
2487cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2488 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2489{
2490 int rc = 0;
2491 struct iov_iter ii;
2492 size_t pos = rdata->offset - offset;
2493 struct page *page, *tpage;
2494 ssize_t remaining = rdata->bytes;
2495 unsigned char *pdata;
2496
2497 /* set up iov_iter and advance to the correct offset */
2498 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2499 iov_iter_advance(&ii, pos);
2500
2501 *copied = 0;
2502 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2503 ssize_t copy;
2504
2505 /* copy a whole page or whatever's left */
2506 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2507
2508 /* ...but limit it to whatever space is left in the iov */
2509 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2510
2511 /* go while there's data to be copied and no errors */
2512 if (copy && !rc) {
2513 pdata = kmap(page);
2514 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2515 (int)copy);
2516 kunmap(page);
2517 if (!rc) {
2518 *copied += copy;
2519 remaining -= copy;
2520 iov_iter_advance(&ii, copy);
2521 }
2522 }
2523
2524 list_del(&page->lru);
2525 put_page(page);
2526 }
2527
2528 return rc;
2529}
2530
2531static void
2532cifs_uncached_readv_complete(struct work_struct *work)
2533{
2534 struct cifs_readdata *rdata = container_of(work,
2535 struct cifs_readdata, work);
2536
2537 /* if the result is non-zero then the pages weren't kmapped */
2538 if (rdata->result == 0) {
2539 struct page *page;
2540
2541 list_for_each_entry(page, &rdata->pages, lru)
2542 kunmap(page);
2543 }
2544
2545 complete(&rdata->done);
2546 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2547}
2548
2549static int
2550cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
2551 unsigned int remaining)
2552{
2553 int len = 0;
2554 struct page *page, *tpage;
2555
2556 rdata->nr_iov = 1;
2557 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2558 if (remaining >= PAGE_SIZE) {
2559 /* enough data to fill the page */
2560 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2561 rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
2562 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2563 rdata->nr_iov, page->index,
2564 rdata->iov[rdata->nr_iov].iov_base,
2565 rdata->iov[rdata->nr_iov].iov_len);
2566 ++rdata->nr_iov;
2567 len += PAGE_SIZE;
2568 remaining -= PAGE_SIZE;
2569 } else if (remaining > 0) {
2570 /* enough for partial page, fill and zero the rest */
2571 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2572 rdata->iov[rdata->nr_iov].iov_len = remaining;
2573 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2574 rdata->nr_iov, page->index,
2575 rdata->iov[rdata->nr_iov].iov_base,
2576 rdata->iov[rdata->nr_iov].iov_len);
2577 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2578 '\0', PAGE_SIZE - remaining);
2579 ++rdata->nr_iov;
2580 len += remaining;
2581 remaining = 0;
2582 } else {
2583 /* no need to hold page hostage */
2584 list_del(&page->lru);
2585 put_page(page);
2586 }
2587 }
2588
2589 return len;
2590}
2591
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* nothing to put yet; don't take the error path
			   below, which would kref_put a NULL readdata */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(&rdata->pages, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_uncached_read_marshal_iov;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							nr_segs, *poffset,
							&copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	return total_read ? total_read : rc;
}

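/* read path that bypasses the page cache; updates ki_pos on success */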
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

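/*
 * Strict cache mode read: the page cache may only be used while we hold
 * a read oplock; otherwise fall through to an uncached read.
 */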
ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
			  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (CIFS_I(inode)->clientCanCacheRead)
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * In strict cache mode we need to read from the server every time
	 * if we don't have a level II oplock, because the server can delay
	 * the mtime change - so we can't make a decision about invalidating
	 * the inode. Page reads can also fail if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */

	return cifs_user_readv(iocb, iov, nr_segs, pos);
}

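/*
 * Synchronous read into a kernel buffer, issued one rsize-limited SMB read
 * at a time, retrying on -EAGAIN after reopening an invalidated handle.
 * Used by the readpage path when the data isn't already cached.
 */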
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}
	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);

		/* Windows ME and 9x will refuse reads larger than they
		   negotiated, so do not request more than that */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = current_read_size;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &current_offset, &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};

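/*
 * mmap in strict cache mode: drop any cached pages first unless we hold
 * a read oplock that guarantees they are still valid.
 */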
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early return */
			FreeXid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

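/* revalidate the inode before allowing its pages to be mapped */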
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	FreeXid(xid);
	return rc;
}

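/*
 * Completion work for readpages: put each page back on the LRU, mark it
 * uptodate and push it to fscache if the read succeeded, then unlock it
 * and drop the references taken for the read.
 */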
static void
cifs_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);
	struct page *page, *tpage;

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		lru_cache_add_file(page);

		if (rdata->result == 0) {
			kunmap(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

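/*
 * Build the iovec array for a readpages request. Like the uncached
 * variant, but pages past the response length are zero-filled and marked
 * uptodate when they lie beyond the server's EOF, and simply released
 * back to the VM otherwise.
 */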
static int
cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
{
	int len = 0;
	struct page *page, *tpage;
	u64 eof;
	pgoff_t eof_index;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->nr_iov = 1;
	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		if (remaining >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			++rdata->nr_iov;
			len += PAGE_CACHE_SIZE;
			remaining -= PAGE_CACHE_SIZE;
		} else if (remaining > 0) {
			/* enough for partial page, fill and zero the rest */
			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
			rdata->iov[rdata->nr_iov].iov_len = remaining;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				rdata->nr_iov, page->index,
				rdata->iov[rdata->nr_iov].iov_base,
				rdata->iov[rdata->nr_iov].iov_len);
			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
				'\0', PAGE_CACHE_SIZE - remaining);
			++rdata->nr_iov;
			len += remaining;
			remaining = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			list_del(&page->lru);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
		} else {
			/* no need to hold page hostage */
			list_del(&page->lru);
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	return len;
}

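/*
 * Readahead entrypoint: try fscache first, then batch contiguous pages
 * into rsize-sized async read requests.
 */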
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		spin_lock(&cifs_file_list_lock);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->marshal_iov = cifs_readpages_marshal_iov;
		list_splice_init(&tmplist, &rdata->pages);

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

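/*
 * Fill a single page, preferring fscache and falling back to a synchronous
 * read from the server; the tail is zeroed on a short read.
 */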
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

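/* ->readpage: read one page at its file offset, then unlock it */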
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

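/* return 1 if any open file on this inode was opened for writing */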
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider refreshing the inode only on increases in the
 * file size, but this is tricky to do without racing with writebehind page
 * caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

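/*
 * Prepare a page for a write: only read it in from the server when the
 * write is a partial-page update of data we might actually need.
 */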
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}

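/* let fscache decide whether a clean page can be released */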
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

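/* on full-page invalidation, drop the fscache copy as well */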
static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

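/* write a dirty page back to the server before it is invalidated */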
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

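/*
 * Work handler run when the server breaks our oplock: flush dirty pages,
 * drop cached data if we lost read caching, push cached byte-range locks
 * to the server, and acknowledge the break.
 */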
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session (using a now-incorrect file handle) is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

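/* default address_space operations; see the small-buffer variant below */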
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};