blob: d9cc07fec99f56ea8048faab3f3ec9e6e47ee2dc [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open a file using the SMB POSIX extensions (CIFSPOSIXCreate).
 *
 * @full_path: server-relative path of the file to open
 * @pinode:    in/out inode pointer; may be NULL if the caller does not
 *             need inode info.  If *pinode is NULL a new inode is
 *             instantiated from the returned attributes, otherwise the
 *             existing inode is refreshed.
 * @sb:        superblock of the mount
 * @mode:      create mode (masked by the caller's umask below)
 * @f_flags:   VFS open flags, converted to SMB_O_* on the wire
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: network file handle
 * @xid:       transaction id for request tracking
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tcon reference no longer needed once the RPC has completed */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open a file the traditional (non-POSIX-extension) way, via either the
 * NT-style CIFSSMBOpen or, for pre-NT servers, SMBLegacyOpen.  On
 * success the inode metadata is refreshed from the server.
 *
 * Returns 0 on success or a negative errno; *poplock and *pnetfid are
 * filled in by the open call.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		/* NOTE(review): the legacy path passes CREATE_NOT_DIR
		   directly, so CREATE_OPEN_BACKUP_INTENT set above is not
		   forwarded here -- presumably intentional for old servers,
		   but worth confirming */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh local inode metadata from the server's view */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
/*
 * Allocate and initialize a cifsFileInfo for an open file handle and
 * link it onto both the tcon-wide and per-inode open-file lists.
 * Returns the new structure (also stored in file->private_data), or
 * NULL on allocation failure.  Starts with a reference count of 1;
 * released via cifsFileInfo_put().
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	/* both list insertions are protected by cifs_file_list_lock */
	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	/* brlocks may only be cached while we hold an exclusive oplock */
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
283
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	/* decrement under the list lock; bail out if refs remain */
	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* must not run concurrently with the oplock break handler */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		/* wake anyone blocked waiting on this lock before freeing */
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
350
/*
 * VFS ->open handler.  Tries a POSIX-extensions open first when the
 * server advertises support, then falls back to the NT-style open.
 * On success a cifsFileInfo is attached to file->private_data.
 * Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* attempt POSIX open only if the server supports the path ops
	   capability and has not already failed a POSIX open for us */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* undo the server-side open we can no longer track */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
453
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.  Currently a stub that always succeeds.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
464
/*
 * Re-open a file whose handle was invalidated (typically after session
 * reconnect).  Serialized on the per-file fh_mutex so only one task
 * performs the reopen.  When @can_flush is set, dirty pages are written
 * back and the inode metadata refreshed from the server after the
 * reopen succeeds.  Returns 0 or a negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	/* someone else may have already reopened the handle */
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	/* publish the new handle before dropping fh_mutex */
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
Jeff Layton77970692011-04-05 16:23:47 -0700593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
Steve Frenchcdff08e2010-10-21 22:46:14 +0000598 /* return code from the ->release op is always ignored */
599 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600}
601
/*
 * VFS ->release handler for directories.  Closes any in-progress
 * FindFirst/FindNext search on the server, frees the cached network
 * search buffer, and releases the private data.  Returns 0 (errors
 * from closing an uncompleted readdir are deliberately ignored).
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		/* mark the handle invalid under the list lock so the
		   FindClose is issued at most once */
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* small and large SMB buffers come from different
			   pools and must be released accordingly */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
646
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400647static struct cifsLockInfo *
648cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000649{
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000650 struct cifsLockInfo *li =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400652 if (!li)
653 return li;
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400654 li->netfid = netfid;
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000655 li->offset = offset;
656 li->length = len;
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400657 li->type = type;
658 li->pid = current->tgid;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400659 INIT_LIST_HEAD(&li->blist);
660 init_waitqueue_head(&li->block_q);
661 return li;
662}
663
664static void
665cifs_del_lock_waiters(struct cifsLockInfo *lock)
666{
667 struct cifsLockInfo *li, *tmp;
668 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
669 list_del_init(&li->blist);
670 wake_up(&li->block_q);
671 }
672}
673
674static bool
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400675__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400676 __u64 length, __u8 type, __u16 netfid,
677 struct cifsLockInfo **conf_lock)
678{
679 struct cifsLockInfo *li, *tmp;
680
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
682 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length)
684 continue;
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
686 ((netfid == li->netfid && current->tgid == li->pid) ||
687 type == li->type))
688 continue;
689 else {
690 *conf_lock = li;
691 return true;
692 }
693 }
694 return false;
695}
696
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400697static bool
698cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
699 struct cifsLockInfo **conf_lock)
700{
701 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
702 lock->type, lock->netfid, conf_lock);
703}
704
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400705static int
706cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
707 __u8 type, __u16 netfid, struct file_lock *flock)
708{
709 int rc = 0;
710 struct cifsLockInfo *conf_lock;
711 bool exist;
712
713 mutex_lock(&cinode->lock_mutex);
714
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400715 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
716 &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400717 if (exist) {
718 flock->fl_start = conf_lock->offset;
719 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
720 flock->fl_pid = conf_lock->pid;
721 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
722 flock->fl_type = F_RDLCK;
723 else
724 flock->fl_type = F_WRLCK;
725 } else if (!cinode->can_cache_brlcks)
726 rc = 1;
727 else
728 flock->fl_type = F_UNLCK;
729
730 mutex_unlock(&cinode->lock_mutex);
731 return rc;
732}
733
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400734static void
735cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400736{
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400737 mutex_lock(&cinode->lock_mutex);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400738 list_add_tail(&lock->llist, &cinode->llist);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400739 mutex_unlock(&cinode->lock_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000740}
741
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400742static int
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400743cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
744 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400745{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400746 struct cifsLockInfo *conf_lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400747 bool exist;
748 int rc = 0;
749
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400750try_again:
751 exist = false;
752 mutex_lock(&cinode->lock_mutex);
753
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400754 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400755 if (!exist && cinode->can_cache_brlcks) {
756 list_add_tail(&lock->llist, &cinode->llist);
757 mutex_unlock(&cinode->lock_mutex);
758 return rc;
759 }
760
761 if (!exist)
762 rc = 1;
763 else if (!wait)
764 rc = -EACCES;
765 else {
766 list_add_tail(&lock->blist, &conf_lock->blist);
767 mutex_unlock(&cinode->lock_mutex);
768 rc = wait_event_interruptible(lock->block_q,
769 (lock->blist.prev == &lock->blist) &&
770 (lock->blist.next == &lock->blist));
771 if (!rc)
772 goto try_again;
773 else {
774 mutex_lock(&cinode->lock_mutex);
775 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400776 }
777 }
778
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400779 mutex_unlock(&cinode->lock_mutex);
780 return rc;
781}
782
783static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400784cifs_posix_lock_test(struct file *file, struct file_lock *flock)
785{
786 int rc = 0;
787 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
788 unsigned char saved_type = flock->fl_type;
789
Pavel Shilovsky50792762011-10-29 17:17:57 +0400790 if ((flock->fl_flags & FL_POSIX) == 0)
791 return 1;
792
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400793 mutex_lock(&cinode->lock_mutex);
794 posix_test_lock(file, flock);
795
796 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
797 flock->fl_type = saved_type;
798 rc = 1;
799 }
800
801 mutex_unlock(&cinode->lock_mutex);
802 return rc;
803}
804
805static int
806cifs_posix_lock_set(struct file *file, struct file_lock *flock)
807{
808 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400809 int rc = 1;
810
811 if ((flock->fl_flags & FL_POSIX) == 0)
812 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400813
814 mutex_lock(&cinode->lock_mutex);
815 if (!cinode->can_cache_brlcks) {
816 mutex_unlock(&cinode->lock_mutex);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400817 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400818 }
819 rc = posix_lock_file_wait(file, flock);
820 mutex_unlock(&cinode->lock_mutex);
821 return rc;
822}
823
824static int
825cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400826{
827 int xid, rc = 0, stored_rc;
828 struct cifsLockInfo *li, *tmp;
829 struct cifs_tcon *tcon;
830 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400831 unsigned int num, max_num;
832 LOCKING_ANDX_RANGE *buf, *cur;
833 int types[] = {LOCKING_ANDX_LARGE_FILES,
834 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
835 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400836
837 xid = GetXid();
838 tcon = tlink_tcon(cfile->tlink);
839
840 mutex_lock(&cinode->lock_mutex);
841 if (!cinode->can_cache_brlcks) {
842 mutex_unlock(&cinode->lock_mutex);
843 FreeXid(xid);
844 return rc;
845 }
846
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400847 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
848 sizeof(LOCKING_ANDX_RANGE);
849 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
850 if (!buf) {
851 mutex_unlock(&cinode->lock_mutex);
852 FreeXid(xid);
853 return rc;
854 }
855
856 for (i = 0; i < 2; i++) {
857 cur = buf;
858 num = 0;
859 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
860 if (li->type != types[i])
861 continue;
862 cur->Pid = cpu_to_le16(li->pid);
863 cur->LengthLow = cpu_to_le32((u32)li->length);
864 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
865 cur->OffsetLow = cpu_to_le32((u32)li->offset);
866 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
867 if (++num == max_num) {
868 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
869 li->type, 0, num, buf);
870 if (stored_rc)
871 rc = stored_rc;
872 cur = buf;
873 num = 0;
874 } else
875 cur++;
876 }
877
878 if (num) {
879 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
880 types[i], 0, num, buf);
881 if (stored_rc)
882 rc = stored_rc;
883 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400884 }
885
886 cinode->can_cache_brlcks = false;
887 mutex_unlock(&cinode->lock_mutex);
888
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +0400889 kfree(buf);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400890 FreeXid(xid);
891 return rc;
892}
893
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400894/* copied from fs/locks.c with a name change */
895#define cifs_for_each_lock(inode, lockp) \
896 for (lockp = &inode->i_flock; *lockp != NULL; \
897 lockp = &(*lockp)->fl_next)
898
899static int
900cifs_push_posix_locks(struct cifsFileInfo *cfile)
901{
902 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
903 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
904 struct file_lock *flock, **before;
905 struct cifsLockInfo *lck, *tmp;
906 int rc = 0, xid, type;
907 __u64 length;
908 struct list_head locks_to_send;
909
910 xid = GetXid();
911
912 mutex_lock(&cinode->lock_mutex);
913 if (!cinode->can_cache_brlcks) {
914 mutex_unlock(&cinode->lock_mutex);
915 FreeXid(xid);
916 return rc;
917 }
918
919 INIT_LIST_HEAD(&locks_to_send);
920
921 lock_flocks();
922 cifs_for_each_lock(cfile->dentry->d_inode, before) {
923 flock = *before;
924 length = 1 + flock->fl_end - flock->fl_start;
925 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
926 type = CIFS_RDLCK;
927 else
928 type = CIFS_WRLCK;
929
930 lck = cifs_lock_init(length, flock->fl_start, type,
931 cfile->netfid);
932 if (!lck) {
933 rc = -ENOMEM;
934 goto send_locks;
935 }
936 lck->pid = flock->fl_pid;
937
938 list_add_tail(&lck->llist, &locks_to_send);
939 }
940
941send_locks:
942 unlock_flocks();
943
944 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
945 struct file_lock tmp_lock;
946 int stored_rc;
947
948 tmp_lock.fl_start = lck->offset;
949 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
950 0, lck->length, &tmp_lock,
951 lck->type, 0);
952 if (stored_rc)
953 rc = stored_rc;
954 list_del(&lck->llist);
955 kfree(lck);
956 }
957
958 cinode->can_cache_brlcks = false;
959 mutex_unlock(&cinode->lock_mutex);
960
961 FreeXid(xid);
962 return rc;
963}
964
965static int
966cifs_push_locks(struct cifsFileInfo *cfile)
967{
968 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
969 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
970
971 if ((tcon->ses->capabilities & CAP_UNIX) &&
972 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
973 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
974 return cifs_push_posix_locks(cfile);
975
976 return cifs_push_mandatory_locks(cfile);
977}
978
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400979static void
980cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
981 bool *wait_flag)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982{
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400983 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +0000984 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400985 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +0000986 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400987 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +0000988 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400989 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400991 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +0000992 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400993 "not implemented yet");
994 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +0000995 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400996 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +0400998 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001000 *type = LOCKING_ANDX_LARGE_FILES;
1001 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001002 cFYI(1, "F_WRLCK ");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001003 *lock = 1;
1004 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001005 cFYI(1, "F_UNLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001006 *unlock = 1;
1007 /* Check if unlock includes more than one lock range */
1008 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001009 cFYI(1, "F_RDLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001010 *type |= LOCKING_ANDX_SHARED_LOCK;
1011 *lock = 1;
1012 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001013 cFYI(1, "F_EXLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001014 *lock = 1;
1015 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001016 cFYI(1, "F_SHLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001017 *type |= LOCKING_ANDX_SHARED_LOCK;
1018 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001020 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001021}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001023static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001024cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001025 bool wait_flag, bool posix_lck, int xid)
1026{
1027 int rc = 0;
1028 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001029 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1030 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001031 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001032 __u16 netfid = cfile->netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001034 if (posix_lck) {
1035 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001036
1037 rc = cifs_posix_lock_test(file, flock);
1038 if (!rc)
1039 return rc;
1040
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001041 if (type & LOCKING_ANDX_SHARED_LOCK)
1042 posix_lock_type = CIFS_RDLCK;
1043 else
1044 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001045 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1046 1 /* get */, length, flock,
1047 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001048 return rc;
1049 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001050
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001051 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1052 flock);
1053 if (!rc)
1054 return rc;
1055
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001056 /* BB we could chain these into one lock request BB */
1057 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1058 flock->fl_start, 0, 1, type, 0, 0);
1059 if (rc == 0) {
1060 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1061 length, flock->fl_start, 1, 0,
1062 type, 0, 0);
1063 flock->fl_type = F_UNLCK;
1064 if (rc != 0)
1065 cERROR(1, "Error unlocking previously locked "
1066 "range %d during test of lock", rc);
1067 rc = 0;
1068 return rc;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001069 }
1070
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001071 if (type & LOCKING_ANDX_SHARED_LOCK) {
1072 flock->fl_type = F_WRLCK;
1073 rc = 0;
1074 return rc;
1075 }
1076
1077 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1078 flock->fl_start, 0, 1,
1079 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1080 if (rc == 0) {
1081 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1082 length, flock->fl_start, 1, 0,
1083 type | LOCKING_ANDX_SHARED_LOCK,
1084 0, 0);
1085 flock->fl_type = F_RDLCK;
1086 if (rc != 0)
1087 cERROR(1, "Error unlocking previously locked "
1088 "range %d during test of lock", rc);
1089 } else
1090 flock->fl_type = F_WRLCK;
1091
1092 rc = 0;
1093 return rc;
1094}
1095
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001096static void
1097cifs_move_llist(struct list_head *source, struct list_head *dest)
1098{
1099 struct list_head *li, *tmp;
1100 list_for_each_safe(li, tmp, source)
1101 list_move(li, dest);
1102}
1103
1104static void
1105cifs_free_llist(struct list_head *llist)
1106{
1107 struct cifsLockInfo *li, *tmp;
1108 list_for_each_entry_safe(li, tmp, llist, llist) {
1109 cifs_del_lock_waiters(li);
1110 list_del(&li->llist);
1111 kfree(li);
1112 }
1113}
1114
1115static int
1116cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1117{
1118 int rc = 0, stored_rc;
1119 int types[] = {LOCKING_ANDX_LARGE_FILES,
1120 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1121 unsigned int i;
1122 unsigned int max_num, num;
1123 LOCKING_ANDX_RANGE *buf, *cur;
1124 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1125 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1126 struct cifsLockInfo *li, *tmp;
1127 __u64 length = 1 + flock->fl_end - flock->fl_start;
1128 struct list_head tmp_llist;
1129
1130 INIT_LIST_HEAD(&tmp_llist);
1131
1132 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1133 sizeof(LOCKING_ANDX_RANGE);
1134 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1135 if (!buf)
1136 return -ENOMEM;
1137
1138 mutex_lock(&cinode->lock_mutex);
1139 for (i = 0; i < 2; i++) {
1140 cur = buf;
1141 num = 0;
1142 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1143 if (flock->fl_start > li->offset ||
1144 (flock->fl_start + length) <
1145 (li->offset + li->length))
1146 continue;
1147 if (current->tgid != li->pid)
1148 continue;
1149 if (cfile->netfid != li->netfid)
1150 continue;
1151 if (types[i] != li->type)
1152 continue;
1153 if (!cinode->can_cache_brlcks) {
1154 cur->Pid = cpu_to_le16(li->pid);
1155 cur->LengthLow = cpu_to_le32((u32)li->length);
1156 cur->LengthHigh =
1157 cpu_to_le32((u32)(li->length>>32));
1158 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1159 cur->OffsetHigh =
1160 cpu_to_le32((u32)(li->offset>>32));
1161 /*
1162 * We need to save a lock here to let us add
1163 * it again to the inode list if the unlock
1164 * range request fails on the server.
1165 */
1166 list_move(&li->llist, &tmp_llist);
1167 if (++num == max_num) {
1168 stored_rc = cifs_lockv(xid, tcon,
1169 cfile->netfid,
1170 li->type, num,
1171 0, buf);
1172 if (stored_rc) {
1173 /*
1174 * We failed on the unlock range
1175 * request - add all locks from
1176 * the tmp list to the head of
1177 * the inode list.
1178 */
1179 cifs_move_llist(&tmp_llist,
1180 &cinode->llist);
1181 rc = stored_rc;
1182 } else
1183 /*
1184 * The unlock range request
1185 * succeed - free the tmp list.
1186 */
1187 cifs_free_llist(&tmp_llist);
1188 cur = buf;
1189 num = 0;
1190 } else
1191 cur++;
1192 } else {
1193 /*
1194 * We can cache brlock requests - simply remove
1195 * a lock from the inode list.
1196 */
1197 list_del(&li->llist);
1198 cifs_del_lock_waiters(li);
1199 kfree(li);
1200 }
1201 }
1202 if (num) {
1203 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1204 types[i], num, 0, buf);
1205 if (stored_rc) {
1206 cifs_move_llist(&tmp_llist, &cinode->llist);
1207 rc = stored_rc;
1208 } else
1209 cifs_free_llist(&tmp_llist);
1210 }
1211 }
1212
1213 mutex_unlock(&cinode->lock_mutex);
1214 kfree(buf);
1215 return rc;
1216}
1217
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001218static int
1219cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1220 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1221{
1222 int rc = 0;
1223 __u64 length = 1 + flock->fl_end - flock->fl_start;
1224 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1225 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +04001226 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001227 __u16 netfid = cfile->netfid;
1228
1229 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001230 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001231
1232 rc = cifs_posix_lock_set(file, flock);
1233 if (!rc || rc < 0)
1234 return rc;
1235
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001236 if (type & LOCKING_ANDX_SHARED_LOCK)
Steve French08547b02006-02-28 22:39:25 +00001237 posix_lock_type = CIFS_RDLCK;
1238 else
1239 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001240
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001241 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001242 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001243
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001244 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1245 0 /* set */, length, flock,
1246 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001247 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001248 }
1249
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001250 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001251 struct cifsLockInfo *lock;
1252
1253 lock = cifs_lock_init(length, flock->fl_start, type, netfid);
1254 if (!lock)
1255 return -ENOMEM;
1256
1257 rc = cifs_lock_add_if(cinode, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001258 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001259 kfree(lock);
1260 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001261 goto out;
1262
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001263 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001264 flock->fl_start, 0, 1, type, wait_flag, 0);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001265 if (rc) {
1266 kfree(lock);
1267 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001268 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001269
1270 cifs_lock_add(cinode, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001271 } else if (unlock)
1272 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001273
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001274out:
1275 if (flock->fl_flags & FL_POSIX)
1276 posix_lock_file_wait(file, flock);
1277 return rc;
1278}
1279
1280int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1281{
1282 int rc, xid;
1283 int lock = 0, unlock = 0;
1284 bool wait_flag = false;
1285 bool posix_lck = false;
1286 struct cifs_sb_info *cifs_sb;
1287 struct cifs_tcon *tcon;
1288 struct cifsInodeInfo *cinode;
1289 struct cifsFileInfo *cfile;
1290 __u16 netfid;
1291 __u8 type;
1292
1293 rc = -EACCES;
1294 xid = GetXid();
1295
1296 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1297 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1298 flock->fl_start, flock->fl_end);
1299
1300 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1301
1302 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1303 cfile = (struct cifsFileInfo *)file->private_data;
1304 tcon = tlink_tcon(cfile->tlink);
1305 netfid = cfile->netfid;
1306 cinode = CIFS_I(file->f_path.dentry->d_inode);
1307
1308 if ((tcon->ses->capabilities & CAP_UNIX) &&
1309 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1310 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1311 posix_lck = true;
1312 /*
1313 * BB add code here to normalize offset and length to account for
1314 * negative length which we can not accept over the wire.
1315 */
1316 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001317 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001318 FreeXid(xid);
1319 return rc;
1320 }
1321
1322 if (!lock && !unlock) {
1323 /*
1324 * if no lock or unlock then nothing to do since we do not
1325 * know what it is
1326 */
1327 FreeXid(xid);
1328 return -EOPNOTSUPP;
1329 }
1330
1331 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1332 xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 FreeXid(xid);
1334 return rc;
1335}
1336
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001337/* update the file size (if needed) after a write */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001338void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001339cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1340 unsigned int bytes_written)
1341{
1342 loff_t end_of_write = offset + bytes_written;
1343
1344 if (end_of_write > cifsi->server_eof)
1345 cifsi->server_eof = end_of_write;
1346}
1347
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001348static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001349 const char *write_data, size_t write_size,
1350 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351{
1352 int rc = 0;
1353 unsigned int bytes_written = 0;
1354 unsigned int total_written;
1355 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001356 struct cifs_tcon *pTcon;
Jeff Layton77499812011-01-11 07:24:23 -05001357 int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001358 struct dentry *dentry = open_file->dentry;
1359 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001360 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Jeff Layton7da4b492010-10-15 15:34:00 -04001362 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363
Joe Perchesb6b38f72010-04-21 03:50:45 +00001364 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001365 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
Jeff Layton13cfb732010-09-29 19:51:11 -04001367 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 for (total_written = 0; write_size > total_written;
1372 total_written += bytes_written) {
1373 rc = -EAGAIN;
1374 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001375 struct kvec iov[2];
1376 unsigned int len;
1377
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 /* we could deadlock if we called
1380 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001381 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001383 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 if (rc != 0)
1385 break;
1386 }
Steve French3e844692005-10-03 13:37:24 -07001387
Jeff Laytonca83ce32011-04-12 09:13:44 -04001388 len = min((size_t)cifs_sb->wsize,
1389 write_size - total_written);
1390 /* iov[0] is reserved for smb header */
1391 iov[1].iov_base = (char *)write_data + total_written;
1392 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001393 io_parms.netfid = open_file->netfid;
1394 io_parms.pid = pid;
1395 io_parms.tcon = pTcon;
1396 io_parms.offset = *poffset;
1397 io_parms.length = len;
1398 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1399 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 }
1401 if (rc || (bytes_written == 0)) {
1402 if (total_written)
1403 break;
1404 else {
1405 FreeXid(xid);
1406 return rc;
1407 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001408 } else {
1409 cifs_update_eof(cifsi, *poffset, bytes_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 }
1413
Steve Frencha4544342005-08-24 13:59:35 -07001414 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
Jeff Layton7da4b492010-10-15 15:34:00 -04001416 if (total_written > 0) {
1417 spin_lock(&dentry->d_inode->i_lock);
1418 if (*poffset > dentry->d_inode->i_size)
1419 i_size_write(dentry->d_inode, *poffset);
1420 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001422 mark_inode_dirty_sync(dentry->d_inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 FreeXid(xid);
1424 return total_written;
1425}
1426
Jeff Layton6508d902010-09-29 19:51:11 -04001427struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1428 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001429{
1430 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001431 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1432
1433 /* only filter by fsuid on multiuser mounts */
1434 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1435 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001436
Jeff Layton44772882010-10-15 15:34:03 -04001437 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001438 /* we could simply get the first_list_entry since write-only entries
1439 are always at the end of the list but since the first entry might
1440 have a close pending, we go through the whole list */
1441 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001442 if (fsuid_only && open_file->uid != current_fsuid())
1443 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001444 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001445 if (!open_file->invalidHandle) {
1446 /* found a good file */
1447 /* lock it so it will not be closed on us */
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001448 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001449 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001450 return open_file;
1451 } /* else might as well continue, and look for
1452 another, or simply have the caller reopen it
1453 again rather than trying to fix this handle */
1454 } else /* write only file */
1455 break; /* write only files are last so must be done */
1456 }
Jeff Layton44772882010-10-15 15:34:03 -04001457 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001458 return NULL;
1459}
Steve French630f3f0c2007-10-25 21:17:17 +00001460
Jeff Layton6508d902010-09-29 19:51:11 -04001461struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1462 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001463{
1464 struct cifsFileInfo *open_file;
Jeff Laytond3892292010-11-02 16:22:50 -04001465 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001466 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001467 int rc;
Steve French6148a742005-10-05 12:23:19 -07001468
Steve French60808232006-04-22 15:53:05 +00001469 /* Having a null inode here (because mapping->host was set to zero by
1470 the VFS or MM) should not happen but we had reports of on oops (due to
1471 it being zero) during stress testcases so we need to check for it */
1472
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001473 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001474 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001475 dump_stack();
1476 return NULL;
1477 }
1478
Jeff Laytond3892292010-11-02 16:22:50 -04001479 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1480
Jeff Layton6508d902010-09-29 19:51:11 -04001481 /* only filter by fsuid on multiuser mounts */
1482 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1483 fsuid_only = false;
1484
Jeff Layton44772882010-10-15 15:34:03 -04001485 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001486refind_writable:
Steve French6148a742005-10-05 12:23:19 -07001487 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001488 if (!any_available && open_file->pid != current->tgid)
1489 continue;
1490 if (fsuid_only && open_file->uid != current_fsuid())
1491 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001492 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001493 cifsFileInfo_get(open_file);
Steve French9b22b0b2007-10-02 01:11:08 +00001494
1495 if (!open_file->invalidHandle) {
1496 /* found a good writable file */
Jeff Layton44772882010-10-15 15:34:03 -04001497 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001498 return open_file;
1499 }
Steve French8840dee2007-11-16 23:05:52 +00001500
Jeff Layton44772882010-10-15 15:34:03 -04001501 spin_unlock(&cifs_file_list_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +00001502
Steve French9b22b0b2007-10-02 01:11:08 +00001503 /* Had to unlock since following call can block */
Jeff Layton15886172010-10-15 15:33:59 -04001504 rc = cifs_reopen_file(open_file, false);
Steve Frenchcdff08e2010-10-21 22:46:14 +00001505 if (!rc)
1506 return open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001507
Steve Frenchcdff08e2010-10-21 22:46:14 +00001508 /* if it fails, try another handle if possible */
Joe Perchesb6b38f72010-04-21 03:50:45 +00001509 cFYI(1, "wp failed on reopen file");
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001510 cifsFileInfo_put(open_file);
Steve French8840dee2007-11-16 23:05:52 +00001511
Steve Frenchcdff08e2010-10-21 22:46:14 +00001512 spin_lock(&cifs_file_list_lock);
1513
Steve French9b22b0b2007-10-02 01:11:08 +00001514 /* else we simply continue to the next entry. Thus
1515 we do not loop on reopen errors. If we
1516 can not reopen the file, for example if we
1517 reconnected to a server with another client
1518 racing to delete or lock the file we would not
1519 make progress if we restarted before the beginning
1520 of the loop here. */
Steve French6148a742005-10-05 12:23:19 -07001521 }
1522 }
Jeff Layton2846d382008-09-22 21:33:33 -04001523 /* couldn't find useable FH with same pid, try any available */
1524 if (!any_available) {
1525 any_available = true;
1526 goto refind_writable;
1527 }
Jeff Layton44772882010-10-15 15:34:03 -04001528 spin_unlock(&cifs_file_list_lock);
Steve French6148a742005-10-05 12:23:19 -07001529 return NULL;
1530}
1531
/*
 * Write the byte range [from, to) of a locked page back to the server.
 *
 * Used by the writepage path when no cached file handle is at hand: it
 * looks up any writable open handle on the inode and issues a synchronous
 * cifs_write() through it.
 *
 * Returns 0 on success, a negative errno on failure, and 0 (ignoring the
 * data) when the write lands entirely beyond i_size (truncate race).
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	/* byte offset of the page within the file */
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* map the page; every return path below must kunmap() it */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	/* borrow any writable handle on this inode (takes a reference) */
	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1585
/*
 * Write back dirty pages of @mapping in wsize-sized batches.
 *
 * Gathers runs of consecutive dirty pages (up to wsize worth), wraps them
 * in a cifs_writedata and submits them with cifs_async_writev(). Falls
 * back to generic_writepages() (one page at a time) when wsize is smaller
 * than a page. Honors wbc range/sync settings, including the range_cyclic
 * wrap-around retry.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most one wsize worth of pages, clipped to the range */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* lock a consecutive run of the found pages for writeback */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		/* grab a writable handle and submit; retry handle lookup on
		   -EAGAIN when doing data-integrity (WB_SYNC_ALL) writeback */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
/*
 * Write a single locked page to the server via cifs_partialpagewrite().
 *
 * The caller holds the page lock and keeps it; this function only manages
 * the writeback flag and the page reference. -EAGAIN is retried forever
 * under WB_SYNC_ALL, otherwise the page is redirtied for a later pass.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
1823
/* ->writepage entry point: do the locked write, then drop the page lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return retval;
}
1830
/*
 * ->write_end: commit @copied bytes placed in @page by write_begin.
 *
 * Uptodate pages are simply marked dirty for later writeback; partial
 * writes to non-uptodate pages are pushed to the server synchronously
 * through the caller's own file handle. Updates i_size when the write
 * extends the file, then releases the page lock and reference taken by
 * write_begin. Returns the number of bytes committed or a negative errno.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with pid forwarding, issue the write as the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	/* PageChecked was set by write_begin for a page it left not-read;
	   it is uptodate now only if the whole range was copied */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	/* extend i_size if this write grew the file */
	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1891
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], drop the
 * cached mapping when we do not hold a read oplock (server may have newer
 * data), then ask the server to flush the handle unless NOSSYNC is set.
 *
 * An invalidate failure is logged and ignored -- only write persistence
 * matters to fsync. Returns 0 or a negative errno.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
	     file->f_path.dentry->d_name.name, datasync);

	/* without a read oplock the pagecache may be stale -- toss it */
	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
1928
Josef Bacik02c24a82011-07-16 20:44:56 -04001929int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001930{
1931 int xid;
1932 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00001933 struct cifs_tcon *tcon;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001934 struct cifsFileInfo *smbfile = file->private_data;
1935 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04001936 struct inode *inode = file->f_mapping->host;
1937
1938 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1939 if (rc)
1940 return rc;
1941 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001942
1943 xid = GetXid();
1944
1945 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1946 file->f_path.dentry->d_name.name, datasync);
1947
1948 tcon = tlink_tcon(smbfile->tlink);
1949 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1950 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
Steve Frenchb298f222009-02-21 21:17:43 +00001951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 FreeXid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04001953 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 return rc;
1955}
1956
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957/*
1958 * As file closes, flush all cached write data for this inode checking
1959 * for write behind errors.
1960 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07001961int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001963 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 int rc = 0;
1965
Jeff Laytoneb4b7562010-10-22 14:52:29 -04001966 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04001967 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00001968
Joe Perchesb6b38f72010-04-21 03:50:45 +00001969 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 return rc;
1972}
1973
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001974static int
1975cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1976{
1977 int rc = 0;
1978 unsigned long i;
1979
1980 for (i = 0; i < num_pages; i++) {
1981 pages[i] = alloc_page(__GFP_HIGHMEM);
1982 if (!pages[i]) {
1983 /*
1984 * save number of pages we have already allocated and
1985 * return with ENOMEM error
1986 */
1987 num_pages = i;
1988 rc = -ENOMEM;
1989 goto error;
1990 }
1991 }
1992
1993 return rc;
1994
1995error:
1996 for (i = 0; i < num_pages; i++)
1997 put_page(pages[i]);
1998 return rc;
1999}
2000
2001static inline
2002size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2003{
2004 size_t num_pages;
2005 size_t clen;
2006
2007 clen = min_t(const size_t, len, wsize);
2008 num_pages = clen / PAGE_CACHE_SIZE;
2009 if (clen % PAGE_CACHE_SIZE)
2010 num_pages++;
2011
2012 if (cur_len)
2013 *cur_len = clen;
2014
2015 return num_pages;
2016}
2017
2018static ssize_t
2019cifs_iovec_write(struct file *file, const struct iovec *iov,
2020 unsigned long nr_segs, loff_t *poffset)
2021{
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002022 unsigned int written;
2023 unsigned long num_pages, npages, i;
2024 size_t copied, len, cur_len;
2025 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002026 struct kvec *to_send;
2027 struct page **pages;
2028 struct iov_iter it;
2029 struct inode *inode;
2030 struct cifsFileInfo *open_file;
Steve French96daf2b2011-05-27 04:34:02 +00002031 struct cifs_tcon *pTcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002032 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002033 struct cifs_io_parms io_parms;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002034 int xid, rc;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002035 __u32 pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002036
2037 len = iov_length(iov, nr_segs);
2038 if (!len)
2039 return 0;
2040
2041 rc = generic_write_checks(file, poffset, &len, 0);
2042 if (rc)
2043 return rc;
2044
2045 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2046 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2047
2048 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
2049 if (!pages)
2050 return -ENOMEM;
2051
2052 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2053 if (!to_send) {
2054 kfree(pages);
2055 return -ENOMEM;
2056 }
2057
2058 rc = cifs_write_allocate_pages(pages, num_pages);
2059 if (rc) {
2060 kfree(pages);
2061 kfree(to_send);
2062 return rc;
2063 }
2064
2065 xid = GetXid();
2066 open_file = file->private_data;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002067
2068 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2069 pid = open_file->pid;
2070 else
2071 pid = current->tgid;
2072
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002073 pTcon = tlink_tcon(open_file->tlink);
2074 inode = file->f_path.dentry->d_inode;
2075
2076 iov_iter_init(&it, iov, nr_segs, len, 0);
2077 npages = num_pages;
2078
2079 do {
2080 size_t save_len = cur_len;
2081 for (i = 0; i < npages; i++) {
2082 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2083 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2084 copied);
2085 cur_len -= copied;
2086 iov_iter_advance(&it, copied);
2087 to_send[i+1].iov_base = kmap(pages[i]);
2088 to_send[i+1].iov_len = copied;
2089 }
2090
2091 cur_len = save_len - cur_len;
2092
2093 do {
2094 if (open_file->invalidHandle) {
2095 rc = cifs_reopen_file(open_file, false);
2096 if (rc != 0)
2097 break;
2098 }
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002099 io_parms.netfid = open_file->netfid;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002100 io_parms.pid = pid;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002101 io_parms.tcon = pTcon;
2102 io_parms.offset = *poffset;
2103 io_parms.length = cur_len;
2104 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2105 npages, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002106 } while (rc == -EAGAIN);
2107
2108 for (i = 0; i < npages; i++)
2109 kunmap(pages[i]);
2110
2111 if (written) {
2112 len -= written;
2113 total_written += written;
2114 cifs_update_eof(CIFS_I(inode), *poffset, written);
2115 *poffset += written;
2116 } else if (rc < 0) {
2117 if (!total_written)
2118 total_written = rc;
2119 break;
2120 }
2121
2122 /* get length and number of kvecs of the next write */
2123 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2124 } while (len > 0);
2125
2126 if (total_written > 0) {
2127 spin_lock(&inode->i_lock);
2128 if (*poffset > inode->i_size)
2129 i_size_write(inode, *poffset);
2130 spin_unlock(&inode->i_lock);
2131 }
2132
2133 cifs_stats_bytes_written(pTcon, total_written);
2134 mark_inode_dirty_sync(inode);
2135
2136 for (i = 0; i < num_pages; i++)
2137 put_page(pages[i]);
2138 kfree(to_send);
2139 kfree(pages);
2140 FreeXid(xid);
2141 return total_written;
2142}
2143
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002144ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002145 unsigned long nr_segs, loff_t pos)
2146{
2147 ssize_t written;
2148 struct inode *inode;
2149
2150 inode = iocb->ki_filp->f_path.dentry->d_inode;
2151
2152 /*
2153 * BB - optimize the way when signing is disabled. We can drop this
2154 * extra memory-to-memory copying and use iovec buffers for constructing
2155 * write request.
2156 */
2157
2158 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2159 if (written > 0) {
2160 CIFS_I(inode)->invalid_mapping = true;
2161 iocb->ki_pos = pos;
2162 }
2163
2164 return written;
2165}
2166
2167ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2168 unsigned long nr_segs, loff_t pos)
2169{
2170 struct inode *inode;
2171
2172 inode = iocb->ki_filp->f_path.dentry->d_inode;
2173
2174 if (CIFS_I(inode)->clientCanCacheAll)
2175 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2176
2177 /*
2178 * In strict cache mode we need to write the data to the server exactly
2179 * from the pos to pos+len-1 rather than flush all affected pages
2180 * because it may cause a error with mandatory locks on these pages but
2181 * not on the region from pos to ppos+len-1.
2182 */
2183
2184 return cifs_user_writev(iocb, iov, nr_segs, pos);
2185}
2186
/*
 * Uncached read path: read from the server in rsize-sized chunks and copy
 * each SMB response's data straight into the user iovec, bypassing the
 * pagecache. Advances *poffset as data arrives.
 *
 * Returns the number of bytes read, or a negative errno if the very first
 * chunk fails (partial progress is returned as a short read).
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* with pid forwarding, read on behalf of the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* issue the read, reopening an invalidated handle and
		   retrying on -EAGAIN */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			/* read_data is the raw SMB response buffer; the
			   payload sits at DataOffset past the 4-byte RFC1001
			   length header */
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
2281
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002282ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002283 unsigned long nr_segs, loff_t pos)
2284{
2285 ssize_t read;
2286
2287 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2288 if (read > 0)
2289 iocb->ki_pos = pos;
2290
2291 return read;
2292}
2293
2294ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2295 unsigned long nr_segs, loff_t pos)
2296{
2297 struct inode *inode;
2298
2299 inode = iocb->ki_filp->f_path.dentry->d_inode;
2300
2301 if (CIFS_I(inode)->clientCanCacheRead)
2302 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2303
2304 /*
2305 * In strict cache mode we need to read from the server all the time
2306 * if we don't have level II oplock because the server can delay mtime
2307 * change - so we can't make a decision about inode invalidating.
2308 * And we can also fail with pagereading if there are mandatory locks
2309 * on pages affected by this read but not on the region from pos to
2310 * pos+len-1.
2311 */
2312
2313 return cifs_user_readv(iocb, iov, nr_segs, pos);
2314}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
2316static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002317 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318{
2319 int rc = -EACCES;
2320 unsigned int bytes_read = 0;
2321 unsigned int total_read;
2322 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002323 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00002325 struct cifs_tcon *pTcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 int xid;
2327 char *current_offset;
2328 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002329 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002330 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002331 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
2333 xid = GetXid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002334 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002336 /* FIXME: set up handlers for larger reads and/or convert to async */
2337 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2338
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302340 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302342 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002344 open_file = file->private_data;
Jeff Layton13cfb732010-09-29 19:51:11 -04002345 pTcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002347 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2348 pid = open_file->pid;
2349 else
2350 pid = current->tgid;
2351
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002353 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002355 for (total_read = 0, current_offset = read_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 read_size > total_read;
2357 total_read += bytes_read, current_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002358 current_read_size = min_t(uint, read_size - total_read, rsize);
2359
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002360 /* For windows me and 9x we do not want to request more
2361 than it negotiated since it will refuse the read then */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002362 if ((pTcon->ses) &&
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002363 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002364 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002365 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 rc = -EAGAIN;
2368 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002369 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002370 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 if (rc != 0)
2372 break;
2373 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002374 io_parms.netfid = open_file->netfid;
2375 io_parms.pid = pid;
2376 io_parms.tcon = pTcon;
2377 io_parms.offset = *poffset;
2378 io_parms.length = current_read_size;
2379 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2380 &current_offset, &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 }
2382 if (rc || (bytes_read == 0)) {
2383 if (total_read) {
2384 break;
2385 } else {
2386 FreeXid(xid);
2387 return rc;
2388 }
2389 } else {
Steve Frencha4544342005-08-24 13:59:35 -07002390 cifs_stats_bytes_read(pTcon, total_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 *poffset += bytes_read;
2392 }
2393 }
2394 FreeXid(xid);
2395 return total_read;
2396}
2397
Jeff Laytonca83ce32011-04-12 09:13:44 -04002398/*
2399 * If the page is mmap'ed into a process' page tables, then we need to make
2400 * sure that it doesn't change while being written back.
2401 */
2402static int
2403cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2404{
2405 struct page *page = vmf->page;
2406
2407 lock_page(page);
2408 return VM_FAULT_LOCKED;
2409}
2410
2411static struct vm_operations_struct cifs_file_vm_ops = {
2412 .fault = filemap_fault,
2413 .page_mkwrite = cifs_page_mkwrite,
2414};
2415
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002416int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2417{
2418 int rc, xid;
2419 struct inode *inode = file->f_path.dentry->d_inode;
2420
2421 xid = GetXid();
2422
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002423 if (!CIFS_I(inode)->clientCanCacheRead) {
2424 rc = cifs_invalidate_mapping(inode);
2425 if (rc)
2426 return rc;
2427 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002428
2429 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002430 if (rc == 0)
2431 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002432 FreeXid(xid);
2433 return rc;
2434}
2435
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2437{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 int rc, xid;
2439
2440 xid = GetXid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002441 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002443 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 FreeXid(xid);
2445 return rc;
2446 }
2447 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002448 if (rc == 0)
2449 vma->vm_ops = &cifs_file_vm_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 FreeXid(xid);
2451 return rc;
2452}
2453
/*
 * ->readpages: batch pages from @page_list into rsize-bounded runs of
 * contiguous indexes and issue one async read per run.  Pages that make it
 * into a request are owned by the rdata until completion; pages left on
 * @page_list on return are handled (released) by the VFS caller.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid to the server if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		/* ->prev is the lowest index (list is in declining order) */
		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* take a file reference for the async request to hold */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/*
		 * Retry on -EAGAIN; note that if the reopen fails with any
		 * other error, `continue` re-tests the loop condition and
		 * we fall out with rc != 0, cleaning up below.
		 */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* put the pages back on the LRU and drop our refs */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2603
/*
 * Fill one page-cache page at *poffset.  Tries fscache first; on a miss,
 * does a synchronous cifs_read() of a full page, zero-fills any short-read
 * tail, marks the page uptodate and pushes it into fscache.  Returns 0 on
 * success or a negative error from the read.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* short read: zero the rest so no stale data is exposed */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	/* NOTE: reached on success too - undoes the kmap/page ref above */
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2647
2648static int cifs_readpage(struct file *file, struct page *page)
2649{
2650 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2651 int rc = -EACCES;
2652 int xid;
2653
2654 xid = GetXid();
2655
2656 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302657 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302659 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 }
2661
Joe Perchesb6b38f72010-04-21 03:50:45 +00002662 cFYI(1, "readpage %p at offset %d 0x%x\n",
2663 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 rc = cifs_readpage_worker(file, page, &offset);
2666
2667 unlock_page(page);
2668
2669 FreeXid(xid);
2670 return rc;
2671}
2672
Steve Frencha403a0a2007-07-26 15:54:16 +00002673static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2674{
2675 struct cifsFileInfo *open_file;
2676
Jeff Layton44772882010-10-15 15:34:03 -04002677 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002678 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04002679 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04002680 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002681 return 1;
2682 }
2683 }
Jeff Layton44772882010-10-15 15:34:03 -04002684 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002685 return 0;
2686}
2687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688/* We do not want to update the file size from server for inodes
2689 open for write - to avoid races with writepage extending
2690 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002691 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 but this is tricky to do without racing with writebehind
2693 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00002694bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695{
Steve Frencha403a0a2007-07-26 15:54:16 +00002696 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00002697 return true;
Steve French23e7dd72005-10-20 13:44:56 -07002698
Steve Frencha403a0a2007-07-26 15:54:16 +00002699 if (is_inode_writable(cifsInode)) {
2700 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08002701 struct cifs_sb_info *cifs_sb;
2702
Steve Frenchc32a0b62006-01-12 14:41:28 -08002703 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00002704 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002705 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08002706 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00002707 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08002708 }
2709
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002710 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00002711 return true;
Steve French7ba52632007-02-08 18:14:13 +00002712
Steve French4b18f2a2008-04-29 00:06:05 +00002713 return false;
Steve French23e7dd72005-10-20 13:44:56 -07002714 } else
Steve French4b18f2a2008-04-29 00:06:05 +00002715 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716}
2717
Nick Piggind9414772008-09-24 11:32:59 -04002718static int cifs_write_begin(struct file *file, struct address_space *mapping,
2719 loff_t pos, unsigned len, unsigned flags,
2720 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721{
Nick Piggind9414772008-09-24 11:32:59 -04002722 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2723 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002724 loff_t page_start = pos & PAGE_MASK;
2725 loff_t i_size;
2726 struct page *page;
2727 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728
Joe Perchesb6b38f72010-04-21 03:50:45 +00002729 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04002730
Nick Piggin54566b22009-01-04 12:00:53 -08002731 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002732 if (!page) {
2733 rc = -ENOMEM;
2734 goto out;
2735 }
Nick Piggind9414772008-09-24 11:32:59 -04002736
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002737 if (PageUptodate(page))
2738 goto out;
Steve French8a236262007-03-06 00:31:00 +00002739
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002740 /*
2741 * If we write a full page it will be up to date, no need to read from
2742 * the server. If the write is short, we'll end up doing a sync write
2743 * instead.
2744 */
2745 if (len == PAGE_CACHE_SIZE)
2746 goto out;
2747
2748 /*
2749 * optimize away the read when we have an oplock, and we're not
2750 * expecting to use any of the data we'd be reading in. That
2751 * is, when the page lies beyond the EOF, or straddles the EOF
2752 * and the write will cover all of the existing data.
2753 */
2754 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2755 i_size = i_size_read(mapping->host);
2756 if (page_start >= i_size ||
2757 (offset == 0 && (pos + len) >= i_size)) {
2758 zero_user_segments(page, 0, offset,
2759 offset + len,
2760 PAGE_CACHE_SIZE);
2761 /*
2762 * PageChecked means that the parts of the page
2763 * to which we're not writing are considered up
2764 * to date. Once the data is copied to the
2765 * page, it can be set uptodate.
2766 */
2767 SetPageChecked(page);
2768 goto out;
2769 }
2770 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771
Nick Piggind9414772008-09-24 11:32:59 -04002772 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002773 /*
2774 * might as well read a page, it is fast enough. If we get
2775 * an error, we don't need to return it. cifs_write_end will
2776 * do a sync write instead since PG_uptodate isn't set.
2777 */
2778 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00002779 } else {
2780 /* we could try using another file handle if there is one -
2781 but how would we lock it to prevent close of that handle
2782 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04002783 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00002784 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002785out:
2786 *pagep = page;
2787 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788}
2789
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05302790static int cifs_release_page(struct page *page, gfp_t gfp)
2791{
2792 if (PagePrivate(page))
2793 return 0;
2794
2795 return cifs_fscache_release_page(page, gfp);
2796}
2797
2798static void cifs_invalidate_page(struct page *page, unsigned long offset)
2799{
2800 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2801
2802 if (offset == 0)
2803 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2804}
2805
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002806static int cifs_launder_page(struct page *page)
2807{
2808 int rc = 0;
2809 loff_t range_start = page_offset(page);
2810 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2811 struct writeback_control wbc = {
2812 .sync_mode = WB_SYNC_ALL,
2813 .nr_to_write = 0,
2814 .range_start = range_start,
2815 .range_end = range_end,
2816 };
2817
2818 cFYI(1, "Launder page: %p", page);
2819
2820 if (clear_page_dirty_for_io(page))
2821 rc = cifs_writepage_locked(page, &wbc);
2822
2823 cifs_fscache_invalidate_page(page, page->mapping->host);
2824 return rc;
2825}
2826
/*
 * Workqueue handler for a server-initiated oplock break: break any local
 * lease, flush (and, when losing read caching, wait for and invalidate)
 * cached data, re-push byte-range locks, then acknowledge the break to
 * the server.  The ordering here is protocol-mandated: data must be safe
 * on the wire before the oplock release is sent.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/* break the matching local lease before touching the data */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* losing read caching: wait and drop stale pages */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* re-send cached byte-range locks now that the oplock is going */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
2867
/* Address-space operations used when the negotiated buffer is large
   enough for readpages (header plus a full page of data). */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00002880
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
/* Identical to cifs_addr_ops except .readpages is omitted, so the VFS
   falls back to single-page ->readpage reads. */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};