/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

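/*
 * Map the POSIX open access mode (O_RDONLY/O_WRONLY/O_RDWR) to the NT
 * generic/FILE_* access bits requested in the SMB open.
 */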
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

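/*
 * Translate POSIX open flags into the SMB_O_* flags used by the CIFS
 * POSIX extensions open call.
 */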
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

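/*
 * Choose the SMB create disposition (FILE_CREATE, FILE_OVERWRITE_IF,
 * FILE_OPEN_IF, FILE_OVERWRITE or FILE_OPEN) that matches the combination
 * of O_CREAT, O_EXCL and O_TRUNC passed in.
 */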
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

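/*
 * Open a file via the CIFS POSIX extensions (CIFSPOSIXCreate) and, when the
 * caller asks for it, instantiate or refresh the matching inode from the
 * FILE_UNIX_BASIC_INFO returned by the server.
 */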
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

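/*
 * Open a file with a regular NT (or legacy) SMB open when the POSIX open
 * path is not available, then refresh the inode metadata from the server.
 */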
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}

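/*
 * Allocate and initialize the per-open cifsFileInfo, link it into the tcon
 * and inode open-file lists, and record the oplock level the server granted.
 */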
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close because it may cause an error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

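/*
 * ->open() for regular files: prefer a POSIX open when the server advertises
 * the POSIX path operations capability, otherwise fall back to cifs_nt_open.
 */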
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

/* BB list all locks open on this file and relock */

	return rc;
}

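/*
 * Reopen a file whose handle was marked invalid, typically after the session
 * to the server was reset. When can_flush is set, cached data is written
 * back and the inode metadata is refreshed from the server.
 */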
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (enable_oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inode
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

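/* Allocate and fill in a byte-range lock record for the given file handle. */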
static struct cifsLockInfo *
cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!li)
		return li;
	li->netfid = netfid;
	li->offset = offset;
	li->length = len;
	li->type = type;
	li->pid = current->tgid;
	INIT_LIST_HEAD(&li->blist);
	init_waitqueue_head(&li->block_q);
	return li;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
			__u64 length, __u8 type, __u16 netfid,
			struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li, *tmp;

	list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
			 ((netfid == li->netfid && current->tgid == li->pid) ||
			  type == li->type))
			continue;
		else {
			*conf_lock = li;
			return true;
		}
	}
	return false;
}

static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
	       __u8 type, __u16 netfid, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	bool exist;

	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static int
cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
	      __u8 type, __u16 netfid)
{
	struct cifsLockInfo *li;

	li = cifs_lock_init(len, offset, type, netfid);
	if (!li)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	list_add_tail(&li->llist, &cinode->llist);
	mutex_unlock(&cinode->lock_mutex);
	return 0;
}

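/*
 * Add a lock to the inode list if it does not conflict with an existing one.
 * When wait is set, block until the conflicting lock is released and retry;
 * otherwise return -EACCES on conflict.
 */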
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
		 __u8 type, __u16 netfid, bool wait)
{
	struct cifsLockInfo *lock, *conf_lock;
	bool exist;
	int rc = 0;

	lock = cifs_lock_init(length, offset, type, netfid);
	if (!lock)
		return -ENOMEM;

try_again:
	exist = false;
	mutex_lock(&cinode->lock_mutex);

	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
					&conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cinode->llist);
		mutex_unlock(&cinode->lock_mutex);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		mutex_unlock(&cinode->lock_mutex);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		else {
			mutex_lock(&cinode->lock_mutex);
			list_del_init(&lock->blist);
			mutex_unlock(&cinode->lock_mutex);
		}
	}

	kfree(lock);
	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	mutex_lock(&cinode->lock_mutex);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc;

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		return 1;
	}
	rc = posix_lock_file_wait(file, flock);
	mutex_unlock(&cinode->lock_mutex);
	return rc;
}

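/*
 * Push all cached mandatory byte-range locks to the server in batched
 * LOCKING_ANDX requests, then stop caching brlocks for this inode.
 */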
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       li->type, 0, num, buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	FreeXid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

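/*
 * Walk the inode's VFS lock list and send each POSIX byte-range lock to the
 * server, then stop caching brlocks for this inode.
 */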
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	struct cifsLockInfo *lck, *tmp;
	int rc = 0, xid, type;
	__u64 length;
	struct list_head locks_to_send;

	xid = GetXid();

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	INIT_LIST_HEAD(&locks_to_send);

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;

		lck = cifs_lock_init(length, flock->fl_start, type,
				     cfile->netfid);
		if (!lck) {
			rc = -ENOMEM;
			goto send_locks;
		}
		lck->pid = flock->fl_pid;

		list_add_tail(&lck->llist, &locks_to_send);
	}

send_locks:
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		struct file_lock tmp_lock;
		int stored_rc;

		tmp_lock.fl_start = lck->offset;
		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     0, lck->length, &tmp_lock,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	FreeXid(xid);
	return rc;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return cifs_push_mandatory_locks(cfile);
}

static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
		bool *wait_flag)
{
	if (flock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (flock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (flock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			"not implemented yet");
	if (flock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

	*type = LOCKING_ANDX_LARGE_FILES;
	if (flock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		*type |= LOCKING_ANDX_SHARED_LOCK;
		*lock = 1;
	} else
		cFYI(1, "Unknown type of lock");
}

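/*
 * Handle F_GETLK: test for conflicts against locally cached locks first and
 * fall back to lock/unlock probes against the server when needed.
 */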
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      1 /* get */, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
			    flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1, type, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type, 0, 0);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
		rc = 0;
		return rc;
	}

	if (type & LOCKING_ANDX_SHARED_LOCK) {
		flock->fl_type = F_WRLCK;
		rc = 0;
		return rc;
	}

	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
			 flock->fl_start, 0, 1,
			 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
	if (rc == 0) {
		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
				 length, flock->fl_start, 1, 0,
				 type | LOCKING_ANDX_SHARED_LOCK,
				 0, 0);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cERROR(1, "Error unlocking previously locked "
				  "range %d during test of lock", rc);
	} else
		flock->fl_type = F_WRLCK;

	rc = 0;
	return rc;
}

static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

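/*
 * Remove from the inode list every lock held by this process on this handle
 * that lies inside the unlock range; when locks are no longer cached, also
 * send the matching unlock requests to the server in batches.
 */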
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;
			if (types[i] != li->type)
				continue;
			if (!cinode->can_cache_brlcks) {
				cur->Pid = cpu_to_le16(li->pid);
				cur->LengthLow = cpu_to_le32((u32)li->length);
				cur->LengthHigh =
					cpu_to_le32((u32)(li->length>>32));
				cur->OffsetLow = cpu_to_le32((u32)li->offset);
				cur->OffsetHigh =
					cpu_to_le32((u32)(li->offset>>32));
				/*
				 * We need to save a lock here to let us add
				 * it again to the inode list if the unlock
				 * range request fails on the server.
				 */
				list_move(&li->llist, &tmp_llist);
				if (++num == max_num) {
					stored_rc = cifs_lockv(xid, tcon,
							       cfile->netfid,
							       li->type, num,
							       0, buf);
					if (stored_rc) {
						/*
						 * We failed on the unlock range
						 * request - add all locks from
						 * the tmp list to the head of
						 * the inode list.
						 */
						cifs_move_llist(&tmp_llist,
								&cinode->llist);
						rc = stored_rc;
					} else
						/*
						 * The unlock range request
						 * succeeded - free the tmp
						 * list.
						 */
						cifs_free_llist(&tmp_llist);
					cur = buf;
					num = 0;
				} else
					cur++;
			} else {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the inode list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cinode->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}

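/*
 * Handle F_SETLK/F_SETLKW: set or clear a POSIX or mandatory byte-range
 * lock, caching Windows-style locks locally after the server grants them.
 */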
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	__u16 netfid = cfile->netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      0 /* set */, length, flock,
				      posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		rc = cifs_lock_add_if(cinode, flock->fl_start, length,
				      type, netfid, wait_flag);
		if (rc < 0)
			return rc;
		else if (!rc)
			goto out;

		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
				 flock->fl_start, 0, 1, type, wait_flag, 0);
		if (rc == 0) {
			/* For Windows locks we must store them. */
			rc = cifs_lock_add(cinode, length, flock->fl_start,
					   type, netfid);
		}
	} else if (unlock)
		rc = cifs_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}

/* update the file size (if needed) after a write */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		      unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

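/*
 * Write data through an open file handle, retrying with a reopened handle
 * on -EAGAIN and updating the cached file size as bytes are written.
 */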
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001343static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001344 const char *write_data, size_t write_size,
1345 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346{
1347 int rc = 0;
1348 unsigned int bytes_written = 0;
1349 unsigned int total_written;
1350 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001351 struct cifs_tcon *pTcon;
Jeff Layton77499812011-01-11 07:24:23 -05001352 int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001353 struct dentry *dentry = open_file->dentry;
1354 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001355 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
Jeff Layton7da4b492010-10-15 15:34:00 -04001357 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358
Joe Perchesb6b38f72010-04-21 03:50:45 +00001359 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001360 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Jeff Layton13cfb732010-09-29 19:51:11 -04001362 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001363
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 for (total_written = 0; write_size > total_written;
1367 total_written += bytes_written) {
1368 rc = -EAGAIN;
1369 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001370 struct kvec iov[2];
1371 unsigned int len;
1372
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 /* we could deadlock if we called
1375 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001376 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001378 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 if (rc != 0)
1380 break;
1381 }
Steve French3e844692005-10-03 13:37:24 -07001382
Jeff Laytonca83ce32011-04-12 09:13:44 -04001383 len = min((size_t)cifs_sb->wsize,
1384 write_size - total_written);
1385 /* iov[0] is reserved for smb header */
1386 iov[1].iov_base = (char *)write_data + total_written;
1387 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001388 io_parms.netfid = open_file->netfid;
1389 io_parms.pid = pid;
1390 io_parms.tcon = pTcon;
1391 io_parms.offset = *poffset;
1392 io_parms.length = len;
1393 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1394 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 }
1396 if (rc || (bytes_written == 0)) {
1397 if (total_written)
1398 break;
1399 else {
1400 FreeXid(xid);
1401 return rc;
1402 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001403 } else {
1404 cifs_update_eof(cifsi, *poffset, bytes_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 }
1408
Steve Frencha45443472005-08-24 13:59:35 -07001409 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Jeff Layton7da4b492010-10-15 15:34:00 -04001411 if (total_written > 0) {
1412 spin_lock(&dentry->d_inode->i_lock);
1413 if (*poffset > dentry->d_inode->i_size)
1414 i_size_write(dentry->d_inode, *poffset);
1415 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001417 mark_inode_dirty_sync(dentry->d_inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 FreeXid(xid);
1419 return total_written;
1420}
1421
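/*
 * Find an open handle on this inode usable for reading.  On success the
 * handle is returned with an extra reference taken under
 * cifs_file_list_lock; the caller drops it with cifsFileInfo_put() when
 * done.  Returns NULL if no valid readable handle exists.
 */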
Jeff Layton6508d902010-09-29 19:51:11 -04001422struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1423 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001424{
1425 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001426 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1427
1428 /* only filter by fsuid on multiuser mounts */
1429 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1430 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001431
Jeff Layton44772882010-10-15 15:34:03 -04001432 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001433 /* we could simply get the first_list_entry since write-only entries
1434 are always at the end of the list but since the first entry might
1435 have a close pending, we go through the whole list */
1436 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001437 if (fsuid_only && open_file->uid != current_fsuid())
1438 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001439 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001440 if (!open_file->invalidHandle) {
1441 /* found a good file */
1442 /* lock it so it will not be closed on us */
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001443 cifsFileInfo_get(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001444 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001445 return open_file;
1446 } /* else might as well continue, and look for
1447 another, or simply have the caller reopen it
1448 again rather than trying to fix this handle */
1449 } else /* write only file */
1450 break; /* write only files are last so must be done */
1451 }
Jeff Layton44772882010-10-15 15:34:03 -04001452 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001453 return NULL;
1454}
Steve French630f3f0c2007-10-25 21:17:17 +00001455
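/*
 * Find a writable handle for this inode, preferring one opened by the
 * current task and falling back to any available handle.  Invalidated
 * handles are reopened here; on a reopen failure we move on to the next
 * entry rather than looping.  As with find_readable_file(), the returned
 * handle carries a reference the caller must put.
 */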
Jeff Layton6508d902010-09-29 19:51:11 -04001456struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1457 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001458{
1459 struct cifsFileInfo *open_file;
Jeff Laytond3892292010-11-02 16:22:50 -04001460 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001461 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001462 int rc;
Steve French6148a742005-10-05 12:23:19 -07001463
Steve French60808232006-04-22 15:53:05 +00001464 /* Having a null inode here (because mapping->host was set to zero by
1465	   the VFS or MM) should not happen but we had reports of an oops (due to
1466	   it being zero) during stress test cases so we need to check for it */
1467
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001468 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001469 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001470 dump_stack();
1471 return NULL;
1472 }
1473
Jeff Laytond3892292010-11-02 16:22:50 -04001474 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1475
Jeff Layton6508d902010-09-29 19:51:11 -04001476 /* only filter by fsuid on multiuser mounts */
1477 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1478 fsuid_only = false;
1479
Jeff Layton44772882010-10-15 15:34:03 -04001480 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001481refind_writable:
Steve French6148a742005-10-05 12:23:19 -07001482 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001483 if (!any_available && open_file->pid != current->tgid)
1484 continue;
1485 if (fsuid_only && open_file->uid != current_fsuid())
1486 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001487 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001488 cifsFileInfo_get(open_file);
Steve French9b22b0b2007-10-02 01:11:08 +00001489
1490 if (!open_file->invalidHandle) {
1491 /* found a good writable file */
Jeff Layton44772882010-10-15 15:34:03 -04001492 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001493 return open_file;
1494 }
Steve French8840dee2007-11-16 23:05:52 +00001495
Jeff Layton44772882010-10-15 15:34:03 -04001496 spin_unlock(&cifs_file_list_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +00001497
Steve French9b22b0b2007-10-02 01:11:08 +00001498 /* Had to unlock since following call can block */
Jeff Layton15886172010-10-15 15:33:59 -04001499 rc = cifs_reopen_file(open_file, false);
Steve Frenchcdff08e2010-10-21 22:46:14 +00001500 if (!rc)
1501 return open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001502
Steve Frenchcdff08e2010-10-21 22:46:14 +00001503 /* if it fails, try another handle if possible */
Joe Perchesb6b38f72010-04-21 03:50:45 +00001504 cFYI(1, "wp failed on reopen file");
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001505 cifsFileInfo_put(open_file);
Steve French8840dee2007-11-16 23:05:52 +00001506
Steve Frenchcdff08e2010-10-21 22:46:14 +00001507 spin_lock(&cifs_file_list_lock);
1508
Steve French9b22b0b2007-10-02 01:11:08 +00001509 /* else we simply continue to the next entry. Thus
1510 we do not loop on reopen errors. If we
1511	   cannot reopen the file, for example if we
1512	   reconnected to a server with another client
1513	   racing to delete or lock the file, we would not
1514 make progress if we restarted before the beginning
1515 of the loop here. */
Steve French6148a742005-10-05 12:23:19 -07001516 }
1517 }
Jeff Layton2846d382008-09-22 21:33:33 -04001518	/* couldn't find a usable FH with the same pid, try any available */
1519 if (!any_available) {
1520 any_available = true;
1521 goto refind_writable;
1522 }
Jeff Layton44772882010-10-15 15:34:03 -04001523 spin_unlock(&cifs_file_list_lock);
Steve French6148a742005-10-05 12:23:19 -07001524 return NULL;
1525}
1526
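/*
 * Write the byte range [from, to) of a cached page back to the server using
 * any writable handle on the inode.  The range is clamped so the write never
 * extends the file, and a write that races with truncate is silently
 * dropped.
 */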
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1528{
1529 struct address_space *mapping = page->mapping;
1530 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1531 char *write_data;
1532 int rc = -EFAULT;
1533 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001535 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
1537 if (!mapping || !mapping->host)
1538 return -EFAULT;
1539
1540 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
1542 offset += (loff_t)from;
1543 write_data = kmap(page);
1544 write_data += from;
1545
1546 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1547 kunmap(page);
1548 return -EIO;
1549 }
1550
1551 /* racing with truncate? */
1552 if (offset > mapping->host->i_size) {
1553 kunmap(page);
1554 return 0; /* don't care */
1555 }
1556
1557 /* check to make sure that we are not extending the file */
1558 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001559 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Jeff Layton6508d902010-09-29 19:51:11 -04001561 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001562 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001563 bytes_written = cifs_write(open_file, open_file->pid,
1564 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001565 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001567 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001568 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001569 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001570 else if (bytes_written < 0)
1571 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001572 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001573 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 rc = -EIO;
1575 }
1576
1577 kunmap(page);
1578 return rc;
1579}
1580
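/*
 * Writeback: gather up to wsize worth of contiguous dirty pages with
 * find_get_pages_tag(), wrap them in a cifs_writedata and send them via
 * cifs_async_writev(), redirtying the pages if the send fails with -EAGAIN.
 * When wsize is smaller than a page cache page we fall back to
 * generic_writepages(), i.e. one page at a time through cifs_writepage().
 */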
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001582 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001584 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1585 bool done = false, scanned = false, range_whole = false;
1586 pgoff_t end, index;
1587 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07001588 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001589 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001590
Steve French37c0eb42005-10-05 14:50:29 -07001591 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001592 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001593 * one page at a time via cifs_writepage
1594 */
1595 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1596 return generic_writepages(mapping, wbc);
1597
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001598 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001599 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001600 end = -1;
1601 } else {
1602 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1603 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1604 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001605 range_whole = true;
1606 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001607 }
1608retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001609 while (!done && index <= end) {
1610 unsigned int i, nr_pages, found_pages;
1611 pgoff_t next = 0, tofind;
1612 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001613
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001614 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1615 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001616
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001617 wdata = cifs_writedata_alloc((unsigned int)tofind);
1618 if (!wdata) {
1619 rc = -ENOMEM;
1620 break;
1621 }
1622
1623 /*
1624 * find_get_pages_tag seems to return a max of 256 on each
1625 * iteration, so we must call it several times in order to
1626 * fill the array or the wsize is effectively limited to
1627 * 256 * PAGE_CACHE_SIZE.
1628 */
1629 found_pages = 0;
1630 pages = wdata->pages;
1631 do {
1632 nr_pages = find_get_pages_tag(mapping, &index,
1633 PAGECACHE_TAG_DIRTY,
1634 tofind, pages);
1635 found_pages += nr_pages;
1636 tofind -= nr_pages;
1637 pages += nr_pages;
1638 } while (nr_pages && tofind && index <= end);
1639
1640 if (found_pages == 0) {
1641 kref_put(&wdata->refcount, cifs_writedata_release);
1642 break;
1643 }
1644
1645 nr_pages = 0;
1646 for (i = 0; i < found_pages; i++) {
1647 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001648 /*
1649 * At this point we hold neither mapping->tree_lock nor
1650 * lock on the page itself: the page may be truncated or
1651 * invalidated (changing page->mapping to NULL), or even
1652 * swizzled back from swapper_space to tmpfs file
1653 * mapping
1654 */
1655
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001656 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001657 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001658 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001659 break;
1660
1661 if (unlikely(page->mapping != mapping)) {
1662 unlock_page(page);
1663 break;
1664 }
1665
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001666 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001667 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001668 unlock_page(page);
1669 break;
1670 }
1671
1672 if (next && (page->index != next)) {
1673 /* Not next consecutive page */
1674 unlock_page(page);
1675 break;
1676 }
1677
1678 if (wbc->sync_mode != WB_SYNC_NONE)
1679 wait_on_page_writeback(page);
1680
1681 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001682 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001683 unlock_page(page);
1684 break;
1685 }
Steve French84d2f072005-10-12 15:32:05 -07001686
Linus Torvaldscb876f42006-12-23 16:19:07 -08001687 /*
1688 * This actually clears the dirty bit in the radix tree.
1689 * See cifs_writepage() for more commentary.
1690 */
1691 set_page_writeback(page);
1692
Steve French84d2f072005-10-12 15:32:05 -07001693 if (page_offset(page) >= mapping->host->i_size) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001694 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001695 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001696 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001697 break;
1698 }
1699
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001700 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001701 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001702 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001703 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001704
1705 /* reset index to refind any pages skipped */
1706 if (nr_pages == 0)
1707 index = wdata->pages[0]->index + 1;
1708
1709 /* put any pages we aren't going to use */
1710 for (i = nr_pages; i < found_pages; i++) {
1711 page_cache_release(wdata->pages[i]);
1712 wdata->pages[i] = NULL;
1713 }
1714
1715 /* nothing to write? */
1716 if (nr_pages == 0) {
1717 kref_put(&wdata->refcount, cifs_writedata_release);
1718 continue;
1719 }
1720
1721 wdata->sync_mode = wbc->sync_mode;
1722 wdata->nr_pages = nr_pages;
1723 wdata->offset = page_offset(wdata->pages[0]);
1724
1725 do {
1726 if (wdata->cfile != NULL)
1727 cifsFileInfo_put(wdata->cfile);
1728 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1729 false);
1730 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001731 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001732 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001733 break;
Steve French37c0eb42005-10-05 14:50:29 -07001734 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001735 rc = cifs_async_writev(wdata);
1736 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001737
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001738 for (i = 0; i < nr_pages; ++i)
1739 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001740
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001741 /* send failure -- clean up the mess */
1742 if (rc != 0) {
1743 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001744 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001745 redirty_page_for_writepage(wbc,
1746 wdata->pages[i]);
1747 else
1748 SetPageError(wdata->pages[i]);
1749 end_page_writeback(wdata->pages[i]);
1750 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001751 }
Jeff Layton941b8532011-01-11 07:24:01 -05001752 if (rc != -EAGAIN)
1753 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001754 }
1755 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001756
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001757 wbc->nr_to_write -= nr_pages;
1758 if (wbc->nr_to_write <= 0)
1759 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001760
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001761 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001762 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001763
Steve French37c0eb42005-10-05 14:50:29 -07001764 if (!scanned && !done) {
1765 /*
1766 * We hit the last page and there is more work to be done: wrap
1767 * back to the start of the file
1768 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001769 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001770 index = 0;
1771 goto retry;
1772 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001773
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001774 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001775 mapping->writeback_index = index;
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 return rc;
1778}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001780static int
1781cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001783 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 int xid;
1785
1786 xid = GetXid();
1787/* BB add check for wbc flags */
1788 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001789 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001790 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001791
1792 /*
1793 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1794 *
1795 * A writepage() implementation always needs to do either this,
1796 * or re-dirty the page with "redirty_page_for_writepage()" in
1797 * the case of a failure.
1798 *
1799 * Just unlocking the page will cause the radix tree tag-bits
1800 * to fail to update with the state of the page correctly.
1801 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001802 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001803retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001805 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1806 goto retry_write;
1807 else if (rc == -EAGAIN)
1808 redirty_page_for_writepage(wbc, page);
1809 else if (rc != 0)
1810 SetPageError(page);
1811 else
1812 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001813 end_page_writeback(page);
1814 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 FreeXid(xid);
1816 return rc;
1817}
1818
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001819static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1820{
1821 int rc = cifs_writepage_locked(page, wbc);
1822 unlock_page(page);
1823 return rc;
1824}
1825
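/*
 * Complete a buffered write.  If the page is not up to date and the copy was
 * short, the data is pushed to the server synchronously via cifs_write();
 * otherwise the page is just marked dirty.  In either case i_size is
 * advanced if the write extended the file.
 */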
Nick Piggind9414772008-09-24 11:32:59 -04001826static int cifs_write_end(struct file *file, struct address_space *mapping,
1827 loff_t pos, unsigned len, unsigned copied,
1828 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829{
Nick Piggind9414772008-09-24 11:32:59 -04001830 int rc;
1831 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00001832 struct cifsFileInfo *cfile = file->private_data;
1833 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1834 __u32 pid;
1835
1836 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1837 pid = cfile->pid;
1838 else
1839 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
Joe Perchesb6b38f72010-04-21 03:50:45 +00001841 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1842 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00001843
Jeff Laytona98ee8c2008-11-26 19:32:33 +00001844 if (PageChecked(page)) {
1845 if (copied == len)
1846 SetPageUptodate(page);
1847 ClearPageChecked(page);
1848 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04001849 SetPageUptodate(page);
1850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04001852 char *page_data;
1853 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1854 int xid;
1855
1856 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 /* this is probably better than directly calling
1858	   cifs_partialpagewrite since in this function the file handle is
1859	   known, which we might as well leverage */
1860 /* BB check if anything else missing out of ppw
1861 such as updating last write time */
1862 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00001863 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04001864 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04001866
1867 FreeXid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001868 } else {
Nick Piggind9414772008-09-24 11:32:59 -04001869 rc = copied;
1870 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 set_page_dirty(page);
1872 }
1873
Nick Piggind9414772008-09-24 11:32:59 -04001874 if (rc > 0) {
1875 spin_lock(&inode->i_lock);
1876 if (pos > inode->i_size)
1877 i_size_write(inode, pos);
1878 spin_unlock(&inode->i_lock);
1879 }
1880
1881 unlock_page(page);
1882 page_cache_release(page);
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 return rc;
1885}
1886
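/*
 * fsync for strict cache mode: flush and wait on the dirty range, invalidate
 * the page cache when we do not hold a read oplock (so later reads go to the
 * server), then send an SMB Flush unless CIFS_MOUNT_NOSSYNC is set.
 */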
Josef Bacik02c24a82011-07-16 20:44:56 -04001887int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1888 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889{
1890 int xid;
1891 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00001892 struct cifs_tcon *tcon;
Joe Perchesc21dfb62010-07-12 13:50:14 -07001893 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08001894 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001895 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
Josef Bacik02c24a82011-07-16 20:44:56 -04001897 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1898 if (rc)
1899 return rc;
1900 mutex_lock(&inode->i_mutex);
1901
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 xid = GetXid();
1903
Joe Perchesb6b38f72010-04-21 03:50:45 +00001904 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02001905 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00001906
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04001907 if (!CIFS_I(inode)->clientCanCacheRead) {
1908 rc = cifs_invalidate_mapping(inode);
1909 if (rc) {
1910 cFYI(1, "rc: %d during invalidate phase", rc);
1911 rc = 0; /* don't care about it in fsync */
1912 }
1913 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04001914
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001915 tcon = tlink_tcon(smbfile->tlink);
1916 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1917 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1918
1919 FreeXid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04001920 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001921 return rc;
1922}
1923
Josef Bacik02c24a82011-07-16 20:44:56 -04001924int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001925{
1926 int xid;
1927 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00001928 struct cifs_tcon *tcon;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001929 struct cifsFileInfo *smbfile = file->private_data;
1930 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04001931 struct inode *inode = file->f_mapping->host;
1932
1933 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
1934 if (rc)
1935 return rc;
1936 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03001937
1938 xid = GetXid();
1939
1940 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1941 file->f_path.dentry->d_name.name, datasync);
1942
1943 tcon = tlink_tcon(smbfile->tlink);
1944 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1945 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
Steve Frenchb298f222009-02-21 21:17:43 +00001946
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 FreeXid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04001948 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 return rc;
1950}
1951
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952/*
1953 * As file closes, flush all cached write data for this inode checking
1954 * for write behind errors.
1955 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07001956int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001958 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 int rc = 0;
1960
Jeff Laytoneb4b7562010-10-22 14:52:29 -04001961 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04001962 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00001963
Joe Perchesb6b38f72010-04-21 03:50:45 +00001964 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 return rc;
1967}
1968
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001969static int
1970cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1971{
1972 int rc = 0;
1973 unsigned long i;
1974
1975 for (i = 0; i < num_pages; i++) {
1976 pages[i] = alloc_page(__GFP_HIGHMEM);
1977 if (!pages[i]) {
1978 /*
1979 * save number of pages we have already allocated and
1980 * return with ENOMEM error
1981 */
1982 num_pages = i;
1983 rc = -ENOMEM;
1984 goto error;
1985 }
1986 }
1987
1988 return rc;
1989
1990error:
1991 for (i = 0; i < num_pages; i++)
1992 put_page(pages[i]);
1993 return rc;
1994}
1995
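/*
 * Clamp the request to wsize and count how many pages that covers, rounding
 * up.  As a worked example (assuming 4k pages), a 20000 byte request against
 * a 16384 byte wsize yields *cur_len = 16384 and a return value of 4.
 */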
1996static inline
1997size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1998{
1999 size_t num_pages;
2000 size_t clen;
2001
2002 clen = min_t(const size_t, len, wsize);
2003 num_pages = clen / PAGE_CACHE_SIZE;
2004 if (clen % PAGE_CACHE_SIZE)
2005 num_pages++;
2006
2007 if (cur_len)
2008 *cur_len = clen;
2009
2010 return num_pages;
2011}
2012
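/*
 * Uncached write path: copy the user iovec into a batch of temporary pages,
 * kmap them into a kvec array, and send up to wsize bytes per CIFSSMBWrite2
 * call, repeating until the request is exhausted or an error occurs.
 */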
2013static ssize_t
2014cifs_iovec_write(struct file *file, const struct iovec *iov,
2015 unsigned long nr_segs, loff_t *poffset)
2016{
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002017 unsigned int written;
2018 unsigned long num_pages, npages, i;
2019 size_t copied, len, cur_len;
2020 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002021 struct kvec *to_send;
2022 struct page **pages;
2023 struct iov_iter it;
2024 struct inode *inode;
2025 struct cifsFileInfo *open_file;
Steve French96daf2b2011-05-27 04:34:02 +00002026 struct cifs_tcon *pTcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002027 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002028 struct cifs_io_parms io_parms;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002029 int xid, rc;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002030 __u32 pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002031
2032 len = iov_length(iov, nr_segs);
2033 if (!len)
2034 return 0;
2035
2036 rc = generic_write_checks(file, poffset, &len, 0);
2037 if (rc)
2038 return rc;
2039
2040 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2041 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2042
2043 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
2044 if (!pages)
2045 return -ENOMEM;
2046
2047 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2048 if (!to_send) {
2049 kfree(pages);
2050 return -ENOMEM;
2051 }
2052
2053 rc = cifs_write_allocate_pages(pages, num_pages);
2054 if (rc) {
2055 kfree(pages);
2056 kfree(to_send);
2057 return rc;
2058 }
2059
2060 xid = GetXid();
2061 open_file = file->private_data;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002062
2063 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2064 pid = open_file->pid;
2065 else
2066 pid = current->tgid;
2067
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002068 pTcon = tlink_tcon(open_file->tlink);
2069 inode = file->f_path.dentry->d_inode;
2070
2071 iov_iter_init(&it, iov, nr_segs, len, 0);
2072 npages = num_pages;
2073
2074 do {
2075 size_t save_len = cur_len;
2076 for (i = 0; i < npages; i++) {
2077 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
2078 copied = iov_iter_copy_from_user(pages[i], &it, 0,
2079 copied);
2080 cur_len -= copied;
2081 iov_iter_advance(&it, copied);
2082 to_send[i+1].iov_base = kmap(pages[i]);
2083 to_send[i+1].iov_len = copied;
2084 }
2085
2086 cur_len = save_len - cur_len;
2087
2088 do {
2089 if (open_file->invalidHandle) {
2090 rc = cifs_reopen_file(open_file, false);
2091 if (rc != 0)
2092 break;
2093 }
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002094 io_parms.netfid = open_file->netfid;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002095 io_parms.pid = pid;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04002096 io_parms.tcon = pTcon;
2097 io_parms.offset = *poffset;
2098 io_parms.length = cur_len;
2099 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2100 npages, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002101 } while (rc == -EAGAIN);
2102
2103 for (i = 0; i < npages; i++)
2104 kunmap(pages[i]);
2105
2106 if (written) {
2107 len -= written;
2108 total_written += written;
2109 cifs_update_eof(CIFS_I(inode), *poffset, written);
2110 *poffset += written;
2111 } else if (rc < 0) {
2112 if (!total_written)
2113 total_written = rc;
2114 break;
2115 }
2116
2117 /* get length and number of kvecs of the next write */
2118 npages = get_numpages(cifs_sb->wsize, len, &cur_len);
2119 } while (len > 0);
2120
2121 if (total_written > 0) {
2122 spin_lock(&inode->i_lock);
2123 if (*poffset > inode->i_size)
2124 i_size_write(inode, *poffset);
2125 spin_unlock(&inode->i_lock);
2126 }
2127
2128 cifs_stats_bytes_written(pTcon, total_written);
2129 mark_inode_dirty_sync(inode);
2130
2131 for (i = 0; i < num_pages; i++)
2132 put_page(pages[i]);
2133 kfree(to_send);
2134 kfree(pages);
2135 FreeXid(xid);
2136 return total_written;
2137}
2138
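/*
 * aio write entry points: cifs_user_writev() always takes the uncached path
 * above and then invalidates the mapping, while cifs_strict_writev() may use
 * the generic cached path, but only while we hold an exclusive oplock
 * (clientCanCacheAll).
 */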
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002139ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002140 unsigned long nr_segs, loff_t pos)
2141{
2142 ssize_t written;
2143 struct inode *inode;
2144
2145 inode = iocb->ki_filp->f_path.dentry->d_inode;
2146
2147 /*
2148	 * BB - optimize the path when signing is disabled. We can drop this
2149	 * extra memory-to-memory copying and use iovec buffers for constructing
2150	 * the write request.
2151 */
2152
2153 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2154 if (written > 0) {
2155 CIFS_I(inode)->invalid_mapping = true;
2156 iocb->ki_pos = pos;
2157 }
2158
2159 return written;
2160}
2161
2162ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2163 unsigned long nr_segs, loff_t pos)
2164{
2165 struct inode *inode;
2166
2167 inode = iocb->ki_filp->f_path.dentry->d_inode;
2168
2169 if (CIFS_I(inode)->clientCanCacheAll)
2170 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2171
2172 /*
2173 * In strict cache mode we need to write the data to the server exactly
2174 * from the pos to pos+len-1 rather than flush all affected pages
2175	 * because it may cause an error with mandatory locks on these pages but
2176	 * not on the region from pos to pos+len-1.
2177 */
2178
2179 return cifs_user_writev(iocb, iov, nr_segs, pos);
2180}
2181
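/*
 * Uncached read path: issue CIFSSMBRead in chunks of at most rsize bytes and
 * copy each reply straight into the user iovec with memcpy_toiovecend(),
 * bypassing the page cache.
 */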
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002182static ssize_t
2183cifs_iovec_read(struct file *file, const struct iovec *iov,
2184 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185{
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002186 int rc;
2187 int xid;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002188 ssize_t total_read;
2189 unsigned int bytes_read = 0;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002190 size_t len, cur_len;
2191 int iov_offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00002193 struct cifs_tcon *pTcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 struct smb_com_read_rsp *pSMBr;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002196 struct cifs_io_parms io_parms;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002197 char *read_data;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002198 unsigned int rsize;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002199 __u32 pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002200
2201 if (!nr_segs)
2202 return 0;
2203
2204 len = iov_length(iov, nr_segs);
2205 if (!len)
2206 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
2208 xid = GetXid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002209 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002211 /* FIXME: set up handlers for larger reads and/or convert to async */
2212 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2213
Joe Perchesc21dfb62010-07-12 13:50:14 -07002214 open_file = file->private_data;
Jeff Layton13cfb732010-09-29 19:51:11 -04002215 pTcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002217 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2218 pid = open_file->pid;
2219 else
2220 pid = current->tgid;
2221
Steve Frenchad7a2922008-02-07 23:25:02 +00002222 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002223 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002224
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002225 for (total_read = 0; total_read < len; total_read += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002226 cur_len = min_t(const size_t, len - total_read, rsize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 rc = -EAGAIN;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002228 read_data = NULL;
2229
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 while (rc == -EAGAIN) {
Steve Frenchec637e32005-12-12 20:53:18 -08002231 int buf_type = CIFS_NO_BUFFER;
Steve Frenchcdff08e2010-10-21 22:46:14 +00002232 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002233 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 if (rc != 0)
2235 break;
2236 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002237 io_parms.netfid = open_file->netfid;
2238 io_parms.pid = pid;
2239 io_parms.tcon = pTcon;
2240 io_parms.offset = *poffset;
Pavel Shilovsky2cebaa52011-07-20 18:24:09 +04002241 io_parms.length = cur_len;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002242 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002243 &read_data, &buf_type);
2244 pSMBr = (struct smb_com_read_rsp *)read_data;
2245 if (read_data) {
2246 char *data_offset = read_data + 4 +
2247 le16_to_cpu(pSMBr->DataOffset);
2248 if (memcpy_toiovecend(iov, data_offset,
2249 iov_offset, bytes_read))
Steve French93544cc2006-02-14 22:30:52 -06002250 rc = -EFAULT;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002251 if (buf_type == CIFS_SMALL_BUFFER)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002252 cifs_small_buf_release(read_data);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002253 else if (buf_type == CIFS_LARGE_BUFFER)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002254 cifs_buf_release(read_data);
2255 read_data = NULL;
2256 iov_offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 }
2258 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 if (rc || (bytes_read == 0)) {
2261 if (total_read) {
2262 break;
2263 } else {
2264 FreeXid(xid);
2265 return rc;
2266 }
2267 } else {
Steve Frencha45443472005-08-24 13:59:35 -07002268 cifs_stats_bytes_read(pTcon, bytes_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 *poffset += bytes_read;
2270 }
2271 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 FreeXid(xid);
2274 return total_read;
2275}
2276
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002277ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002278 unsigned long nr_segs, loff_t pos)
2279{
2280 ssize_t read;
2281
2282 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2283 if (read > 0)
2284 iocb->ki_pos = pos;
2285
2286 return read;
2287}
2288
2289ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2290 unsigned long nr_segs, loff_t pos)
2291{
2292 struct inode *inode;
2293
2294 inode = iocb->ki_filp->f_path.dentry->d_inode;
2295
2296 if (CIFS_I(inode)->clientCanCacheRead)
2297 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2298
2299 /*
2300 * In strict cache mode we need to read from the server all the time
2301 * if we don't have level II oplock because the server can delay mtime
2302 * change - so we can't make a decision about inode invalidating.
2303 * And we can also fail with pagereading if there are mandatory locks
2304 * on pages affected by this read but not on the region from pos to
2305 * pos+len-1.
2306 */
2307
2308 return cifs_user_readv(iocb, iov, nr_segs, pos);
2309}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310
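/*
 * Synchronous read into a kernel buffer (used by cifs_readpage_worker()
 * below), issued in chunks of at most rsize bytes and retried on -EAGAIN
 * after reopening an invalidated handle.
 */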
2311static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002312 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
2314 int rc = -EACCES;
2315 unsigned int bytes_read = 0;
2316 unsigned int total_read;
2317 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002318 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00002320 struct cifs_tcon *pTcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 int xid;
2322 char *current_offset;
2323 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002324 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002325 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002326 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
2328 xid = GetXid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002329 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002331 /* FIXME: set up handlers for larger reads and/or convert to async */
2332 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302335 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302337 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002339 open_file = file->private_data;
Jeff Layton13cfb732010-09-29 19:51:11 -04002340 pTcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002342 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2343 pid = open_file->pid;
2344 else
2345 pid = current->tgid;
2346
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002348 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002350 for (total_read = 0, current_offset = read_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 read_size > total_read;
2352 total_read += bytes_read, current_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002353 current_read_size = min_t(uint, read_size - total_read, rsize);
2354
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002355		/* For Windows ME and 9x we do not want to request more
2356		than was negotiated since the server will refuse the read otherwise */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002357 if ((pTcon->ses) &&
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002358 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002359 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002360 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002361 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 rc = -EAGAIN;
2363 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002364 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002365 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 if (rc != 0)
2367 break;
2368 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002369 io_parms.netfid = open_file->netfid;
2370 io_parms.pid = pid;
2371 io_parms.tcon = pTcon;
2372 io_parms.offset = *poffset;
2373 io_parms.length = current_read_size;
2374 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2375 &current_offset, &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 }
2377 if (rc || (bytes_read == 0)) {
2378 if (total_read) {
2379 break;
2380 } else {
2381 FreeXid(xid);
2382 return rc;
2383 }
2384 } else {
Steve Frencha45443472005-08-24 13:59:35 -07002385 cifs_stats_bytes_read(pTcon, total_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 *poffset += bytes_read;
2387 }
2388 }
2389 FreeXid(xid);
2390 return total_read;
2391}
2392
Jeff Laytonca83ce32011-04-12 09:13:44 -04002393/*
2394 * If the page is mmap'ed into a process' page tables, then we need to make
2395 * sure that it doesn't change while being written back.
2396 */
2397static int
2398cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2399{
2400 struct page *page = vmf->page;
2401
2402 lock_page(page);
2403 return VM_FAULT_LOCKED;
2404}
2405
2406static struct vm_operations_struct cifs_file_vm_ops = {
2407 .fault = filemap_fault,
2408 .page_mkwrite = cifs_page_mkwrite,
2409};
2410
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002411int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2412{
2413 int rc, xid;
2414 struct inode *inode = file->f_path.dentry->d_inode;
2415
2416 xid = GetXid();
2417
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002418 if (!CIFS_I(inode)->clientCanCacheRead) {
2419 rc = cifs_invalidate_mapping(inode);
2420 if (rc)
2421 return rc;
2422 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002423
2424 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002425 if (rc == 0)
2426 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002427 FreeXid(xid);
2428 return rc;
2429}
2430
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2432{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 int rc, xid;
2434
2435 xid = GetXid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002436 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002438 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 FreeXid(xid);
2440 return rc;
2441 }
2442 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002443 if (rc == 0)
2444 vma->vm_ops = &cifs_file_vm_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 FreeXid(xid);
2446 return rc;
2447}
2448
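/*
 * Readahead: after trying fscache, peel pages off the tail of page_list,
 * batching consecutive indexes until the next page would push the request
 * over rsize, then hand each batch to cifs_async_readv().  On failure the
 * pages are put back on the LRU and released so the VFS can fall back to
 * readpage.
 */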
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449static int cifs_readpages(struct file *file, struct address_space *mapping,
2450 struct list_head *page_list, unsigned num_pages)
2451{
Jeff Layton690c5e32011-10-19 15:30:16 -04002452 int rc;
2453 struct list_head tmplist;
2454 struct cifsFileInfo *open_file = file->private_data;
2455 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2456 unsigned int rsize = cifs_sb->rsize;
2457 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
Jeff Layton690c5e32011-10-19 15:30:16 -04002459 /*
2460 * Give up immediately if rsize is too small to read an entire page.
2461 * The VFS will fall back to readpage. We should never reach this
2462 * point however since we set ra_pages to 0 when the rsize is smaller
2463 * than a cache page.
2464 */
2465 if (unlikely(rsize < PAGE_CACHE_SIZE))
2466 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07002467
Suresh Jayaraman56698232010-07-05 18:13:25 +05302468 /*
2469 * Reads as many pages as possible from fscache. Returns -ENOBUFS
2470 * immediately if the cookie is negative
2471 */
2472 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2473 &num_pages);
2474 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04002475 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05302476
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002477 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2478 pid = open_file->pid;
2479 else
2480 pid = current->tgid;
2481
Jeff Layton690c5e32011-10-19 15:30:16 -04002482 rc = 0;
2483 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
Jeff Layton690c5e32011-10-19 15:30:16 -04002485 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
2486 mapping, num_pages);
2487
2488 /*
2489 * Start with the page at end of list and move it to private
2490 * list. Do the same with any following pages until we hit
2491 * the rsize limit, hit an index discontinuity, or run out of
2492 * pages. Issue the async read and then start the loop again
2493 * until the list is empty.
2494 *
2495 * Note that list order is important. The page_list is in
2496 * the order of declining indexes. When we put the pages in
2497 * the rdata->pages, then we want them in increasing order.
2498 */
2499 while (!list_empty(page_list)) {
2500 unsigned int bytes = PAGE_CACHE_SIZE;
2501 unsigned int expected_index;
2502 unsigned int nr_pages = 1;
2503 loff_t offset;
2504 struct page *page, *tpage;
2505 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506
2507 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508
Jeff Layton690c5e32011-10-19 15:30:16 -04002509 /*
2510 * Lock the page and put it in the cache. Since no one else
2511 * should have access to this page, we're safe to simply set
2512 * PG_locked without checking it first.
2513 */
2514 __set_page_locked(page);
2515 rc = add_to_page_cache_locked(page, mapping,
2516 page->index, GFP_KERNEL);
2517
2518 /* give up if we can't stick it in the cache */
2519 if (rc) {
2520 __clear_page_locked(page);
2521 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
Jeff Layton690c5e32011-10-19 15:30:16 -04002524 /* move first page to the tmplist */
2525 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2526 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
Jeff Layton690c5e32011-10-19 15:30:16 -04002528 /* now try and add more pages onto the request */
2529 expected_index = page->index + 1;
2530 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
2531 /* discontinuity ? */
2532 if (page->index != expected_index)
2533 break;
2534
2535 /* would this page push the read over the rsize? */
2536 if (bytes + PAGE_CACHE_SIZE > rsize)
2537 break;
2538
2539 __set_page_locked(page);
2540 if (add_to_page_cache_locked(page, mapping,
2541 page->index, GFP_KERNEL)) {
2542 __clear_page_locked(page);
2543 break;
2544 }
2545 list_move_tail(&page->lru, &tmplist);
2546 bytes += PAGE_CACHE_SIZE;
2547 expected_index++;
2548 nr_pages++;
2549 }
2550
2551 rdata = cifs_readdata_alloc(nr_pages);
2552 if (!rdata) {
2553 /* best to give up if we're out of mem */
2554 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
2555 list_del(&page->lru);
2556 lru_cache_add_file(page);
2557 unlock_page(page);
2558 page_cache_release(page);
2559 }
2560 rc = -ENOMEM;
2561 break;
2562 }
2563
2564 spin_lock(&cifs_file_list_lock);
2565 cifsFileInfo_get(open_file);
2566 spin_unlock(&cifs_file_list_lock);
2567 rdata->cfile = open_file;
2568 rdata->mapping = mapping;
2569 rdata->offset = offset;
2570 rdata->bytes = bytes;
2571 rdata->pid = pid;
2572 list_splice_init(&tmplist, &rdata->pages);
2573
2574 do {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002575 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002576 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 if (rc != 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04002578 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 }
Jeff Layton690c5e32011-10-19 15:30:16 -04002580 rc = cifs_async_readv(rdata);
2581 } while (rc == -EAGAIN);
2582
2583 if (rc != 0) {
2584 list_for_each_entry_safe(page, tpage, &rdata->pages,
2585 lru) {
2586 list_del(&page->lru);
2587 lru_cache_add_file(page);
2588 unlock_page(page);
2589 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 }
Jeff Layton690c5e32011-10-19 15:30:16 -04002591 cifs_readdata_free(rdata);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 break;
2593 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 }
2595
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 return rc;
2597}
2598
2599static int cifs_readpage_worker(struct file *file, struct page *page,
2600 loff_t *poffset)
2601{
2602 char *read_data;
2603 int rc;
2604
Suresh Jayaraman56698232010-07-05 18:13:25 +05302605 /* Is the page cached? */
2606 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2607 if (rc == 0)
2608 goto read_complete;
2609
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 page_cache_get(page);
2611 read_data = kmap(page);
2612	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002615
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 if (rc < 0)
2617 goto io_error;
2618 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00002619 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002620
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002621 file->f_path.dentry->d_inode->i_atime =
2622 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 if (PAGE_CACHE_SIZE > rc)
2625 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2626
2627 flush_dcache_page(page);
2628 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05302629
2630 /* send this page to the cache */
2631 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002636 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05302638
2639read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 return rc;
2641}
2642
2643static int cifs_readpage(struct file *file, struct page *page)
2644{
2645 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2646 int rc = -EACCES;
2647 int xid;
2648
2649 xid = GetXid();
2650
2651 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302652 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302654 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 }
2656
Joe Perchesb6b38f72010-04-21 03:50:45 +00002657 cFYI(1, "readpage %p at offset %d 0x%x\n",
2658 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
2660 rc = cifs_readpage_worker(file, page, &offset);
2661
2662 unlock_page(page);
2663
2664 FreeXid(xid);
2665 return rc;
2666}
2667
Steve Frencha403a0a2007-07-26 15:54:16 +00002668static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2669{
2670 struct cifsFileInfo *open_file;
2671
Jeff Layton44772882010-10-15 15:34:03 -04002672 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002673 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04002674 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04002675 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002676 return 1;
2677 }
2678 }
Jeff Layton44772882010-10-15 15:34:03 -04002679 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002680 return 0;
2681}
2682
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683/* We do not want to update the file size from server for inodes
2684 open for write - to avoid races with writepage extending
2685 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002686 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 but this is tricky to do without racing with writebehind
2688 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00002689bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690{
Steve Frencha403a0a2007-07-26 15:54:16 +00002691 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00002692 return true;
Steve French23e7dd72005-10-20 13:44:56 -07002693
Steve Frencha403a0a2007-07-26 15:54:16 +00002694 if (is_inode_writable(cifsInode)) {
2695 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08002696 struct cifs_sb_info *cifs_sb;
2697
Steve Frenchc32a0b62006-01-12 14:41:28 -08002698 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00002699 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002700 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08002701 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00002702 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08002703 }
2704
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002705 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00002706 return true;
Steve French7ba52632007-02-08 18:14:13 +00002707
Steve French4b18f2a2008-04-29 00:06:05 +00002708 return false;
Steve French23e7dd72005-10-20 13:44:56 -07002709 } else
Steve French4b18f2a2008-04-29 00:06:05 +00002710 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711}
2712
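/*
 * Prepare a page for a buffered write.  With a read oplock held, a write
 * that starts beyond EOF or covers all existing data in the page skips the
 * read from the server and just zeroes the untouched parts (PageChecked
 * marks them as effectively up to date); otherwise, for a read-write open,
 * the page is read in first via cifs_readpage_worker().
 */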
Nick Piggind9414772008-09-24 11:32:59 -04002713static int cifs_write_begin(struct file *file, struct address_space *mapping,
2714 loff_t pos, unsigned len, unsigned flags,
2715 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716{
Nick Piggind9414772008-09-24 11:32:59 -04002717 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2718 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002719 loff_t page_start = pos & PAGE_MASK;
2720 loff_t i_size;
2721 struct page *page;
2722 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723
Joe Perchesb6b38f72010-04-21 03:50:45 +00002724 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04002725
Nick Piggin54566b22009-01-04 12:00:53 -08002726 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002727 if (!page) {
2728 rc = -ENOMEM;
2729 goto out;
2730 }
Nick Piggind9414772008-09-24 11:32:59 -04002731
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002732 if (PageUptodate(page))
2733 goto out;
Steve French8a236262007-03-06 00:31:00 +00002734
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002735 /*
2736 * If we write a full page it will be up to date, no need to read from
2737 * the server. If the write is short, we'll end up doing a sync write
2738 * instead.
2739 */
2740 if (len == PAGE_CACHE_SIZE)
2741 goto out;
2742
2743 /*
2744 * optimize away the read when we have an oplock, and we're not
2745 * expecting to use any of the data we'd be reading in. That
2746 * is, when the page lies beyond the EOF, or straddles the EOF
2747 * and the write will cover all of the existing data.
2748 */
2749 if (CIFS_I(mapping->host)->clientCanCacheRead) {
2750 i_size = i_size_read(mapping->host);
2751 if (page_start >= i_size ||
2752 (offset == 0 && (pos + len) >= i_size)) {
2753 zero_user_segments(page, 0, offset,
2754 offset + len,
2755 PAGE_CACHE_SIZE);
2756 /*
2757 * PageChecked means that the parts of the page
2758 * to which we're not writing are considered up
2759 * to date. Once the data is copied to the
2760 * page, it can be set uptodate.
2761 */
2762 SetPageChecked(page);
2763 goto out;
2764 }
2765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
Nick Piggind9414772008-09-24 11:32:59 -04002767 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002768 /*
2769 * might as well read a page, it is fast enough. If we get
2770 * an error, we don't need to return it. cifs_write_end will
2771 * do a sync write instead since PG_uptodate isn't set.
2772 */
2773 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00002774 } else {
2775		/* We could try using another open file handle if one exists,
2776		   but how would we lock it to prevent a close of that handle
2777		   from racing with this read? In any case, the data
Nick Piggind9414772008-09-24 11:32:59 -04002778		   will be written out by write_end, so this is fine. */
Steve French8a236262007-03-06 00:31:00 +00002779 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002780out:
2781 *pagep = page;
2782 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783}
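
/*
 * Hedged sketch of the write_end side of the PG_checked contract set up
 * above (cifs_write_end appears earlier in this file; only the page-state
 * handling is shown, and example_write_end_page_state is an invented name):
 * if write_begin zeroed the uncopied ranges and set PageChecked, a complete
 * copy of the requested bytes is enough to mark the whole page uptodate.
 */
static void example_write_end_page_state(struct page *page, unsigned len,
					 unsigned copied)
{
	if (PageChecked(page)) {
		/* the untouched parts of the page were zeroed in write_begin */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) {
		/* an entire page of data was copied in */
		SetPageUptodate(page);
	}
}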
2784
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05302785static int cifs_release_page(struct page *page, gfp_t gfp)
2786{
2787 if (PagePrivate(page))
2788 return 0;
2789
2790 return cifs_fscache_release_page(page, gfp);
2791}
2792
2793static void cifs_invalidate_page(struct page *page, unsigned long offset)
2794{
2795 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2796
2797 if (offset == 0)
2798 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2799}
2800
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002801static int cifs_launder_page(struct page *page)
2802{
2803 int rc = 0;
2804 loff_t range_start = page_offset(page);
2805 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2806 struct writeback_control wbc = {
2807 .sync_mode = WB_SYNC_ALL,
2808 .nr_to_write = 0,
2809 .range_start = range_start,
2810 .range_end = range_end,
2811 };
2812
2813 cFYI(1, "Launder page: %p", page);
2814
2815 if (clear_page_dirty_for_io(page))
2816 rc = cifs_writepage_locked(page, &wbc);
2817
2818 cifs_fscache_invalidate_page(page, page->mapping->host);
2819 return rc;
2820}
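
/*
 * For context, a hedged sketch of how the VM drives ->launder_page (modeled
 * on do_launder_page() in mm/truncate.c; example_launder is an invented
 * name): during invalidate_inode_pages2() the page arrives locked and
 * possibly dirty, so it must be written back synchronously before it can be
 * dropped, which is what cifs_launder_page does above.
 */
static int example_launder(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}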
2821
Tejun Heo9b646972010-07-20 22:09:02 +02002822void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04002823{
2824 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2825 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04002826 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04002827 struct cifsInodeInfo *cinode = CIFS_I(inode);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002828 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04002829
2830 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00002831 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05002832 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00002833 else
Al Viro8737c932009-12-24 06:47:55 -05002834 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04002835 rc = filemap_fdatawrite(inode->i_mapping);
2836 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002837 rc = filemap_fdatawait(inode->i_mapping);
2838 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04002839 invalidate_remote_inode(inode);
2840 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00002841 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04002842 }
2843
Pavel Shilovsky85160e02011-10-22 15:33:29 +04002844 rc = cifs_push_locks(cfile);
2845 if (rc)
2846 cERROR(1, "Push locks rc = %d", rc);
2847
Jeff Layton3bc303c2009-09-21 06:47:50 -04002848 /*
2849	 * Releasing a stale oplock after a recent reconnect of the SMB session,
2850	 * using a now-incorrect file handle, is not a data integrity issue. But
2851	 * do not bother sending an oplock release if the session to the server
2852	 * is still disconnected, since the server has already released the oplock.
2853 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00002854 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04002855 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
2856 current->tgid, 0, 0, 0, 0,
2857 LOCKING_ANDX_OPLOCK_RELEASE, false,
Pavel Shilovsky12fed002011-01-17 20:15:44 +03002858 cinode->clientCanCacheRead ? 1 : 0);
Joe Perchesb6b38f72010-04-21 03:50:45 +00002859 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04002860 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04002861}
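
/*
 * Hedged sketch of how the break is handed off to the work item above when
 * the server's oplock break notification is parsed (the real hand-off lives
 * in is_valid_oplock_break() in misc.c; example_queue_oplock_break and the
 * particular workqueue used here are illustrative assumptions):
 */
static void example_queue_oplock_break(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	/* the client may no longer cache writes once a break arrives */
	cinode->clientCanCacheAll = false;

	/* cfile->oplock_break was INIT_WORK()ed to cifs_oplock_break at open */
	queue_work(system_nrt_wq, &cfile->oplock_break);
}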
2862
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07002863const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 .readpage = cifs_readpage,
2865 .readpages = cifs_readpages,
2866 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07002867 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04002868 .write_begin = cifs_write_begin,
2869 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05302871 .releasepage = cifs_release_page,
2872 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002873 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00002875
2876/*
2877 * cifs_readpages requires the server to support a buffer large enough to
2878 * contain the header plus one complete page of data. Otherwise, we need
2879 * to leave cifs_readpages out of the address space operations.
2880 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07002881const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00002882 .readpage = cifs_readpage,
2883 .writepage = cifs_writepage,
2884 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04002885 .write_begin = cifs_write_begin,
2886 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00002887 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05302888 .releasepage = cifs_release_page,
2889 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002890 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00002891};
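
/*
 * The choice between the two operation tables above is made when the inode's
 * operations are set up, based on whether the negotiated server buffer can
 * hold a header plus a full page of data. A hedged sketch (the real selection
 * is in cifs_set_ops() in fs/cifs/inode.c; example_pick_aops is an invented
 * name):
 */
static void example_pick_aops(struct inode *inode, struct cifs_sb_info *cifs_sb)
{
	struct TCP_Server_Info *server =
		cifs_sb_master_tcon(cifs_sb)->ses->server;

	if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		inode->i_data.a_ops = &cifs_addr_ops_smallbuf; /* no ->readpages */
	else
		inode->i_data.a_ops = &cifs_addr_ops;
}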