blob: 0a11dbbbb1310a5f545880d12e6b22ad7a403482 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046static inline int cifs_convert_flags(unsigned int flags)
47{
48 if ((flags & O_ACCMODE) == O_RDONLY)
49 return GENERIC_READ;
50 else if ((flags & O_ACCMODE) == O_WRONLY)
51 return GENERIC_WRITE;
52 else if ((flags & O_ACCMODE) == O_RDWR) {
53 /* GENERIC_ALL is too much permission to request
54 can cause unnecessary access denied on create */
55 /* return GENERIC_ALL; */
56 return (GENERIC_READ | GENERIC_WRITE);
57 }
58
Jeff Laytone10f7b52008-05-14 10:21:33 -070059 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
60 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
61 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000062}
Jeff Laytone10f7b52008-05-14 10:21:33 -070063
Jeff Layton608712f2010-10-15 15:33:56 -040064static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000065{
Jeff Layton608712f2010-10-15 15:33:56 -040066 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070067
Steve French7fc8f4e2009-02-23 20:43:11 +000068 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040069 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000070 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040071 posix_flags = SMB_O_WRONLY;
72 else if ((flags & O_ACCMODE) == O_RDWR)
73 posix_flags = SMB_O_RDWR;
74
75 if (flags & O_CREAT)
76 posix_flags |= SMB_O_CREAT;
77 if (flags & O_EXCL)
78 posix_flags |= SMB_O_EXCL;
79 if (flags & O_TRUNC)
80 posix_flags |= SMB_O_TRUNC;
81 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010082 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040083 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000084 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040085 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000086 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040087 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000088 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040089 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000090
91 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94static inline int cifs_get_disposition(unsigned int flags)
95{
96 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
97 return FILE_CREATE;
98 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
99 return FILE_OVERWRITE_IF;
100 else if ((flags & O_CREAT) == O_CREAT)
101 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000102 else if ((flags & O_TRUNC) == O_TRUNC)
103 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 else
105 return FILE_OPEN;
106}
107
/*
 * Open a file via the SMB POSIX extensions (CIFSPOSIXCreate) and, when
 * the caller supplies an inode pointer, instantiate or refresh the
 * in-core inode from the FILE_UNIX_BASIC_INFO the server returns.
 *
 * @full_path: path of the file relative to the share root
 * @pinode:    in/out inode; may be NULL if the caller needs no inode info.
 *             If *pinode is NULL a new inode is obtained via cifs_iget().
 * @sb:        superblock of the mount
 * @mode:      create mode; the current umask is masked off below
 * @f_flags:   POSIX open(2) flags, converted to SMB_O_* bits
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: server file handle
 * @xid:       transaction id for request accounting
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference no longer needed once the request has been sent */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server sent no file metadata back */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
169
/*
 * Open a file using the regular (non-POSIX-extension) SMB open calls.
 * Picks CIFSSMBOpen when the server supports NT SMBs, otherwise falls
 * back to SMBLegacyOpen, then refreshes the inode metadata.
 *
 * @full_path: path of the file relative to the share root
 * @inode:     inode of the file being opened (metadata is refreshed)
 * @cifs_sb:   per-superblock CIFS data (mount flags, charset)
 * @tcon:      tree connection to issue the open on
 * @f_flags:   POSIX open(2) flags; mapped to access mask + disposition
 * @poplock:   out: oplock level granted by the server
 * @pnetfid:   out: server file handle
 * @xid:       transaction id for request accounting
 *
 * Returns 0 on success or a negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the server returns with the open */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, create_options, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		/*
		 * NOTE(review): the legacy path passes CREATE_NOT_DIR
		 * directly rather than create_options, so the
		 * CREATE_OPEN_BACKUP_INTENT bit set above is not forwarded
		 * here — confirm whether SMBLegacyOpen can honor it.
		 */
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh inode metadata now that the file is open on the server */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
243
/*
 * Allocate and initialize the per-open-file CIFS bookkeeping structure,
 * link it into the tcon's and inode's open-file lists, and attach it to
 * file->private_data.  The new structure starts with a refcount of 1
 * (dropped by cifsFileInfo_put).
 *
 * @fileHandle: server netfid returned by the open call
 * @file:       VFS file this fileinfo backs
 * @tlink:      tcon link; an extra reference is taken here
 * @oplock:     oplock level granted on the open
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	/* both list insertions are protected by the global file-list lock */
	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);
	/* brlock caching is only allowed while we hold an exclusive oplock */
	pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

	file->private_data = pCifsFile;
	return pCifsFile;
}
283
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400284static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
285
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference drops this also unlinks the fileinfo from the
 * tcon and inode lists, cancels any pending oplock-break work, closes
 * the handle on the server (if still valid), frees this handle's
 * outstanding byte-range lock records, and finally releases the tlink,
 * dentry and the structure itself.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing more to do */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* must not run concurrently with the oplock-break worker */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifsi->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
		/* only drop locks that belong to this file handle */
		if (li->netfid != cifs_file->netfid)
			continue;
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	mutex_unlock(&cifsi->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
350
/*
 * VFS ->open() entry point for regular files. Tries a POSIX-extensions
 * open first when the server advertises support, falling back to the
 * regular NT/legacy open path, then wires up the cifsFileInfo and
 * fscache cookie for this open instance.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	/* only request an oplock if the server grants them at all */
	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims POSIX support but can't do the
			   open; remember that so we stop trying */
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* close the just-opened server handle so it doesn't leak */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
453
/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.  Currently a stub: no locks are re-sent.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	/* BB list all locks open on this file and relock */
	return 0;
}
464
/*
 * Re-open a file handle that was invalidated (typically after a session
 * reconnect).  Serializes on the fileinfo's fh_mutex, retries the POSIX
 * open path first when available, and optionally flushes dirty data and
 * refreshes inode metadata before relocking.
 *
 * @pCifsFile: the open instance whose server handle needs re-opening
 * @can_flush: true if it is safe to flush the mapping and requery the
 *             inode (false when called from a writeback context where
 *             flushing could deadlock)
 *
 * Returns 0 on success (or if the handle was already valid), else a
 * negative errno.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else reopened it while we waited on the mutex */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 create_options, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
590
591int cifs_close(struct inode *inode, struct file *file)
592{
Jeff Layton77970692011-04-05 16:23:47 -0700593 if (file->private_data != NULL) {
594 cifsFileInfo_put(file->private_data);
595 file->private_data = NULL;
596 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
Steve Frenchcdff08e2010-10-21 22:46:14 +0000598 /* return code from the ->release op is always ignored */
599 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600}
601
/*
 * VFS ->release() entry point for directories opened via readdir.
 * Closes an in-progress server-side search (if one is still open),
 * releases the cached network response buffer, and frees the private
 * search state attached to the file.
 *
 * Always returns 0; server-side close failures are logged and ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid under the lock, then drop it before
			   issuing the (blocking) network close */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* buffer came from one of two pools; release to the
			   matching one */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
646
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400647static struct cifsLockInfo *
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400648cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000649{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400650 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400652 if (!lock)
653 return lock;
654 lock->offset = offset;
655 lock->length = length;
656 lock->type = type;
657 lock->netfid = netfid;
658 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&lock->block_q);
661 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400662}
663
/*
 * Wake up every lock request currently blocked on @lock: unlink each
 * waiter from the blocker's blist and signal its block_q so it can
 * retry acquiring the lock.
 */
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
673
/*
 * Copied from fs/locks.c with small changes.
 * Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * lock_flocks()/unlock_flocks() serialize against the core file-lock
 * code that manipulates the same fl_block/fl_link lists.
 */
static void
cifs_locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
	unlock_flocks();
}
688
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400689static bool
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400690__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400691 __u64 length, __u8 type, __u16 netfid,
692 struct cifsLockInfo **conf_lock)
693{
694 struct cifsLockInfo *li, *tmp;
695
696 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
697 if (offset + length <= li->offset ||
698 offset >= li->offset + li->length)
699 continue;
700 else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
701 ((netfid == li->netfid && current->tgid == li->pid) ||
702 type == li->type))
703 continue;
704 else {
705 *conf_lock = li;
706 return true;
707 }
708 }
709 return false;
710}
711
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400712static bool
713cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
714 struct cifsLockInfo **conf_lock)
715{
716 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
717 lock->type, lock->netfid, conf_lock);
718}
719
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300720/*
721 * Check if there is another lock that prevents us to set the lock (mandatory
722 * style). If such a lock exists, update the flock structure with its
723 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
724 * or leave it the same if we can't. Returns 0 if we don't need to request to
725 * the server or 1 otherwise.
726 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400727static int
728cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
729 __u8 type, __u16 netfid, struct file_lock *flock)
730{
731 int rc = 0;
732 struct cifsLockInfo *conf_lock;
733 bool exist;
734
735 mutex_lock(&cinode->lock_mutex);
736
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400737 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
738 &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400739 if (exist) {
740 flock->fl_start = conf_lock->offset;
741 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
742 flock->fl_pid = conf_lock->pid;
743 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
744 flock->fl_type = F_RDLCK;
745 else
746 flock->fl_type = F_WRLCK;
747 } else if (!cinode->can_cache_brlcks)
748 rc = 1;
749 else
750 flock->fl_type = F_UNLCK;
751
752 mutex_unlock(&cinode->lock_mutex);
753 return rc;
754}
755
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400756static void
757cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400758{
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400759 mutex_lock(&cinode->lock_mutex);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400760 list_add_tail(&lock->llist, &cinode->llist);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +0400761 mutex_unlock(&cinode->lock_mutex);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000762}
763
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300764/*
765 * Set the byte-range lock (mandatory style). Returns:
766 * 1) 0, if we set the lock and don't need to request to the server;
767 * 2) 1, if no locks prevent us but we need to request to the server;
768 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
769 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400770static int
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400771cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
772 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400773{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400774 struct cifsLockInfo *conf_lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400775 bool exist;
776 int rc = 0;
777
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400778try_again:
779 exist = false;
780 mutex_lock(&cinode->lock_mutex);
781
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400782 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400783 if (!exist && cinode->can_cache_brlcks) {
784 list_add_tail(&lock->llist, &cinode->llist);
785 mutex_unlock(&cinode->lock_mutex);
786 return rc;
787 }
788
789 if (!exist)
790 rc = 1;
791 else if (!wait)
792 rc = -EACCES;
793 else {
794 list_add_tail(&lock->blist, &conf_lock->blist);
795 mutex_unlock(&cinode->lock_mutex);
796 rc = wait_event_interruptible(lock->block_q,
797 (lock->blist.prev == &lock->blist) &&
798 (lock->blist.next == &lock->blist));
799 if (!rc)
800 goto try_again;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400801 mutex_lock(&cinode->lock_mutex);
802 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400803 }
804
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400805 mutex_unlock(&cinode->lock_mutex);
806 return rc;
807}
808
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300809/*
810 * Check if there is another lock that prevents us to set the lock (posix
811 * style). If such a lock exists, update the flock structure with its
812 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
813 * or leave it the same if we can't. Returns 0 if we don't need to request to
814 * the server or 1 otherwise.
815 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400816static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400817cifs_posix_lock_test(struct file *file, struct file_lock *flock)
818{
819 int rc = 0;
820 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
821 unsigned char saved_type = flock->fl_type;
822
Pavel Shilovsky50792762011-10-29 17:17:57 +0400823 if ((flock->fl_flags & FL_POSIX) == 0)
824 return 1;
825
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400826 mutex_lock(&cinode->lock_mutex);
827 posix_test_lock(file, flock);
828
829 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
830 flock->fl_type = saved_type;
831 rc = 1;
832 }
833
834 mutex_unlock(&cinode->lock_mutex);
835 return rc;
836}
837
Pavel Shilovskyb5efb972012-03-27 15:36:15 +0400838/* Called with locked lock_mutex, return with unlocked. */
839static int
840cifs_posix_lock_file_wait_locked(struct file *file, struct file_lock *flock)
841{
842 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
843 int rc;
844
845 while (true) {
846 rc = posix_lock_file(file, flock, NULL);
847 mutex_unlock(&cinode->lock_mutex);
848 if (rc != FILE_LOCK_DEFERRED)
849 break;
850 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
851 if (!rc) {
852 mutex_lock(&cinode->lock_mutex);
853 continue;
854 }
855 cifs_locks_delete_block(flock);
856 break;
857 }
858 return rc;
859}
860
861static int
862cifs_posix_lock_file_wait(struct file *file, struct file_lock *flock)
863{
864 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
865
866 mutex_lock(&cinode->lock_mutex);
867 /* lock_mutex will be released by the function below */
868 return cifs_posix_lock_file_wait_locked(file, flock);
869}
870
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300871/*
872 * Set the byte-range lock (posix style). Returns:
873 * 1) 0, if we set the lock and don't need to request to the server;
874 * 2) 1, if we need to request to the server;
875 * 3) <0, if the error occurs while setting the lock.
876 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400877static int
878cifs_posix_lock_set(struct file *file, struct file_lock *flock)
879{
880 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400881 int rc = 1;
882
883 if ((flock->fl_flags & FL_POSIX) == 0)
884 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400885
886 mutex_lock(&cinode->lock_mutex);
887 if (!cinode->can_cache_brlcks) {
888 mutex_unlock(&cinode->lock_mutex);
Pavel Shilovsky50792762011-10-29 17:17:57 +0400889 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400890 }
Pavel Shilovskyb5efb972012-03-27 15:36:15 +0400891
892 /* lock_mutex will be released by the function below */
893 return cifs_posix_lock_file_wait_locked(file, flock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400894}
895
/*
 * Push all cached mandatory byte-range locks for this open file to the
 * server, batching them into as few LOCKING_ANDX requests as fit in a
 * single SMB buffer. Clears can_cache_brlcks when done. Returns 0 on
 * success or the last non-zero rc from cifs_lockv().
 */
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	int xid, rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* exclusive ranges first, shared ranges second */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = GetXid();
	tcon = tlink_tcon(cfile->tlink);

	mutex_lock(&cinode->lock_mutex);
	if (!cinode->can_cache_brlcks) {
		/* locks already pushed (or never cached) - nothing to do */
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	/*
	 * Number of lock ranges that fit in one request.
	 * NOTE(review): assumes maxBuf > sizeof(struct smb_hdr); a zero or
	 * tiny negotiated buffer would make max_num bogus - verify callers.
	 */
	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		/* note: rc is still 0 here, matching historical behavior */
		mutex_unlock(&cinode->lock_mutex);
		FreeXid(xid);
		return rc;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
						       li->type, 0, num, buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* send the final, partially-filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	mutex_unlock(&cinode->lock_mutex);

	kfree(buf);
	FreeXid(xid);
	return rc;
}
965
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400966/* copied from fs/locks.c with a name change */
967#define cifs_for_each_lock(inode, lockp) \
968 for (lockp = &inode->i_flock; *lockp != NULL; \
969 lockp = &(*lockp)->fl_next)
970
Pavel Shilovskyd5751462012-03-05 09:39:20 +0300971struct lock_to_push {
972 struct list_head llist;
973 __u64 offset;
974 __u64 length;
975 __u32 pid;
976 __u16 netfid;
977 __u8 type;
978};
979
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400980static int
981cifs_push_posix_locks(struct cifsFileInfo *cfile)
982{
983 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
984 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
985 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +0300986 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400987 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +0300988 struct list_head locks_to_send, *el;
989 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400990 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400991
992 xid = GetXid();
993
994 mutex_lock(&cinode->lock_mutex);
995 if (!cinode->can_cache_brlcks) {
996 mutex_unlock(&cinode->lock_mutex);
997 FreeXid(xid);
998 return rc;
999 }
1000
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001001 lock_flocks();
1002 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001003 if ((*before)->fl_flags & FL_POSIX)
1004 count++;
1005 }
1006 unlock_flocks();
1007
1008 INIT_LIST_HEAD(&locks_to_send);
1009
1010 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001011 * Allocating count locks is enough because no FL_POSIX locks can be
1012 * added to the list while we are holding cinode->lock_mutex that
1013 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001014 */
1015 for (; i < count; i++) {
1016 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1017 if (!lck) {
1018 rc = -ENOMEM;
1019 goto err_out;
1020 }
1021 list_add_tail(&lck->llist, &locks_to_send);
1022 }
1023
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001024 el = locks_to_send.next;
1025 lock_flocks();
1026 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001027 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001028 if ((flock->fl_flags & FL_POSIX) == 0)
1029 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001030 if (el == &locks_to_send) {
1031 /*
1032 * The list ended. We don't have enough allocated
1033 * structures - something is really wrong.
1034 */
1035 cERROR(1, "Can't push all brlocks!");
1036 break;
1037 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001038 length = 1 + flock->fl_end - flock->fl_start;
1039 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1040 type = CIFS_RDLCK;
1041 else
1042 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001043 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001044 lck->pid = flock->fl_pid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001045 lck->netfid = cfile->netfid;
1046 lck->length = length;
1047 lck->type = type;
1048 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001049 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001050 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001051 unlock_flocks();
1052
1053 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1054 struct file_lock tmp_lock;
1055 int stored_rc;
1056
1057 tmp_lock.fl_start = lck->offset;
1058 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1059 0, lck->length, &tmp_lock,
1060 lck->type, 0);
1061 if (stored_rc)
1062 rc = stored_rc;
1063 list_del(&lck->llist);
1064 kfree(lck);
1065 }
1066
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001067out:
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001068 cinode->can_cache_brlcks = false;
1069 mutex_unlock(&cinode->lock_mutex);
1070
1071 FreeXid(xid);
1072 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001073err_out:
1074 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1075 list_del(&lck->llist);
1076 kfree(lck);
1077 }
1078 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001079}
1080
1081static int
1082cifs_push_locks(struct cifsFileInfo *cfile)
1083{
1084 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1085 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1086
1087 if ((tcon->ses->capabilities & CAP_UNIX) &&
1088 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1089 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1090 return cifs_push_posix_locks(cfile);
1091
1092 return cifs_push_mandatory_locks(cfile);
1093}
1094
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001095static void
1096cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
1097 bool *wait_flag)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001099 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001100 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001101 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001102 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001103 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001104 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001105 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001107 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001108 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001109 "not implemented yet");
1110 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001111 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001112 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001114 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001116 *type = LOCKING_ANDX_LARGE_FILES;
1117 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001118 cFYI(1, "F_WRLCK ");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001119 *lock = 1;
1120 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001121 cFYI(1, "F_UNLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001122 *unlock = 1;
1123 /* Check if unlock includes more than one lock range */
1124 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001125 cFYI(1, "F_RDLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001126 *type |= LOCKING_ANDX_SHARED_LOCK;
1127 *lock = 1;
1128 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001129 cFYI(1, "F_EXLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001130 *lock = 1;
1131 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001132 cFYI(1, "F_SHLCK");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001133 *type |= LOCKING_ANDX_SHARED_LOCK;
1134 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001136 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001137}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001139static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001140cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001141 bool wait_flag, bool posix_lck, int xid)
1142{
1143 int rc = 0;
1144 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001145 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1146 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001147 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001148 __u16 netfid = cfile->netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001150 if (posix_lck) {
1151 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001152
1153 rc = cifs_posix_lock_test(file, flock);
1154 if (!rc)
1155 return rc;
1156
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001157 if (type & LOCKING_ANDX_SHARED_LOCK)
1158 posix_lock_type = CIFS_RDLCK;
1159 else
1160 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001161 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1162 1 /* get */, length, flock,
1163 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 return rc;
1165 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001166
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001167 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
1168 flock);
1169 if (!rc)
1170 return rc;
1171
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001172 /* BB we could chain these into one lock request BB */
1173 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1174 flock->fl_start, 0, 1, type, 0, 0);
1175 if (rc == 0) {
1176 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1177 length, flock->fl_start, 1, 0,
1178 type, 0, 0);
1179 flock->fl_type = F_UNLCK;
1180 if (rc != 0)
1181 cERROR(1, "Error unlocking previously locked "
1182 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001183 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001184 }
1185
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001186 if (type & LOCKING_ANDX_SHARED_LOCK) {
1187 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001188 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001189 }
1190
1191 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
1192 flock->fl_start, 0, 1,
1193 type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
1194 if (rc == 0) {
1195 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
1196 length, flock->fl_start, 1, 0,
1197 type | LOCKING_ANDX_SHARED_LOCK,
1198 0, 0);
1199 flock->fl_type = F_RDLCK;
1200 if (rc != 0)
1201 cERROR(1, "Error unlocking previously locked "
1202 "range %d during test of lock", rc);
1203 } else
1204 flock->fl_type = F_WRLCK;
1205
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001206 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001207}
1208
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001209static void
1210cifs_move_llist(struct list_head *source, struct list_head *dest)
1211{
1212 struct list_head *li, *tmp;
1213 list_for_each_safe(li, tmp, source)
1214 list_move(li, dest);
1215}
1216
1217static void
1218cifs_free_llist(struct list_head *llist)
1219{
1220 struct cifsLockInfo *li, *tmp;
1221 list_for_each_entry_safe(li, tmp, llist, llist) {
1222 cifs_del_lock_waiters(li);
1223 list_del(&li->llist);
1224 kfree(li);
1225 }
1226}
1227
/*
 * Handle an unlock request: drop every cached lock that is wholly
 * inside the unlock range and owned by this tgid/netfid. When brlocks
 * are no longer cached locally, the dropped ranges are batched into
 * LOCKING_ANDX unlock requests; a failed batch is re-queued onto the
 * inode list so the cached state stays consistent with the server.
 */
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
	int rc = 0, stored_rc;
	/* exclusive ranges first, shared ranges second */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/* how many lock ranges fit into a single SMB request */
	max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cinode->lock_mutex);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
			/* only locks wholly inside the unlock range qualify */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (cfile->netfid != li->netfid)
				continue;
			if (types[i] != li->type)
				continue;
			if (!cinode->can_cache_brlcks) {
				cur->Pid = cpu_to_le16(li->pid);
				cur->LengthLow = cpu_to_le32((u32)li->length);
				cur->LengthHigh =
					cpu_to_le32((u32)(li->length>>32));
				cur->OffsetLow = cpu_to_le32((u32)li->offset);
				cur->OffsetHigh =
					cpu_to_le32((u32)(li->offset>>32));
				/*
				 * We need to save a lock here to let us add
				 * it again to the inode list if the unlock
				 * range request fails on the server.
				 */
				list_move(&li->llist, &tmp_llist);
				if (++num == max_num) {
					/* buffer full - flush this batch */
					stored_rc = cifs_lockv(xid, tcon,
							       cfile->netfid,
							       li->type, num,
							       0, buf);
					if (stored_rc) {
						/*
						 * We failed on the unlock range
						 * request - add all locks from
						 * the tmp list to the head of
						 * the inode list.
						 */
						cifs_move_llist(&tmp_llist,
								&cinode->llist);
						rc = stored_rc;
					} else
						/*
						 * The unlock range request
						 * succeed - free the tmp list.
						 */
						cifs_free_llist(&tmp_llist);
					cur = buf;
					num = 0;
				} else
					cur++;
			} else {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the inode list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
			}
		}
		if (num) {
			/* send the final, partially-filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist, &cinode->llist);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	mutex_unlock(&cinode->lock_mutex);
	kfree(buf);
	return rc;
}
1330
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331static int
1332cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1333 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1334{
1335 int rc = 0;
1336 __u64 length = 1 + flock->fl_end - flock->fl_start;
1337 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1338 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovskyd59dad22011-09-22 09:53:59 +04001339 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001340 __u16 netfid = cfile->netfid;
1341
1342 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001343 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001344
1345 rc = cifs_posix_lock_set(file, flock);
1346 if (!rc || rc < 0)
1347 return rc;
1348
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001349 if (type & LOCKING_ANDX_SHARED_LOCK)
Steve French08547b02006-02-28 22:39:25 +00001350 posix_lock_type = CIFS_RDLCK;
1351 else
1352 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001353
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001354 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001355 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001356
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001357 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
1358 0 /* set */, length, flock,
1359 posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001361 }
1362
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001363 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001364 struct cifsLockInfo *lock;
1365
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001366 lock = cifs_lock_init(flock->fl_start, length, type, netfid);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001367 if (!lock)
1368 return -ENOMEM;
1369
1370 rc = cifs_lock_add_if(cinode, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001371 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001372 kfree(lock);
1373 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001374 goto out;
1375
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001376 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001377 flock->fl_start, 0, 1, type, wait_flag, 0);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001378 if (rc) {
1379 kfree(lock);
1380 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001382
1383 cifs_lock_add(cinode, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001384 } else if (unlock)
1385 rc = cifs_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387out:
1388 if (flock->fl_flags & FL_POSIX)
Pavel Shilovskyb5efb972012-03-27 15:36:15 +04001389 cifs_posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390 return rc;
1391}
1392
/* fcntl byte-range lock entry point for the cifs VFS (F_GETLK/F_SETLK/F_SETLKW). */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u8 type;

	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
		"end: %lld", cmd, flock->fl_flags, flock->fl_type,
		flock->fl_start, flock->fl_end);

	/* decode the flock into lock/unlock/wait flags and an SMB lock type */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);
	netfid = cfile->netfid;
	cinode = CIFS_I(file->f_path.dentry->d_inode);

	/* posix semantics need unix extensions and posix brlocks enabled */
	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		FreeXid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	FreeXid(xid);
	return rc;
}
1449
Jeff Layton597b0272012-03-23 14:40:56 -04001450/*
1451 * update the file size (if needed) after a write. Should be called with
1452 * the inode->i_lock held
1453 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001454void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001455cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1456 unsigned int bytes_written)
1457{
1458 loff_t end_of_write = offset + bytes_written;
1459
1460 if (end_of_write > cifsi->server_eof)
1461 cifsi->server_eof = end_of_write;
1462}
1463
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001464static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
Jeff Layton7da4b492010-10-15 15:34:00 -04001465 const char *write_data, size_t write_size,
1466 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467{
1468 int rc = 0;
1469 unsigned int bytes_written = 0;
1470 unsigned int total_written;
1471 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00001472 struct cifs_tcon *pTcon;
Jeff Layton77499812011-01-11 07:24:23 -05001473 int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001474 struct dentry *dentry = open_file->dentry;
1475 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001476 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
Jeff Layton7da4b492010-10-15 15:34:00 -04001478 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
Joe Perchesb6b38f72010-04-21 03:50:45 +00001480 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Jeff Layton7da4b492010-10-15 15:34:00 -04001481 *poffset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Jeff Layton13cfb732010-09-29 19:51:11 -04001483 pTcon = tlink_tcon(open_file->tlink);
Steve French50c2f752007-07-13 00:33:32 +00001484
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 xid = GetXid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 for (total_written = 0; write_size > total_written;
1488 total_written += bytes_written) {
1489 rc = -EAGAIN;
1490 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001491 struct kvec iov[2];
1492 unsigned int len;
1493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 /* we could deadlock if we called
1496 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001497 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001499 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 if (rc != 0)
1501 break;
1502 }
Steve French3e844692005-10-03 13:37:24 -07001503
Jeff Laytonca83ce32011-04-12 09:13:44 -04001504 len = min((size_t)cifs_sb->wsize,
1505 write_size - total_written);
1506 /* iov[0] is reserved for smb header */
1507 iov[1].iov_base = (char *)write_data + total_written;
1508 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001509 io_parms.netfid = open_file->netfid;
1510 io_parms.pid = pid;
1511 io_parms.tcon = pTcon;
1512 io_parms.offset = *poffset;
1513 io_parms.length = len;
1514 rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1515 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516 }
1517 if (rc || (bytes_written == 0)) {
1518 if (total_written)
1519 break;
1520 else {
1521 FreeXid(xid);
1522 return rc;
1523 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001524 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001525 spin_lock(&dentry->d_inode->i_lock);
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001526 cifs_update_eof(cifsi, *poffset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001527 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 *poffset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 }
1531
Steve Frencha4544342005-08-24 13:59:35 -07001532 cifs_stats_bytes_written(pTcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
Jeff Layton7da4b492010-10-15 15:34:00 -04001534 if (total_written > 0) {
1535 spin_lock(&dentry->d_inode->i_lock);
1536 if (*poffset > dentry->d_inode->i_size)
1537 i_size_write(dentry->d_inode, *poffset);
1538 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001540 mark_inode_dirty_sync(dentry->d_inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 FreeXid(xid);
1542 return total_written;
1543}
1544
/*
 * Find an open handle on this inode that permits reads.
 *
 * @cifs_inode: inode whose openFileList is searched
 * @fsuid_only: if true (and this is a multiuser mount), only consider
 *              handles opened by the current fsuid
 *
 * Returns a handle with an extra reference taken (caller must drop it
 * with cifsFileInfo_put()), or NULL if no usable readable handle exists.
 * Invalidated handles are skipped; the caller is expected to reopen.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	/* cifs_file_list_lock protects the per-inode openFileList */
	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001578
/*
 * Find an open handle on this inode that permits writes, reopening an
 * invalidated handle if necessary.
 *
 * @cifs_inode: inode whose openFileList is searched
 * @fsuid_only: if true (and this is a multiuser mount), only consider
 *              handles opened by the current fsuid
 *
 * First pass prefers handles belonging to the current thread group; if
 * none is found, a second pass (any_available) accepts any pid's handle.
 * Returns a referenced handle (caller must cifsFileInfo_put()) or NULL.
 *
 * NOTE(review): after a failed reopen we retake cifs_file_list_lock and
 * continue the list_for_each_entry iteration; this presumably relies on
 * the entry staying on the list while we hold a reference — confirm
 * against cifsFileInfo_put()'s unlink ordering.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			/* take a reference before possibly dropping the lock */
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors. If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1649
/*
 * Write back the [from, to) byte range of a single cached page.
 *
 * Looks up any writable handle on the inode (the caller has none in
 * hand), clamps the range so it never extends the file, and pushes the
 * data via cifs_write().  Returns 0 on success, 0 if the page lies
 * beyond i_size (racing truncate — nothing to do), or a negative errno.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	/* byte offset of the page within the file */
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	/* page stays mapped until the matching kunmap() below */
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, "No writeable filehandles for inode");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1703
Jeff Laytone9492872012-03-23 14:40:56 -04001704/*
1705 * Marshal up the iov array, reserving the first one for the header. Also,
1706 * set wdata->bytes.
1707 */
1708static void
1709cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1710{
1711 int i;
1712 struct inode *inode = wdata->cfile->dentry->d_inode;
1713 loff_t size = i_size_read(inode);
1714
1715 /* marshal up the pages into iov array */
1716 wdata->bytes = 0;
1717 for (i = 0; i < wdata->nr_pages; i++) {
1718 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1719 (loff_t)PAGE_CACHE_SIZE);
1720 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1721 wdata->bytes += iov[i + 1].iov_len;
1722 }
1723}
1724
/*
 * address_space_operations->writepages for cifs.
 *
 * Gathers runs of dirty, consecutive pages (up to wsize bytes), wraps
 * each run in a cifs_writedata and submits it asynchronously via
 * cifs_async_writev().  Falls back to generic_writepages() (one page at
 * a time) when wsize is smaller than a page.  Honors wbc->range_* and
 * the cyclic writeback index like other writepages implementations.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most wsize worth of pages per request */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* lock and validate pages; keep only a consecutive run */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->marshal_iov = cifs_writepages_marshal_iov;

		/* retry the submit on -EAGAIN for data-integrity writeback */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
/*
 * Write back a single locked page.  Caller holds the page lock and is
 * responsible for unlocking it (see cifs_writepage()).  Retries forever
 * on -EAGAIN for WB_SYNC_ALL writeback; otherwise redirties the page so
 * a later pass picks it up.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	int xid;

	xid = GetXid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, "ppw - page not up to date");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
1965
/*
 * ->writepage entry point: do the real work with the page still locked,
 * then drop the page lock as the writepage contract requires.
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return rc;
}
1972
/*
 * ->write_end for cifs.  Called after data has been copied into the
 * page by generic_perform_write().
 *
 * If the page isn't fully uptodate, the partial data is written to the
 * server synchronously via cifs_write() using this file's own handle;
 * otherwise the page is simply marked dirty for later writeback.  On
 * success, extends i_size if the write went past EOF.  Returns the
 * number of bytes accepted or a negative errno.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid to the server when mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
	     page, pos, copied);

	/* PageChecked is set by cifs_write_begin when it skipped the read */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2033
/*
 * fsync for strict cache mode: flush dirty pages to the server, drop
 * any pagecache we can't trust (no read oplock), then ask the server
 * to flush its own write cache for this handle.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	/* without a read oplock the cache may be stale; invalidate it */
	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cFYI(1, "rc: %d during invalidate phase", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2070
/*
 * Regular fsync: flush dirty pages for the given range, then send an
 * SMB Flush so the server commits its cached writes for this handle
 * (unless the mount disabled server-side sync with nosssync).
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = GetXid();

	cFYI(1, "Sync file - name: %s datasync: 0x%x",
		file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);

	FreeXid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2098
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099/*
2100 * As file closes, flush all cached write data for this inode checking
2101 * for write behind errors.
2102 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002103int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002105 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 int rc = 0;
2107
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002108 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002109 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002110
Joe Perchesb6b38f72010-04-21 03:50:45 +00002111 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 return rc;
2114}
2115
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002116static int
2117cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2118{
2119 int rc = 0;
2120 unsigned long i;
2121
2122 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002123 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002124 if (!pages[i]) {
2125 /*
2126 * save number of pages we have already allocated and
2127 * return with ENOMEM error
2128 */
2129 num_pages = i;
2130 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002131 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002132 }
2133 }
2134
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002135 if (rc) {
2136 for (i = 0; i < num_pages; i++)
2137 put_page(pages[i]);
2138 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002139 return rc;
2140}
2141
2142static inline
2143size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2144{
2145 size_t num_pages;
2146 size_t clen;
2147
2148 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002149 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002150
2151 if (cur_len)
2152 *cur_len = clen;
2153
2154 return num_pages;
2155}
2156
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002157static void
2158cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2159{
2160 int i;
2161 size_t bytes = wdata->bytes;
2162
2163 /* marshal up the pages into iov array */
2164 for (i = 0; i < wdata->nr_pages; i++) {
Steve Frenchc7ad42b2012-03-23 16:30:56 -05002165 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002166 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2167 bytes -= iov[i + 1].iov_len;
2168 }
2169}
2170
/*
 * Work-queue completion handler for an uncached (direct) async write.
 * Updates the server EOF / i_size bookkeeping, wakes the submitter via
 * complete(), releases the data pages (unless the write will be retried
 * with -EAGAIN), and drops the workqueue's reference on wdata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/* on -EAGAIN the pages are kept for the retry path */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2195
2196/* attempt to send write to server, retry on any -EAGAIN errors */
2197static int
2198cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2199{
2200 int rc;
2201
2202 do {
2203 if (wdata->cfile->invalidHandle) {
2204 rc = cifs_reopen_file(wdata->cfile, false);
2205 if (rc != 0)
2206 continue;
2207 }
2208 rc = cifs_async_writev(wdata);
2209 } while (rc == -EAGAIN);
2210
2211 return rc;
2212}
2213
/*
 * cifs_iovec_write - uncached (direct-style) vectored write.
 *
 * Splits the user iovec into wsize-limited chunks, copies each chunk into
 * freshly-allocated pages, and fires them at the server as async writes.
 * Successfully-sent requests are queued on wdata_list; afterwards the
 * function waits for their completions in increasing-offset order.
 *
 * @file:    open file to write to
 * @iov:     source iovec array (userspace data)
 * @nr_segs: number of iovec segments
 * @poffset: in/out file offset; advanced by the number of bytes written
 *
 * Returns the number of bytes written, or a negative errno if nothing
 * was written.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset = *poffset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	/* enforce O_APPEND, rlimits, s_maxbytes etc.; may shrink len */
	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	/* with rwpidforward, tag writes with the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		/* size this request to at most wsize bytes (cur_len) */
		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/*
		 * Copy user data page-by-page. A short copy (fault) leaves
		 * cur_len holding the uncopied remainder, so afterwards
		 * cur_len is recomputed to the bytes actually copied.
		 */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->marshal_iov = cifs_uncached_marshal_iov;
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			/* drops the cfile reference taken above as well */
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		/* on error, remaining entries are only unlinked and freed */
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2337
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002338ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002339 unsigned long nr_segs, loff_t pos)
2340{
2341 ssize_t written;
2342 struct inode *inode;
2343
2344 inode = iocb->ki_filp->f_path.dentry->d_inode;
2345
2346 /*
2347 * BB - optimize the way when signing is disabled. We can drop this
2348 * extra memory-to-memory copying and use iovec buffers for constructing
2349 * write request.
2350 */
2351
2352 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2353 if (written > 0) {
2354 CIFS_I(inode)->invalid_mapping = true;
2355 iocb->ki_pos = pos;
2356 }
2357
2358 return written;
2359}
2360
2361ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2362 unsigned long nr_segs, loff_t pos)
2363{
2364 struct inode *inode;
2365
2366 inode = iocb->ki_filp->f_path.dentry->d_inode;
2367
2368 if (CIFS_I(inode)->clientCanCacheAll)
2369 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2370
2371 /*
2372 * In strict cache mode we need to write the data to the server exactly
2373 * from the pos to pos+len-1 rather than flush all affected pages
2374 * because it may cause a error with mandatory locks on these pages but
2375 * not on the region from pos to ppos+len-1.
2376 */
2377
2378 return cifs_user_writev(iocb, iov, nr_segs, pos);
2379}
2380
/*
 * cifs_iovec_read - uncached vectored read.
 *
 * Reads up to iov_length() bytes from the server in rsize-limited chunks,
 * copying each server response buffer straight into the user iovec and
 * releasing the SMB buffer afterwards. Each chunk is retried while the
 * transport reports -EAGAIN (reopening a stale handle as needed).
 *
 * @file:    open file to read from
 * @iov:     destination iovec array
 * @nr_segs: number of iovec segments
 * @poffset: in/out file offset; advanced by the number of bytes read
 *
 * Returns the total bytes read; if nothing was read, returns the error
 * (or 0 for an empty request / immediate EOF).
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	unsigned int rsize;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* with rwpidforward, tag reads with the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* retry this chunk while the transport asks us to */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			/* response payload lives inside the SMB buffer */
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* release whichever pool the buffer came from */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			/* partial success wins over a later error/EOF */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
2475
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002476ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002477 unsigned long nr_segs, loff_t pos)
2478{
2479 ssize_t read;
2480
2481 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2482 if (read > 0)
2483 iocb->ki_pos = pos;
2484
2485 return read;
2486}
2487
2488ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2489 unsigned long nr_segs, loff_t pos)
2490{
2491 struct inode *inode;
2492
2493 inode = iocb->ki_filp->f_path.dentry->d_inode;
2494
2495 if (CIFS_I(inode)->clientCanCacheRead)
2496 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2497
2498 /*
2499 * In strict cache mode we need to read from the server all the time
2500 * if we don't have level II oplock because the server can delay mtime
2501 * change - so we can't make a decision about inode invalidating.
2502 * And we can also fail with pagereading if there are mandatory locks
2503 * on pages affected by this read but not on the region from pos to
2504 * pos+len-1.
2505 */
2506
2507 return cifs_user_readv(iocb, iov, nr_segs, pos);
2508}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
2510static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002511 loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512{
2513 int rc = -EACCES;
2514 unsigned int bytes_read = 0;
2515 unsigned int total_read;
2516 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002517 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +00002519 struct cifs_tcon *pTcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 int xid;
2521 char *current_offset;
2522 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002523 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002524 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002525 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526
2527 xid = GetXid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002528 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002530 /* FIXME: set up handlers for larger reads and/or convert to async */
2531 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2532
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302534 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302536 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002538 open_file = file->private_data;
Jeff Layton13cfb732010-09-29 19:51:11 -04002539 pTcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002541 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2542 pid = open_file->pid;
2543 else
2544 pid = current->tgid;
2545
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002547 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002549 for (total_read = 0, current_offset = read_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 read_size > total_read;
2551 total_read += bytes_read, current_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002552 current_read_size = min_t(uint, read_size - total_read, rsize);
2553
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002554 /* For windows me and 9x we do not want to request more
2555 than it negotiated since it will refuse the read then */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002556 if ((pTcon->ses) &&
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002557 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002558 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002559 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002560 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 rc = -EAGAIN;
2562 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002563 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002564 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 if (rc != 0)
2566 break;
2567 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002568 io_parms.netfid = open_file->netfid;
2569 io_parms.pid = pid;
2570 io_parms.tcon = pTcon;
2571 io_parms.offset = *poffset;
2572 io_parms.length = current_read_size;
2573 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2574 &current_offset, &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 }
2576 if (rc || (bytes_read == 0)) {
2577 if (total_read) {
2578 break;
2579 } else {
2580 FreeXid(xid);
2581 return rc;
2582 }
2583 } else {
Steve Frencha4544342005-08-24 13:59:35 -07002584 cifs_stats_bytes_read(pTcon, total_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 *poffset += bytes_read;
2586 }
2587 }
2588 FreeXid(xid);
2589 return total_read;
2590}
2591
Jeff Laytonca83ce32011-04-12 09:13:44 -04002592/*
2593 * If the page is mmap'ed into a process' page tables, then we need to make
2594 * sure that it doesn't change while being written back.
2595 */
2596static int
2597cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2598{
2599 struct page *page = vmf->page;
2600
2601 lock_page(page);
2602 return VM_FAULT_LOCKED;
2603}
2604
2605static struct vm_operations_struct cifs_file_vm_ops = {
2606 .fault = filemap_fault,
2607 .page_mkwrite = cifs_page_mkwrite,
2608};
2609
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002610int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2611{
2612 int rc, xid;
2613 struct inode *inode = file->f_path.dentry->d_inode;
2614
2615 xid = GetXid();
2616
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002617 if (!CIFS_I(inode)->clientCanCacheRead) {
2618 rc = cifs_invalidate_mapping(inode);
2619 if (rc)
2620 return rc;
2621 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002622
2623 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002624 if (rc == 0)
2625 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002626 FreeXid(xid);
2627 return rc;
2628}
2629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2631{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 int rc, xid;
2633
2634 xid = GetXid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002635 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002637 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 FreeXid(xid);
2639 return rc;
2640 }
2641 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002642 if (rc == 0)
2643 vma->vm_ops = &cifs_file_vm_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 FreeXid(xid);
2645 return rc;
2646}
2647
/*
 * cifs_readpages - address_space readpages: batch readahead via async reads.
 *
 * Consumes pages from page_list (supplied in order of declining index),
 * grouping runs of contiguous indexes into rsize-limited async read
 * requests. Pages are added to the pagecache locked; on any failure the
 * not-yet-submitted pages are unlocked and released so the VFS can fall
 * back to single-page reads.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* with rwpidforward, tag reads with the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
		mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		/* lowest remaining index is at the tail of page_list */
		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		/* take a file reference for the async read; released by
		   the readdata completion machinery */
		spin_lock(&cifs_file_list_lock);
		cifsFileInfo_get(open_file);
		spin_unlock(&cifs_file_list_lock);
		rdata->cfile = open_file;
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		list_splice_init(&tmplist, &rdata->pages);

		/* submit, retrying while the transport reports -EAGAIN */
		do {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					continue;
			}
			rc = cifs_async_readv(rdata);
		} while (rc == -EAGAIN);

		if (rc != 0) {
			/* put back the pages of the failed request */
			list_for_each_entry_safe(page, tpage, &rdata->pages,
						 lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			cifs_readdata_free(rdata);
			break;
		}
	}

	return rc;
}
2797
/*
 * cifs_readpage_worker - fill a single pagecache page.
 *
 * Tries fscache first; on a cache miss, reads one page synchronously from
 * the server into the kmapped page, zero-fills any tail, marks the page
 * uptodate and pushes it into fscache.
 *
 * Note: the success path intentionally falls through the io_error label so
 * that kunmap/page release run on both the success and the failure path;
 * rc is reset to 0 just before the label.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	/* extra ref balances the page_cache_release below */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	/* zero-fill the remainder after a short read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
2841
2842static int cifs_readpage(struct file *file, struct page *page)
2843{
2844 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2845 int rc = -EACCES;
2846 int xid;
2847
2848 xid = GetXid();
2849
2850 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302851 rc = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 FreeXid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302853 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
2855
Joe Perchesb6b38f72010-04-21 03:50:45 +00002856 cFYI(1, "readpage %p at offset %d 0x%x\n",
2857 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858
2859 rc = cifs_readpage_worker(file, page, &offset);
2860
2861 unlock_page(page);
2862
2863 FreeXid(xid);
2864 return rc;
2865}
2866
Steve Frencha403a0a2007-07-26 15:54:16 +00002867static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2868{
2869 struct cifsFileInfo *open_file;
2870
Jeff Layton44772882010-10-15 15:34:03 -04002871 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002872 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04002873 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04002874 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002875 return 1;
2876 }
2877 }
Jeff Layton44772882010-10-15 15:34:03 -04002878 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00002879 return 0;
2880}
2881
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882/* We do not want to update the file size from server for inodes
2883 open for write - to avoid races with writepage extending
2884 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002885 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 but this is tricky to do without racing with writebehind
2887 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00002888bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889{
Steve Frencha403a0a2007-07-26 15:54:16 +00002890 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00002891 return true;
Steve French23e7dd72005-10-20 13:44:56 -07002892
Steve Frencha403a0a2007-07-26 15:54:16 +00002893 if (is_inode_writable(cifsInode)) {
2894 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08002895 struct cifs_sb_info *cifs_sb;
2896
Steve Frenchc32a0b62006-01-12 14:41:28 -08002897 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00002898 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002899 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08002900 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00002901 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08002902 }
2903
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002904 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00002905 return true;
Steve French7ba52632007-02-08 18:14:13 +00002906
Steve French4b18f2a2008-04-29 00:06:05 +00002907 return false;
Steve French23e7dd72005-10-20 13:44:56 -07002908 } else
Steve French4b18f2a2008-04-29 00:06:05 +00002909 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910}
2911
/*
 * cifs_write_begin - address_space write_begin: prepare a page for a write
 * of len bytes at pos.
 *
 * Grabs (and locks) the target pagecache page and, unless the page is
 * already uptodate, the write covers the whole page, or the oplocked
 * EOF-straddling optimization applies, pre-reads the page from the server.
 * Always returns the page in *pagep; cifs_write_end finishes the job (and
 * falls back to a sync write when PG_uptodate is not set here).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero the parts of the page outside [offset, offset+len) */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
2983
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05302984static int cifs_release_page(struct page *page, gfp_t gfp)
2985{
2986 if (PagePrivate(page))
2987 return 0;
2988
2989 return cifs_fscache_release_page(page, gfp);
2990}
2991
2992static void cifs_invalidate_page(struct page *page, unsigned long offset)
2993{
2994 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2995
2996 if (offset == 0)
2997 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2998}
2999
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003000static int cifs_launder_page(struct page *page)
3001{
3002 int rc = 0;
3003 loff_t range_start = page_offset(page);
3004 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3005 struct writeback_control wbc = {
3006 .sync_mode = WB_SYNC_ALL,
3007 .nr_to_write = 0,
3008 .range_start = range_start,
3009 .range_end = range_end,
3010 };
3011
3012 cFYI(1, "Launder page: %p", page);
3013
3014 if (clear_page_dirty_for_io(page))
3015 rc = cifs_writepage_locked(page, &wbc);
3016
3017 cifs_fscache_invalidate_page(page, page->mapping->host);
3018 return rc;
3019}
3020
/*
 * Work handler run when the server revokes (breaks) an oplock on an open
 * file.  Flushes cached state the oplock was protecting, pushes any
 * cached byte-range locks to the server, and acknowledges the break.
 *
 * NOTE(review): ordering here is deliberate - leases are broken and dirty
 * pages written out before the release is sent; do not reorder.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		/*
		 * Break any local lease to match the oplock level the
		 * server is demoting us to: if we may still cache reads,
		 * break only write-conflicting leases, else break all.
		 */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		/* flush dirty pages the (write) oplock allowed us to cache */
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/*
			 * Losing read caching too: wait for the flush to
			 * finish and discard cached pages so future reads
			 * go back to the server.
			 */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			invalidate_remote_inode(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	/* byte-range locks cached under the oplock must now go to the server */
	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		/* acknowledge the break with an OPLOCK_RELEASE lock request */
		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
				 current->tgid, 0, 0, 0, 0,
				 LOCKING_ANDX_OPLOCK_RELEASE, false,
				 cinode->clientCanCacheRead ? 1 : 0);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}
3061
/*
 * Address-space operations used when the server's negotiated buffer is
 * large enough for multi-page reads (includes .readpages; compare
 * cifs_addr_ops_smallbuf below, which omits it).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003074
3075/*
3076 * cifs_readpages requires the server to support a buffer large enough to
3077 * contain the header plus one complete page of data. Otherwise, we need
3078 * to leave cifs_readpages out of the address space operations.
3079 */
/* Identical to cifs_addr_ops except that .readpages is left unset. */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};