/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

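/*
 * Convert the access mode bits of the VFS open flags into the SMB desired
 * access bits requested from the server on open.
 */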
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

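/*
 * Map VFS open flags onto the SMB_O_* flags used by the POSIX open call
 * (available on servers that support the unix extensions).
 */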
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

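/*
 * Derive the create disposition from the O_CREAT/O_EXCL/O_TRUNC combination;
 * see the open flag mapping table in cifs_nt_open() below.
 */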
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

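/*
 * Open (or create) a file through the POSIX create call offered by servers
 * with unix extensions. On success the caller gets the oplock level and
 * netfid back and, if pinode is supplied, a new or updated inode built from
 * the returned FILE_UNIX_BASIC_INFO.
 */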
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does,
 *	which uses the attributes / metadata passed in on the open call.
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

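/*
 * Allocate and initialize the per-open cifsFileInfo structure, link it into
 * the per-inode and per-tcon open file lists, and stash it in
 * file->private_data.
 */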
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

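/* Take an additional reference on the file private data. */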
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					  " unexpected error on SMB posix open"
					  ", disabling posix open support."
					  " Check if server update available.",
					  tcon->ses->serverName,
					  tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			   (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte-range locks that were released when the session to
 * the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means we
	 * end up here, and we can never tell if the caller already has the
	 * rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to retry
		 * hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh the inode by passing in a file_info buf to be
	 * returned by CIFSSMBOpen and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server version of the file size can be
	 * stale. If we knew for sure that the inode was not dirty locally we
	 * could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data; and since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

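/*
 * Allocate and initialize a byte-range lock record for the current task
 * (the pid is taken from the current thread group).
 */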
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

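/* Wake up and unlink any lock requests blocked on the given lock. */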
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check: CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
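/*
 * Scan one fid's lock list for a lock that overlaps the given range and
 * conflicts with the requested type and operation; a conflicting lock is
 * returned through conf_lock.
 */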
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		      current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

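/*
 * Check every fid attached to the inode for a lock that conflicts with the
 * given range and type.
 */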
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if we need to send a request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

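/*
 * Send all cached byte-range locks for this fid to the server, packing as
 * many LOCKING_ANDX ranges into each request as the server's buffer size
 * allows.
 */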
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
		  sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

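/*
 * Push cached byte-range locks to the server: POSIX style if the server
 * supports the unix extensions and posix brlocks are not disabled, mandatory
 * style otherwise. Further locks are no longer cached locally.
 */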
1146static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001147cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001148{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001149 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001150 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001151 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001152 int rc = 0;
1153
1154	/* we are going to update can_cache_brlcks here - need write access */
1155 down_write(&cinode->lock_sem);
1156 if (!cinode->can_cache_brlcks) {
1157 up_write(&cinode->lock_sem);
1158 return rc;
1159 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001160
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001161 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001162 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1163 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001164 rc = cifs_push_posix_locks(cfile);
1165 else
1166 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001167
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001168 cinode->can_cache_brlcks = false;
1169 up_write(&cinode->lock_sem);
1170 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001171}
1172
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001173static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001174cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001175 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001177 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001178 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001179 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001180 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001181 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001182 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001185 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001186 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001187 "not implemented yet");
1188 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001189 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001190 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001191 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1192 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001193 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001195 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001196 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001197 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001198 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001199 *lock = 1;
1200 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001201 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001202 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001203 *unlock = 1;
1204 /* Check if unlock includes more than one lock range */
1205 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001206 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001207 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001208 *lock = 1;
1209 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001210 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001211 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001212 *lock = 1;
1213 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001214 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001215 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001216 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001218 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001219}
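/*
 * Decoding summary for the helper above: every request starts from
 * server->vals->large_lock_type and is then specialized:
 *
 *	F_WRLCK, F_EXLCK  ->  |= exclusive_lock_type, *lock = 1
 *	F_RDLCK, F_SHLCK  ->  |= shared_lock_type,    *lock = 1
 *	F_UNLCK           ->  |= unlock_lock_type,    *unlock = 1
 *
 * FL_SLEEP is the only flag that changes behaviour here (it sets
 * *wait_flag for blocking locks); FL_ACCESS and FL_LEASE are merely
 * logged as not implemented.
 */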
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001221static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001222cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001223 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001224{
1225 int rc = 0;
1226 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001227 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1228 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001229 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001230 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001232 if (posix_lck) {
1233 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001234
1235 rc = cifs_posix_lock_test(file, flock);
1236 if (!rc)
1237 return rc;
1238
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001239 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 posix_lock_type = CIFS_RDLCK;
1241 else
1242 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001243 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001244 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001245 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 return rc;
1247 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001248
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001249 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001250 if (!rc)
1251 return rc;
1252
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001253 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001254 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1255 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001256 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001257 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1258 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001259 flock->fl_type = F_UNLCK;
1260 if (rc != 0)
1261 cERROR(1, "Error unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001262 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001263 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001264 }
1265
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001266 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001267 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001268 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001269 }
1270
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001271 type &= ~server->vals->exclusive_lock_type;
1272
1273 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1274 type | server->vals->shared_lock_type,
1275 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001276 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001277 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1278 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001279 flock->fl_type = F_RDLCK;
1280 if (rc != 0)
1281 cERROR(1, "Error unlocking previously locked "
1282 "range %d during test of lock", rc);
1283 } else
1284 flock->fl_type = F_WRLCK;
1285
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001286 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001287}
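/*
 * cifs_getlk() implements the F_GETLK probe: when the cached lock test
 * cannot give an answer, it takes the lock on the server and, on
 * success, immediately unlocks it and reports F_UNLCK. A minimal
 * userspace sketch of what drives this path:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 1,
 *	};
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
 *		;	// no conflicting lock was found on the range
 */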
1288
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001289void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001290cifs_move_llist(struct list_head *source, struct list_head *dest)
1291{
1292 struct list_head *li, *tmp;
1293 list_for_each_safe(li, tmp, source)
1294 list_move(li, dest);
1295}
1296
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001297void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001298cifs_free_llist(struct list_head *llist)
1299{
1300 struct cifsLockInfo *li, *tmp;
1301 list_for_each_entry_safe(li, tmp, llist, llist) {
1302 cifs_del_lock_waiters(li);
1303 list_del(&li->llist);
1304 kfree(li);
1305 }
1306}
1307
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001308int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001309cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1310 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001311{
1312 int rc = 0, stored_rc;
1313 int types[] = {LOCKING_ANDX_LARGE_FILES,
1314 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1315 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001316 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001317 LOCKING_ANDX_RANGE *buf, *cur;
1318 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1319 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1320 struct cifsLockInfo *li, *tmp;
1321 __u64 length = 1 + flock->fl_end - flock->fl_start;
1322 struct list_head tmp_llist;
1323
1324 INIT_LIST_HEAD(&tmp_llist);
1325
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001326 /*
1327 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1328 * and check it for zero before using.
1329 */
1330 max_buf = tcon->ses->server->maxBuf;
1331 if (!max_buf)
1332 return -EINVAL;
1333
1334 max_num = (max_buf - sizeof(struct smb_hdr)) /
1335 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001336 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1337 if (!buf)
1338 return -ENOMEM;
1339
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001340 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001341 for (i = 0; i < 2; i++) {
1342 cur = buf;
1343 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001344 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001345 if (flock->fl_start > li->offset ||
1346 (flock->fl_start + length) <
1347 (li->offset + li->length))
1348 continue;
1349 if (current->tgid != li->pid)
1350 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001351 if (types[i] != li->type)
1352 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001353 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001354 /*
1355 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001356 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001357 */
1358 list_del(&li->llist);
1359 cifs_del_lock_waiters(li);
1360 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001361 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001362 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001363 cur->Pid = cpu_to_le16(li->pid);
1364 cur->LengthLow = cpu_to_le32((u32)li->length);
1365 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1366 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1367 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1368 /*
1369 * We need to save a lock here to let us add it again to
1370 * the file's list if the unlock range request fails on
1371 * the server.
1372 */
1373 list_move(&li->llist, &tmp_llist);
1374 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001375 stored_rc = cifs_lockv(xid, tcon,
1376 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001377 li->type, num, 0, buf);
1378 if (stored_rc) {
1379 /*
1380 * We failed on the unlock range
1381 * request - add all locks from the tmp
1382 * list to the head of the file's list.
1383 */
1384 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001385 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001386 rc = stored_rc;
1387 } else
1388 /*
1389				 * The unlock range request succeeded -
1390 * free the tmp list.
1391 */
1392 cifs_free_llist(&tmp_llist);
1393 cur = buf;
1394 num = 0;
1395 } else
1396 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001397 }
1398 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001399 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001400 types[i], num, 0, buf);
1401 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001402 cifs_move_llist(&tmp_llist,
1403 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001404 rc = stored_rc;
1405 } else
1406 cifs_free_llist(&tmp_llist);
1407 }
1408 }
1409
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001410 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001411 kfree(buf);
1412 return rc;
1413}
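/*
 * The max_num calculation above bounds how many LOCKING_ANDX_RANGE
 * entries fit into one request. As an illustration only - assuming a
 * negotiated max_buf of 4356 bytes, a 36-byte struct smb_hdr and
 * 20-byte range entries - max_num = (4356 - 36) / 20 = 216, so up to
 * 216 unlock ranges are batched per cifs_lockv() call; the exact
 * figures depend on the negotiated buffer size and structure layout.
 */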
1414
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001415static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001416cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001417 bool wait_flag, bool posix_lck, int lock, int unlock,
1418 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001419{
1420 int rc = 0;
1421 __u64 length = 1 + flock->fl_end - flock->fl_start;
1422 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1423 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001424 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001425
1426 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001427 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001428
1429 rc = cifs_posix_lock_set(file, flock);
1430 if (!rc || rc < 0)
1431 return rc;
1432
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001433 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001434 posix_lock_type = CIFS_RDLCK;
1435 else
1436 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001437
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001438 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001439 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001440
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001441 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1442 current->tgid, flock->fl_start, length,
1443 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001444 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001445 }
1446
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001447 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001448 struct cifsLockInfo *lock;
1449
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001450 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001451 if (!lock)
1452 return -ENOMEM;
1453
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001454 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001455 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001456 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001457 return rc;
1458 }
1459 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001460 goto out;
1461
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001462 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1463 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001464 if (rc) {
1465 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001466 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001467 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001468
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001469 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001470 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001471 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001472
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001473out:
1474 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001475 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476 return rc;
1477}
1478
1479int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1480{
1481 int rc, xid;
1482 int lock = 0, unlock = 0;
1483 bool wait_flag = false;
1484 bool posix_lck = false;
1485 struct cifs_sb_info *cifs_sb;
1486 struct cifs_tcon *tcon;
1487 struct cifsInodeInfo *cinode;
1488 struct cifsFileInfo *cfile;
1489 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001490 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001491
1492 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001493 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001494
1495 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1496 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1497 flock->fl_start, flock->fl_end);
1498
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001499 cfile = (struct cifsFileInfo *)file->private_data;
1500 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001501
1502 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1503 tcon->ses->server);
1504
1505 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001506 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001507 cinode = CIFS_I(file->f_path.dentry->d_inode);
1508
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001509 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001510 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1511 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1512 posix_lck = true;
1513 /*
1514 * BB add code here to normalize offset and length to account for
1515	 * negative length, which we cannot accept over the wire.
1516 */
1517 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001518 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001519 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001520 return rc;
1521 }
1522
1523 if (!lock && !unlock) {
1524 /*
1525		 * if no lock or unlock then there is nothing to do since we do
1526		 * not know what it is
1527 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001528 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001529 return -EOPNOTSUPP;
1530 }
1531
1532 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1533 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001534 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 return rc;
1536}
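/*
 * Userspace view of the entry point above: F_SETLKW arrives with
 * FL_SLEEP set (so wait_flag becomes true), F_SETLK without it, and
 * the posix_lck test picks CIFSSMBPosixLock() versus the mandatory
 * server->ops lock calls. For example:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// blocking -> wait_flag == true
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);	// releases -> unlock path
 */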
1537
Jeff Layton597b0272012-03-23 14:40:56 -04001538/*
1539 * update the file size (if needed) after a write. Should be called with
1540 * the inode->i_lock held
1541 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001542void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001543cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1544 unsigned int bytes_written)
1545{
1546 loff_t end_of_write = offset + bytes_written;
1547
1548 if (end_of_write > cifsi->server_eof)
1549 cifsi->server_eof = end_of_write;
1550}
1551
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001552static ssize_t
1553cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1554 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555{
1556 int rc = 0;
1557 unsigned int bytes_written = 0;
1558 unsigned int total_written;
1559 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001560 struct cifs_tcon *tcon;
1561 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001562 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001563 struct dentry *dentry = open_file->dentry;
1564 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001565 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
Jeff Layton7da4b492010-10-15 15:34:00 -04001567 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
Joe Perchesb6b38f72010-04-21 03:50:45 +00001569 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001570 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001572 tcon = tlink_tcon(open_file->tlink);
1573 server = tcon->ses->server;
1574
1575 if (!server->ops->sync_write)
1576 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001577
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001578 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 for (total_written = 0; write_size > total_written;
1581 total_written += bytes_written) {
1582 rc = -EAGAIN;
1583 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001584 struct kvec iov[2];
1585 unsigned int len;
1586
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 /* we could deadlock if we called
1589 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001590 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001592 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 if (rc != 0)
1594 break;
1595 }
Steve French3e844692005-10-03 13:37:24 -07001596
Jeff Laytonca83ce32011-04-12 09:13:44 -04001597 len = min((size_t)cifs_sb->wsize,
1598 write_size - total_written);
1599 /* iov[0] is reserved for smb header */
1600 iov[1].iov_base = (char *)write_data + total_written;
1601 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001602 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001603 io_parms.tcon = tcon;
1604 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001605 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001606 rc = server->ops->sync_write(xid, open_file, &io_parms,
1607 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 }
1609 if (rc || (bytes_written == 0)) {
1610 if (total_written)
1611 break;
1612 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001613 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 return rc;
1615 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001616 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001617 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001618 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001619 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001620 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 }
1623
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001624 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Jeff Layton7da4b492010-10-15 15:34:00 -04001626 if (total_written > 0) {
1627 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001628 if (*offset > dentry->d_inode->i_size)
1629 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001630 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001632 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001633 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 return total_written;
1635}
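/*
 * The loop above writes at most cifs_sb->wsize bytes per sync_write
 * call and retries on -EAGAIN (reopening an invalidated handle first).
 * For instance, assuming a wsize of 57344 bytes, a 100000-byte
 * cifs_write() turns into two wire writes of 57344 and 42656 bytes,
 * with the cached server EOF updated after each chunk and i_size
 * raised once at the end.
 */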
1636
Jeff Layton6508d902010-09-29 19:51:11 -04001637struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1638 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001639{
1640 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001641 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1642
1643 /* only filter by fsuid on multiuser mounts */
1644 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1645 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001646
Jeff Layton44772882010-10-15 15:34:03 -04001647 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001648 /* we could simply get the first_list_entry since write-only entries
1649	   are always at the end of the list, but since the first entry might
1650 have a close pending, we go through the whole list */
1651 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001652 if (fsuid_only && open_file->uid != current_fsuid())
1653 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001654 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001655 if (!open_file->invalidHandle) {
1656 /* found a good file */
1657 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001658 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001659 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001660 return open_file;
1661 } /* else might as well continue, and look for
1662 another, or simply have the caller reopen it
1663 again rather than trying to fix this handle */
1664 } else /* write only file */
1665 break; /* write only files are last so must be done */
1666 }
Jeff Layton44772882010-10-15 15:34:03 -04001667 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001668 return NULL;
1669}
Steve French630f3f0c2007-10-25 21:17:17 +00001670
Jeff Layton6508d902010-09-29 19:51:11 -04001671struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1672 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001673{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001674 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001675 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001676 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001677 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001678 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001679
Steve French60808232006-04-22 15:53:05 +00001680 /* Having a null inode here (because mapping->host was set to zero by
1681	 the VFS or MM) should not happen but we had reports of an oops (due to
1682 it being zero) during stress testcases so we need to check for it */
1683
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001684 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001685 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001686 dump_stack();
1687 return NULL;
1688 }
1689
Jeff Laytond3892292010-11-02 16:22:50 -04001690 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1691
Jeff Layton6508d902010-09-29 19:51:11 -04001692 /* only filter by fsuid on multiuser mounts */
1693 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1694 fsuid_only = false;
1695
Jeff Layton44772882010-10-15 15:34:03 -04001696 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001697refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001698 if (refind > MAX_REOPEN_ATT) {
1699 spin_unlock(&cifs_file_list_lock);
1700 return NULL;
1701 }
Steve French6148a742005-10-05 12:23:19 -07001702 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001703 if (!any_available && open_file->pid != current->tgid)
1704 continue;
1705 if (fsuid_only && open_file->uid != current_fsuid())
1706 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001707 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001708 if (!open_file->invalidHandle) {
1709 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001710 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001711 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001712 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001713 } else {
1714 if (!inv_file)
1715 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001716 }
Steve French6148a742005-10-05 12:23:19 -07001717 }
1718 }
Jeff Layton2846d382008-09-22 21:33:33 -04001719	/* couldn't find a usable FH with the same pid, try any available */
1720 if (!any_available) {
1721 any_available = true;
1722 goto refind_writable;
1723 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001724
1725 if (inv_file) {
1726 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001727 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001728 }
1729
Jeff Layton44772882010-10-15 15:34:03 -04001730 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001731
1732 if (inv_file) {
1733 rc = cifs_reopen_file(inv_file, false);
1734 if (!rc)
1735 return inv_file;
1736 else {
1737 spin_lock(&cifs_file_list_lock);
1738 list_move_tail(&inv_file->flist,
1739 &cifs_inode->openFileList);
1740 spin_unlock(&cifs_file_list_lock);
1741 cifsFileInfo_put(inv_file);
1742 spin_lock(&cifs_file_list_lock);
1743 ++refind;
1744 goto refind_writable;
1745 }
1746 }
1747
Steve French6148a742005-10-05 12:23:19 -07001748 return NULL;
1749}
1750
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1752{
1753 struct address_space *mapping = page->mapping;
1754 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1755 char *write_data;
1756 int rc = -EFAULT;
1757 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001759 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
1761 if (!mapping || !mapping->host)
1762 return -EFAULT;
1763
1764 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765
1766 offset += (loff_t)from;
1767 write_data = kmap(page);
1768 write_data += from;
1769
1770 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1771 kunmap(page);
1772 return -EIO;
1773 }
1774
1775 /* racing with truncate? */
1776 if (offset > mapping->host->i_size) {
1777 kunmap(page);
1778 return 0; /* don't care */
1779 }
1780
1781 /* check to make sure that we are not extending the file */
1782 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001783 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
Jeff Layton6508d902010-09-29 19:51:11 -04001785 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001786 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001787 bytes_written = cifs_write(open_file, open_file->pid,
1788 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001789 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001791 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001792 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001793 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001794 else if (bytes_written < 0)
1795 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001796 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001797 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 rc = -EIO;
1799 }
1800
1801 kunmap(page);
1802 return rc;
1803}
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001806 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001808 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1809 bool done = false, scanned = false, range_whole = false;
1810 pgoff_t end, index;
1811 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001812 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001813 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001814 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001815
Steve French37c0eb42005-10-05 14:50:29 -07001816 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001817 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001818 * one page at a time via cifs_writepage
1819 */
1820 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1821 return generic_writepages(mapping, wbc);
1822
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001823 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001824 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001825 end = -1;
1826 } else {
1827 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1828 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1829 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001830 range_whole = true;
1831 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001832 }
1833retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001834 while (!done && index <= end) {
1835 unsigned int i, nr_pages, found_pages;
1836 pgoff_t next = 0, tofind;
1837 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001838
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001839 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1840 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001841
Jeff Laytonc2e87642012-03-23 14:40:55 -04001842 wdata = cifs_writedata_alloc((unsigned int)tofind,
1843 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001844 if (!wdata) {
1845 rc = -ENOMEM;
1846 break;
1847 }
1848
1849 /*
1850 * find_get_pages_tag seems to return a max of 256 on each
1851 * iteration, so we must call it several times in order to
1852 * fill the array or the wsize is effectively limited to
1853 * 256 * PAGE_CACHE_SIZE.
1854 */
1855 found_pages = 0;
1856 pages = wdata->pages;
1857 do {
1858 nr_pages = find_get_pages_tag(mapping, &index,
1859 PAGECACHE_TAG_DIRTY,
1860 tofind, pages);
1861 found_pages += nr_pages;
1862 tofind -= nr_pages;
1863 pages += nr_pages;
1864 } while (nr_pages && tofind && index <= end);
1865
1866 if (found_pages == 0) {
1867 kref_put(&wdata->refcount, cifs_writedata_release);
1868 break;
1869 }
1870
1871 nr_pages = 0;
1872 for (i = 0; i < found_pages; i++) {
1873 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001874 /*
1875 * At this point we hold neither mapping->tree_lock nor
1876 * lock on the page itself: the page may be truncated or
1877 * invalidated (changing page->mapping to NULL), or even
1878 * swizzled back from swapper_space to tmpfs file
1879 * mapping
1880 */
1881
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001882 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001883 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001884 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001885 break;
1886
1887 if (unlikely(page->mapping != mapping)) {
1888 unlock_page(page);
1889 break;
1890 }
1891
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001892 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001893 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001894 unlock_page(page);
1895 break;
1896 }
1897
1898 if (next && (page->index != next)) {
1899 /* Not next consecutive page */
1900 unlock_page(page);
1901 break;
1902 }
1903
1904 if (wbc->sync_mode != WB_SYNC_NONE)
1905 wait_on_page_writeback(page);
1906
1907 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001908 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001909 unlock_page(page);
1910 break;
1911 }
Steve French84d2f072005-10-12 15:32:05 -07001912
Linus Torvaldscb876f42006-12-23 16:19:07 -08001913 /*
1914 * This actually clears the dirty bit in the radix tree.
1915 * See cifs_writepage() for more commentary.
1916 */
1917 set_page_writeback(page);
1918
Jeff Layton3a98b862012-11-26 09:48:41 -05001919 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001920 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001921 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001922 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001923 break;
1924 }
1925
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001926 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001927 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001928 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001929 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001930
1931 /* reset index to refind any pages skipped */
1932 if (nr_pages == 0)
1933 index = wdata->pages[0]->index + 1;
1934
1935 /* put any pages we aren't going to use */
1936 for (i = nr_pages; i < found_pages; i++) {
1937 page_cache_release(wdata->pages[i]);
1938 wdata->pages[i] = NULL;
1939 }
1940
1941 /* nothing to write? */
1942 if (nr_pages == 0) {
1943 kref_put(&wdata->refcount, cifs_writedata_release);
1944 continue;
1945 }
1946
1947 wdata->sync_mode = wbc->sync_mode;
1948 wdata->nr_pages = nr_pages;
1949 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001950 wdata->pagesz = PAGE_CACHE_SIZE;
1951 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05001952 min(i_size_read(mapping->host) -
1953 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07001954 (loff_t)PAGE_CACHE_SIZE);
1955 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1956 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001957
1958 do {
1959 if (wdata->cfile != NULL)
1960 cifsFileInfo_put(wdata->cfile);
1961 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1962 false);
1963 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001964 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001965 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001966 break;
Steve French37c0eb42005-10-05 14:50:29 -07001967 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001968 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001969 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1970 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001971 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001972
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001973 for (i = 0; i < nr_pages; ++i)
1974 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001975
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001976 /* send failure -- clean up the mess */
1977 if (rc != 0) {
1978 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001979 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001980 redirty_page_for_writepage(wbc,
1981 wdata->pages[i]);
1982 else
1983 SetPageError(wdata->pages[i]);
1984 end_page_writeback(wdata->pages[i]);
1985 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001986 }
Jeff Layton941b8532011-01-11 07:24:01 -05001987 if (rc != -EAGAIN)
1988 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001989 }
1990 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001991
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001992 wbc->nr_to_write -= nr_pages;
1993 if (wbc->nr_to_write <= 0)
1994 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001995
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001996 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001997 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001998
Steve French37c0eb42005-10-05 14:50:29 -07001999 if (!scanned && !done) {
2000 /*
2001 * We hit the last page and there is more work to be done: wrap
2002 * back to the start of the file
2003 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002004 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002005 index = 0;
2006 goto retry;
2007 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002008
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002009 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002010 mapping->writeback_index = index;
2011
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 return rc;
2013}
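/*
 * Worked example for the wdata sizing above, assuming 4096-byte pages:
 * with i_size = 10000 and three dirty pages at offsets 0, 4096 and
 * 8192, tailsz = min(10000 - 8192, 4096) = 1808 and
 * bytes = 2 * 4096 + 1808 = 10000, so the final partial page is not
 * padded out to a full page on the wire.
 */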
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002015static int
2016cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002018 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002019 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002021 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022/* BB add check for wbc flags */
2023 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002024 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002025 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002026
2027 /*
2028 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2029 *
2030 * A writepage() implementation always needs to do either this,
2031 * or re-dirty the page with "redirty_page_for_writepage()" in
2032 * the case of a failure.
2033 *
2034 * Just unlocking the page will cause the radix tree tag-bits
2035 * to fail to update with the state of the page correctly.
2036 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002037 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002038retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002040 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2041 goto retry_write;
2042 else if (rc == -EAGAIN)
2043 redirty_page_for_writepage(wbc, page);
2044 else if (rc != 0)
2045 SetPageError(page);
2046 else
2047 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002048 end_page_writeback(page);
2049 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002050 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 return rc;
2052}
2053
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002054static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2055{
2056 int rc = cifs_writepage_locked(page, wbc);
2057 unlock_page(page);
2058 return rc;
2059}
2060
Nick Piggind9414772008-09-24 11:32:59 -04002061static int cifs_write_end(struct file *file, struct address_space *mapping,
2062 loff_t pos, unsigned len, unsigned copied,
2063 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064{
Nick Piggind9414772008-09-24 11:32:59 -04002065 int rc;
2066 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002067 struct cifsFileInfo *cfile = file->private_data;
2068 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2069 __u32 pid;
2070
2071 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2072 pid = cfile->pid;
2073 else
2074 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
Joe Perchesb6b38f72010-04-21 03:50:45 +00002076 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2077 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002078
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002079 if (PageChecked(page)) {
2080 if (copied == len)
2081 SetPageUptodate(page);
2082 ClearPageChecked(page);
2083 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002084 SetPageUptodate(page);
2085
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002087 char *page_data;
2088 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002089 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002090
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002091 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 /* this is probably better than directly calling
2093 partialpage_write since in this function the file handle is
2094		   known, which we might as well leverage */
2095 /* BB check if anything else missing out of ppw
2096 such as updating last write time */
2097 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002098 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002099 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002101
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002102 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002103 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002104 rc = copied;
2105 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002106 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 }
2108
Nick Piggind9414772008-09-24 11:32:59 -04002109 if (rc > 0) {
2110 spin_lock(&inode->i_lock);
2111 if (pos > inode->i_size)
2112 i_size_write(inode, pos);
2113 spin_unlock(&inode->i_lock);
2114 }
2115
2116 unlock_page(page);
2117 page_cache_release(page);
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 return rc;
2120}
2121
Josef Bacik02c24a82011-07-16 20:44:56 -04002122int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2123 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002125 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002127 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002128 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002129 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002130 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002131 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Josef Bacik02c24a82011-07-16 20:44:56 -04002133 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2134 if (rc)
2135 return rc;
2136 mutex_lock(&inode->i_mutex);
2137
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002138 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
Joe Perchesb6b38f72010-04-21 03:50:45 +00002140 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002141 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002142
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002143 if (!CIFS_I(inode)->clientCanCacheRead) {
2144 rc = cifs_invalidate_mapping(inode);
2145 if (rc) {
2146 cFYI(1, "rc: %d during invalidate phase", rc);
2147 rc = 0; /* don't care about it in fsync */
2148 }
2149 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002150
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002151 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002152 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2153 server = tcon->ses->server;
2154 if (server->ops->flush)
2155 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2156 else
2157 rc = -ENOSYS;
2158 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002159
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002160 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002161 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002162 return rc;
2163}
2164
Josef Bacik02c24a82011-07-16 20:44:56 -04002165int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002166{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002167 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002168 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002169 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002170 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002171 struct cifsFileInfo *smbfile = file->private_data;
2172 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002173 struct inode *inode = file->f_mapping->host;
2174
2175 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2176 if (rc)
2177 return rc;
2178 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002179
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002180 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002181
2182 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2183 file->f_path.dentry->d_name.name, datasync);
2184
2185 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002186 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2187 server = tcon->ses->server;
2188 if (server->ops->flush)
2189 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2190 else
2191 rc = -ENOSYS;
2192 }
Steve Frenchb298f222009-02-21 21:17:43 +00002193
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002194 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002195 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 return rc;
2197}
2198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199/*
2200 * As the file closes, flush all cached write data for this inode,
2201 * checking for write-behind errors.
2202 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002203int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002205 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 int rc = 0;
2207
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002208 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002209 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002210
Joe Perchesb6b38f72010-04-21 03:50:45 +00002211 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213 return rc;
2214}
2215
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002216static int
2217cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2218{
2219 int rc = 0;
2220 unsigned long i;
2221
2222 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002223 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002224 if (!pages[i]) {
2225 /*
2226			 * save the number of pages we have already allocated and
2227			 * return with an ENOMEM error
2228 */
2229 num_pages = i;
2230 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002231 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002232 }
2233 }
2234
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002235 if (rc) {
2236 for (i = 0; i < num_pages; i++)
2237 put_page(pages[i]);
2238 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002239 return rc;
2240}
2241
2242static inline
2243size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2244{
2245 size_t num_pages;
2246 size_t clen;
2247
2248 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002249 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002250
2251 if (cur_len)
2252 *cur_len = clen;
2253
2254 return num_pages;
2255}
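/*
 * Example for the helper above, assuming 4096-byte pages: with
 * wsize = 57344 and len = 10000, clen = min(10000, 57344) = 10000 and
 * num_pages = DIV_ROUND_UP(10000, 4096) = 3 - two full pages plus a
 * 1808-byte tail.
 */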
2256
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002257static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002258cifs_uncached_writev_complete(struct work_struct *work)
2259{
2260 int i;
2261 struct cifs_writedata *wdata = container_of(work,
2262 struct cifs_writedata, work);
2263 struct inode *inode = wdata->cfile->dentry->d_inode;
2264 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2265
2266 spin_lock(&inode->i_lock);
2267 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2268 if (cifsi->server_eof > inode->i_size)
2269 i_size_write(inode, cifsi->server_eof);
2270 spin_unlock(&inode->i_lock);
2271
2272 complete(&wdata->done);
2273
2274 if (wdata->result != -EAGAIN) {
2275 for (i = 0; i < wdata->nr_pages; i++)
2276 put_page(wdata->pages[i]);
2277 }
2278
2279 kref_put(&wdata->refcount, cifs_writedata_release);
2280}
2281
2282/* attempt to send write to server, retry on any -EAGAIN errors */
2283static int
2284cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2285{
2286 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002287 struct TCP_Server_Info *server;
2288
2289 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002290
2291 do {
2292 if (wdata->cfile->invalidHandle) {
2293 rc = cifs_reopen_file(wdata->cfile, false);
2294 if (rc != 0)
2295 continue;
2296 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002297 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002298 } while (rc == -EAGAIN);
2299
2300 return rc;
2301}
2302
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the subsequent writes succeed, then
	 * we'll end up returning whatever was written. If one fails, then
	 * we'll get a new rc value from it in the wait loop below.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

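/*
 * Entry point for uncached writes: the data bypasses the page cache, and on
 * success the cached mapping is marked invalid so a subsequent cached read
 * will fetch fresh data from the server.
 */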
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = iocb->ki_filp->f_path.dentry->d_inode;

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * the extra memory-to-memory copying and use the iovec buffers
	 * directly for constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

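/*
 * Write through the page cache while holding lock_sem for read, so that the
 * write cannot race with insertion of a conflicting brlock. The generic
 * write path is only entered if no mandatory lock conflicts with the target
 * range.
 */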
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}

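/*
 * Pick the write strategy for strict cache mode: write through the page
 * cache when the oplock state or POSIX byte-range lock support makes that
 * safe, and fall back to uncached writes otherwise.
 */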
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * If we have a read oplock and want to write data to the file, we
	 * need to store it in the page cache and then push it to the server
	 * to be sure the next read will get valid data.
	 */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead) {
		ssize_t written;
		int rc;

		written = generic_file_aio_write(iocb, iov, nr_segs, pos);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (rc)
			return (ssize_t)rc;

		return written;
	}
#endif

	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on these pages but not on the region from pos to
	 * pos+len-1.
	 */

	if (!cinode->clientCanCacheAll)
		return cifs_user_writev(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_write(iocb, iov, nr_segs, pos);

	return cifs_writev(iocb, iov, nr_segs, pos);
}

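/*
 * Allocate a readdata structure with room for nr_pages page pointers and
 * arm its work item with the given completion handler.
 */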
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

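/*
 * Allocate the pages that will receive read data. On failure, release only
 * the pages that were already allocated and return -ENOMEM.
 */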
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/* only release the pages that were actually allocated */
		while (i > 0) {
			i--;
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

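/* issue the async read, reopening the file and retrying on -EAGAIN */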
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata: the readdata response with list of pages holding data
 * @iov: vector in which we should copy the data
 * @nr_segs: number of segments in vector
 * @offset: offset into file of the first iovec
 * @copied: used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

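/*
 * Receive len bytes from the socket into the pages of an uncached read
 * request: fill whole pages while enough data remains, zero-pad the tail of
 * the last partial page, and release any pages that are not needed.
 */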
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
				i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

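/*
 * Read into the iovec without going through the page cache: split the read
 * into rsize-sized requests, issue them asynchronously, then wait for the
 * replies in order of increasing offset and copy the returned pages into
 * the caller's iovec.
 */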
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			rc = -ENOMEM;
			goto error;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was sent successfully, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask the nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

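/*
 * Pick the read strategy for strict cache mode: read from the page cache
 * only when a read oplock guarantees the cache is valid and no mandatory
 * brlock conflicts with the requested range; otherwise go to the server.
 */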
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay the
	 * mtime change - so we can't make a decision about invalidating the
	 * inode. Reading from the page cache can also fail if there are
	 * mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

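/*
 * Synchronous read path: issue sync_read calls of up to rsize bytes each
 * until read_size bytes have been copied to read_data or an error/EOF is
 * hit, reopening the file handle and retrying on -EAGAIN.
 */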
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * was negotiated, since the server will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* release the xid on the early-return path too */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

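/*
 * Completion handler for cached (readpages) reads: mark each page uptodate
 * on success, hand it back to the LRU and fscache, and drop the references.
 */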
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

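/*
 * Receive len bytes from the socket into the page-cache pages of a
 * readpages request. Pages for which no data arrived are zero-filled and
 * marked uptodate when they lie beyond the server's EOF, and released
 * unread otherwise.
 */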
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
				i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point, however, since we set ra_pages to 0 when the rsize is
	 * smaller than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Read as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
	     mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

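/*
 * Read a single page at *poffset into the page cache, trying fscache first
 * and falling back to a synchronous cifs_read() from the server.
 */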
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing the inode to be refreshed only on
 * increases in the file size, but this is tricky to do without racing with
 * writebehind page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on directio
			   we can change the size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * we could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end so it is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

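/*
 * Worker that runs when the server breaks our oplock: flush (and, if the
 * read oplock is also lost, invalidate) cached data, push cached byte-range
 * locks to the server, and acknowledge the break unless it was cancelled.
 */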
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now incorrect file handle, is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected, since the oplock has already
	 * been released by the server.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};