/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

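/*
 * Map POSIX open(2) access-mode flags to the NT desired-access bits sent in
 * an SMB open request. Read/write opens ask for GENERIC_READ | GENERIC_WRITE
 * rather than GENERIC_ALL, which can trigger spurious access-denied errors
 * on create.
 */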
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request and can cause
		 * unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

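/*
 * Map POSIX open(2) flags to the SMB_O_* flags carried in a POSIX create
 * request (unix extensions). Note that O_DSYNC is widened to SMB_O_SYNC,
 * and O_EXCL is honored only together with O_CREAT.
 */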
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

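/*
 * Derive the SMB create disposition from the open(2) flags, e.g.
 * O_CREAT | O_EXCL maps to FILE_CREATE (fail if the file exists) and
 * O_CREAT | O_TRUNC maps to FILE_OVERWRITE_IF (create or truncate).
 */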
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

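/*
 * Open a file using the unix extensions POSIX create call. On success the
 * caller receives the oplock level and netfid; if @pinode is non-NULL, the
 * returned FILE_UNIX_BASIC_INFO is used to instantiate or update the inode.
 */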
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

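/*
 * Open a file with a regular (non-POSIX) SMB create call via
 * server->ops->open(), then refresh the inode from the returned metadata.
 * If the inode refresh fails, the just-opened handle is closed again so no
 * handle is leaked.
 */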
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag		CIFS Disposition
 *	----------		----------------
 *	O_CREAT			FILE_OPEN_IF
 *	O_CREAT | O_EXCL	FILE_CREATE
 *	O_CREAT | O_TRUNC	FILE_OVERWRITE_IF
 *	O_TRUNC			FILE_OVERWRITE
 *	none of the above	FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar but truncates an existing
 *	file rather than creating a new one, as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

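/*
 * Return true if any open handle on the inode still holds byte-range
 * (mandatory brlock-style) locks. Walks every per-fid lock list under a
 * read-locked lock_sem.
 */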
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

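/*
 * Acquire lock_sem for writing by polling: retry every 10ms until the
 * trylock succeeds, rather than sleeping inside down_write() itself.
 */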
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

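/*
 * Allocate and initialize the per-open cifsFileInfo, attach its lock list
 * to the inode, resolve the final oplock level against any pending open,
 * and link the new handle into the tcon and inode open-file lists.
 */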
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

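/**
 * cifsFileInfo_get - take an additional reference of file priv data
 *
 * Serialized by file_info_lock; pair each call with cifsFileInfo_put().
 */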
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

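/*
 * VFS ->open() entry point. Tries a POSIX open first when the unix
 * extensions allow it, falls back to cifs_nt_open(), then registers the
 * handle with cifs_new_fileinfo(). A pending open is tracked across the
 * window so a concurrent lease break is not missed.
 */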
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

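/*
 * Reopen a file handle that was invalidated by a reconnect. fh_mutex keeps
 * reopens for one handle serialized; when @can_flush is true, dirty pages
 * are written back and the inode metadata is refreshed before reuse.
 */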
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

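/*
 * After a reconnect, walk the tcon's open file list and reopen every
 * invalidated persistent handle. Handles are collected under
 * open_file_lock first and reopened outside the lock; any failure re-arms
 * need_reopen_files so the scan runs again.
 */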
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

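/*
 * VFS ->release() for directories: close any in-progress directory search
 * handle on the server, free the cached search buffer, and drop the
 * private data.
 */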
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

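/*
 * Allocate a cifsLockInfo describing one byte range, owned by the current
 * thread group, with an empty list of waiters blocked on it.
 */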
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400906static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300907cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000908{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400909 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000910 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400911 if (!lock)
912 return lock;
913 lock->offset = offset;
914 lock->length = length;
915 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400916 lock->pid = current->tgid;
917 INIT_LIST_HEAD(&lock->blist);
918 init_waitqueue_head(&lock->block_q);
919 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400920}
921
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700922void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400923cifs_del_lock_waiters(struct cifsLockInfo *lock)
924{
925 struct cifsLockInfo *li, *tmp;
926 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
927 list_del_init(&li->blist);
928 wake_up(&li->block_q);
929 }
930}
931
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400932#define CIFS_LOCK_OP 0
933#define CIFS_READ_OP 1
934#define CIFS_WRITE_OP 2
935
936/* @rw_check : 0 - no op, 1 - read, 2 - write */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400937static bool
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700938cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
939 __u64 length, __u8 type, struct cifsFileInfo *cfile,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400940 struct cifsLockInfo **conf_lock, int rw_check)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400941{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300942 struct cifsLockInfo *li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700943 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300944 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400945
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700946 list_for_each_entry(li, &fdlocks->locks, llist) {
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400947 if (offset + length <= li->offset ||
948 offset >= li->offset + li->length)
949 continue;
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400950 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
951 server->ops->compare_fids(cfile, cur_cfile)) {
952 /* shared lock prevents write op through the same fid */
953 if (!(li->type & server->vals->shared_lock_type) ||
954 rw_check != CIFS_WRITE_OP)
955 continue;
956 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700957 if ((type & server->vals->shared_lock_type) &&
958 ((server->ops->compare_fids(cfile, cur_cfile) &&
959 current->tgid == li->pid) || type == li->type))
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400960 continue;
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700961 if (conf_lock)
962 *conf_lock = li;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700963 return true;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400964 }
965 return false;
966}
967
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700968bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300969cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700970 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400971 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400972{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300973 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700974 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000975 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300976
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700977 list_for_each_entry(cur, &cinode->llist, llist) {
978 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700979 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300980 if (rc)
981 break;
982 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300983
984 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400985}
986
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300987/*
988 * Check if there is another lock that prevents us to set the lock (mandatory
989 * style). If such a lock exists, update the flock structure with its
990 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
991 * or leave it the same if we can't. Returns 0 if we don't need to request to
992 * the server or 1 otherwise.
993 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400994static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300995cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
996 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400997{
998 int rc = 0;
999 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001000 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001001 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001002 bool exist;
1003
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001004 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001005
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001006 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001007 &conf_lock, CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001008 if (exist) {
1009 flock->fl_start = conf_lock->offset;
1010 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1011 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001012 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001013 flock->fl_type = F_RDLCK;
1014 else
1015 flock->fl_type = F_WRLCK;
1016 } else if (!cinode->can_cache_brlcks)
1017 rc = 1;
1018 else
1019 flock->fl_type = F_UNLCK;
1020
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001021 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001022 return rc;
1023}
1024
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001025static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001026cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001027{
David Howells2b0143b2015-03-17 22:25:59 +00001028 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001029 cifs_down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001030 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001031 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001032}
1033
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001034/*
1035 * Set the byte-range lock (mandatory style). Returns:
1036 * 1) 0, if we set the lock and don't need to request to the server;
1037 * 2) 1, if no locks prevent us but we need to request to the server;
1038 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
1039 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001040static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001041cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001042 bool wait)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001043{
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001044 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +00001045 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001046 bool exist;
1047 int rc = 0;
1048
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001049try_again:
1050 exist = false;
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001051 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001052
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001053 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001054 lock->type, &conf_lock, CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001055 if (!exist && cinode->can_cache_brlcks) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001056 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001057 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001058 return rc;
1059 }
1060
1061 if (!exist)
1062 rc = 1;
1063 else if (!wait)
1064 rc = -EACCES;
1065 else {
1066 list_add_tail(&lock->blist, &conf_lock->blist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001067 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001068 rc = wait_event_interruptible(lock->block_q,
1069 (lock->blist.prev == &lock->blist) &&
1070 (lock->blist.next == &lock->blist));
1071 if (!rc)
1072 goto try_again;
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001073 cifs_down_write(&cinode->lock_sem);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001074 list_del_init(&lock->blist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001075 }
1076
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001077 up_write(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001078 return rc;
1079}
1080
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001081/*
1082 * Check if there is another lock that prevents us to set the lock (posix
1083 * style). If such a lock exists, update the flock structure with its
1084 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1085 * or leave it the same if we can't. Returns 0 if we don't need to request to
1086 * the server or 1 otherwise.
1087 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001088static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001089cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1090{
1091 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001092 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001093 unsigned char saved_type = flock->fl_type;
1094
Pavel Shilovsky50792762011-10-29 17:17:57 +04001095 if ((flock->fl_flags & FL_POSIX) == 0)
1096 return 1;
1097
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001098 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 posix_test_lock(file, flock);
1100
1101 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1102 flock->fl_type = saved_type;
1103 rc = 1;
1104 }
1105
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001106 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001107 return rc;
1108}
1109
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001110/*
1111 * Set the byte-range lock (posix style). Returns:
 1112 * 1) 0, if we set the lock and don't need to contact the server;
 1113 * 2) 1, if we need to contact the server;
 1114 * 3) <0, if an error occurs while setting the lock.
1115 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001116static int
1117cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1118{
Al Viro496ad9a2013-01-23 17:07:38 -05001119 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001120 int rc = 1;
1121
1122 if ((flock->fl_flags & FL_POSIX) == 0)
1123 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001124
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001125try_again:
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001126 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001127 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001128 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001129 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001130 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001131
1132 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001133 up_write(&cinode->lock_sem);
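	/*
	 * FILE_LOCK_DEFERRED means the lock was queued behind a conflicting
	 * lock as a blocked waiter; wait until it is no longer blocked
	 * (fl_next cleared) and then retry taking it.
	 */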
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001134 if (rc == FILE_LOCK_DEFERRED) {
1135 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1136 if (!rc)
1137 goto try_again;
Jeff Layton1a9e64a2013-06-21 08:58:10 -04001138 posix_unblock_lock(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001139 }
Steve French9ebb3892012-04-01 13:52:54 -05001140 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001141}
1142
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001143int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001144cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001145{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001146 unsigned int xid;
1147 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001148 struct cifsLockInfo *li, *tmp;
1149 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001150 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001151 LOCKING_ANDX_RANGE *buf, *cur;
1152 int types[] = {LOCKING_ANDX_LARGE_FILES,
1153 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1154 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001155
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001156 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001157 tcon = tlink_tcon(cfile->tlink);
1158
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001159 /*
1160 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001161 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001162 */
1163 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001164 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001165 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001166 return -EINVAL;
1167 }
1168
Ross Lagerwall04d76802019-01-08 18:30:56 +00001169 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1170 PAGE_SIZE);
1171 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1172 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001173 max_num = (max_buf - sizeof(struct smb_hdr)) /
1174 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001175 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001176 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001177 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001178 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001179 }
1180
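	/*
	 * Two passes over the lock list: exclusive ranges first, then shared
	 * ones, since a single LOCKING_ANDX request can only carry ranges of
	 * one lock type.
	 */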
1181 for (i = 0; i < 2; i++) {
1182 cur = buf;
1183 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001184 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001185 if (li->type != types[i])
1186 continue;
1187 cur->Pid = cpu_to_le16(li->pid);
1188 cur->LengthLow = cpu_to_le32((u32)li->length);
1189 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1190 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1191 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1192 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001193 stored_rc = cifs_lockv(xid, tcon,
1194 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001195 (__u8)li->type, 0, num,
1196 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001197 if (stored_rc)
1198 rc = stored_rc;
1199 cur = buf;
1200 num = 0;
1201 } else
1202 cur++;
1203 }
1204
1205 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001206 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001207 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001208 if (stored_rc)
1209 rc = stored_rc;
1210 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001211 }
1212
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001213 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001214 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001215 return rc;
1216}
1217
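/*
 * fl_owner_t is a pointer to the owning files_struct; hash it with a
 * per-module secret so a stable owner id can be sent to the server in
 * the pid field without exposing a raw kernel pointer.
 */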
Jeff Layton3d224622016-05-24 06:27:44 -04001218static __u32
1219hash_lockowner(fl_owner_t owner)
1220{
1221 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1222}
1223
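/*
 * Snapshot of a cached POSIX lock, copied out under flctx->flc_lock so
 * that the blocking CIFSSMBPosixLock() calls can be issued after the
 * spinlock has been dropped.
 */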
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001224struct lock_to_push {
1225 struct list_head llist;
1226 __u64 offset;
1227 __u64 length;
1228 __u32 pid;
1229 __u16 netfid;
1230 __u8 type;
1231};
1232
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001233static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001234cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235{
David Howells2b0143b2015-03-17 22:25:59 +00001236 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001237 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001238 struct file_lock *flock;
1239 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001240 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001241 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001242 struct list_head locks_to_send, *el;
1243 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001244 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001245
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001246 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001247
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001248 if (!flctx)
1249 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001250
Jeff Laytone084c1b2015-02-16 14:32:03 -05001251 spin_lock(&flctx->flc_lock);
1252 list_for_each(el, &flctx->flc_posix) {
1253 count++;
1254 }
1255 spin_unlock(&flctx->flc_lock);
1256
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001257 INIT_LIST_HEAD(&locks_to_send);
1258
1259 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001260	 * Allocating count locks is enough because no FL_POSIX locks can be
 1261	 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001262	 * protects the locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001263 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001264 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001265 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1266 if (!lck) {
1267 rc = -ENOMEM;
1268 goto err_out;
1269 }
1270 list_add_tail(&lck->llist, &locks_to_send);
1271 }
1272
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001273 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001274 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001275 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001276 if (el == &locks_to_send) {
1277 /*
1278 * The list ended. We don't have enough allocated
1279 * structures - something is really wrong.
1280 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001281 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001282 break;
1283 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001284 length = 1 + flock->fl_end - flock->fl_start;
1285 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1286 type = CIFS_RDLCK;
1287 else
1288 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001289 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001290 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001291 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001292 lck->length = length;
1293 lck->type = type;
1294 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001295 }
Jeff Layton6109c852015-01-16 15:05:57 -05001296 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001297
1298 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001299 int stored_rc;
1300
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001301 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001302 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001303 lck->type, 0);
1304 if (stored_rc)
1305 rc = stored_rc;
1306 list_del(&lck->llist);
1307 kfree(lck);
1308 }
1309
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001310out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001311 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001312 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001313err_out:
1314 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1315 list_del(&lck->llist);
1316 kfree(lck);
1317 }
1318 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001319}
1320
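/*
 * Push all cached byte-range locks for this file to the server: the POSIX
 * variant when the server supports the Unix extensions and POSIX brlocks
 * are not disabled by mount options, the mandatory variant otherwise.
 * Clears can_cache_brlcks so that later lock requests go to the server.
 */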
1321static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001322cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001323{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001324 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001325 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001326 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001327 int rc = 0;
1328
 1329	/* we are going to update can_cache_brlcks here - need write access */
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001330 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001331 if (!cinode->can_cache_brlcks) {
1332 up_write(&cinode->lock_sem);
1333 return rc;
1334 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001335
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001336 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001337 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1338 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001339 rc = cifs_push_posix_locks(cfile);
1340 else
1341 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001342
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001343 cinode->can_cache_brlcks = false;
1344 up_write(&cinode->lock_sem);
1345 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001346}
1347
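/*
 * Decode a struct file_lock into the server's lock type bits plus
 * lock/unlock/wait flags, logging any flags that are not handled.
 */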
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001349cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001350 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001353 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001354 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001357 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001360 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001361 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001363 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001364 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001365 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1366 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001369 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001370 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001371 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001372 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001373 *lock = 1;
1374 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001375 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001376 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001377 *unlock = 1;
1378 /* Check if unlock includes more than one lock range */
1379 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001380 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001381 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001382 *lock = 1;
1383 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001384 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001385 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386 *lock = 1;
1387 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001388 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001389 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001392 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001393}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001395static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001396cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001397 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001398{
1399 int rc = 0;
1400 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001401 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1402 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001403 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001404 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406 if (posix_lck) {
1407 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001408
1409 rc = cifs_posix_lock_test(file, flock);
1410 if (!rc)
1411 return rc;
1412
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001413 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001414 posix_lock_type = CIFS_RDLCK;
1415 else
1416 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001417 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1418 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001419 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001420 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 return rc;
1422 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001423
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001424 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001425 if (!rc)
1426 return rc;
1427
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001428 /* BB we could chain these into one lock request BB */
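	/*
	 * There is no lock-test operation on the wire, so probe by trying to
	 * take the lock: if that succeeds, release it immediately and report
	 * the range as unlocked; otherwise a conflicting lock must exist.
	 */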
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001429 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1430 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001431 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001432 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1433 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001434 flock->fl_type = F_UNLCK;
1435 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001436 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1437 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001438 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001439 }
1440
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001441 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001442 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001443 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001444 }
1445
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001446 type &= ~server->vals->exclusive_lock_type;
1447
1448 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1449 type | server->vals->shared_lock_type,
1450 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001451 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001452 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1453 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001454 flock->fl_type = F_RDLCK;
1455 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001456 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1457 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001458 } else
1459 flock->fl_type = F_WRLCK;
1460
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001461 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001462}
1463
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001464void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001465cifs_move_llist(struct list_head *source, struct list_head *dest)
1466{
1467 struct list_head *li, *tmp;
1468 list_for_each_safe(li, tmp, source)
1469 list_move(li, dest);
1470}
1471
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001472void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001473cifs_free_llist(struct list_head *llist)
1474{
1475 struct cifsLockInfo *li, *tmp;
1476 list_for_each_entry_safe(li, tmp, llist, llist) {
1477 cifs_del_lock_waiters(li);
1478 list_del(&li->llist);
1479 kfree(li);
1480 }
1481}
1482
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001483int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001484cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1485 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001486{
1487 int rc = 0, stored_rc;
1488 int types[] = {LOCKING_ANDX_LARGE_FILES,
1489 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1490 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001491 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001492 LOCKING_ANDX_RANGE *buf, *cur;
1493 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001494 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001495 struct cifsLockInfo *li, *tmp;
1496 __u64 length = 1 + flock->fl_end - flock->fl_start;
1497 struct list_head tmp_llist;
1498
1499 INIT_LIST_HEAD(&tmp_llist);
1500
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001501 /*
1502 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001503 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001504 */
1505 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001506 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001507 return -EINVAL;
1508
Ross Lagerwall04d76802019-01-08 18:30:56 +00001509 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1510 PAGE_SIZE);
1511 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1512 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001513 max_num = (max_buf - sizeof(struct smb_hdr)) /
1514 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001515 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001516 if (!buf)
1517 return -ENOMEM;
1518
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001519 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001520 for (i = 0; i < 2; i++) {
1521 cur = buf;
1522 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001523 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
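			/*
			 * Only stored locks that lie wholly inside the
			 * range being unlocked are removed.
			 */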
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001524 if (flock->fl_start > li->offset ||
1525 (flock->fl_start + length) <
1526 (li->offset + li->length))
1527 continue;
1528 if (current->tgid != li->pid)
1529 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001530 if (types[i] != li->type)
1531 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001532 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001533 /*
1534 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001535 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001536 */
1537 list_del(&li->llist);
1538 cifs_del_lock_waiters(li);
1539 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001540 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001541 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001542 cur->Pid = cpu_to_le16(li->pid);
1543 cur->LengthLow = cpu_to_le32((u32)li->length);
1544 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1545 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1546 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1547 /*
 1548			 * We need to save the lock here so that we can add it
 1549			 * back to the file's list if the unlock range request
 1550			 * fails on the server.
1551 */
1552 list_move(&li->llist, &tmp_llist);
1553 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001554 stored_rc = cifs_lockv(xid, tcon,
1555 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001556 li->type, num, 0, buf);
1557 if (stored_rc) {
1558 /*
1559 * We failed on the unlock range
1560 * request - add all locks from the tmp
1561 * list to the head of the file's list.
1562 */
1563 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001564 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001565 rc = stored_rc;
1566 } else
1567 /*
 1568				 * The unlock range request succeeded -
 1569				 * free the tmp list.
1570 */
1571 cifs_free_llist(&tmp_llist);
1572 cur = buf;
1573 num = 0;
1574 } else
1575 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001576 }
1577 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001578 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001579 types[i], num, 0, buf);
1580 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001581 cifs_move_llist(&tmp_llist,
1582 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001583 rc = stored_rc;
1584 } else
1585 cifs_free_llist(&tmp_llist);
1586 }
1587 }
1588
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001589 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001590 kfree(buf);
1591 return rc;
1592}
1593
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001594static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001595cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001596 bool wait_flag, bool posix_lck, int lock, int unlock,
1597 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001598{
1599 int rc = 0;
1600 __u64 length = 1 + flock->fl_end - flock->fl_start;
1601 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1602 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001603 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001604 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001605
1606 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001607 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001608
1609 rc = cifs_posix_lock_set(file, flock);
 1610		if (rc <= 0)
1611 return rc;
1612
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001613 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001614 posix_lock_type = CIFS_RDLCK;
1615 else
1616 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001617
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001618 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001619 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001620
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001621 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001622 hash_lockowner(flock->fl_owner),
1623 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001624 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001625 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001626 }
1627
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001628 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001629 struct cifsLockInfo *lock;
1630
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001631 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001632 if (!lock)
1633 return -ENOMEM;
1634
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001635 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001636 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001637 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001638 return rc;
1639 }
1640 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001641 goto out;
1642
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001643 /*
 1644		 * A Windows 7 server can delay breaking a lease from read to None
 1645		 * if we set a byte-range lock on a file - break it explicitly
 1646		 * before sending the lock to the server to be sure the next
 1647		 * read won't conflict with non-overlapping locks due to
 1648		 * page reading.
1649 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001650 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1651 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001652 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001653 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1654 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001655 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001656 }
1657
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001658 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1659 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001660 if (rc) {
1661 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001662 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001663 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001664
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001665 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001666 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001667 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001668
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001669out:
Aurelien Aptel56300d32019-03-14 18:44:16 +01001670 if (flock->fl_flags & FL_POSIX) {
1671 /*
1672 * If this is a request to remove all locks because we
1673 * are closing the file, it doesn't matter if the
1674 * unlocking failed as both cifs.ko and the SMB server
1675 * remove the lock on file close
1676 */
1677 if (rc) {
1678 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1679 if (!(flock->fl_flags & FL_CLOSE))
1680 return rc;
1681 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001682 rc = locks_lock_file_wait(file, flock);
Aurelien Aptel56300d32019-03-14 18:44:16 +01001683 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001684 return rc;
1685}
1686
1687int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1688{
1689 int rc, xid;
1690 int lock = 0, unlock = 0;
1691 bool wait_flag = false;
1692 bool posix_lck = false;
1693 struct cifs_sb_info *cifs_sb;
1694 struct cifs_tcon *tcon;
1695 struct cifsInodeInfo *cinode;
1696 struct cifsFileInfo *cfile;
1697 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001698 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001699
1700 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001701 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001702
Joe Perchesf96637b2013-05-04 22:12:25 -05001703 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1704 cmd, flock->fl_flags, flock->fl_type,
1705 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001706
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001707 cfile = (struct cifsFileInfo *)file->private_data;
1708 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001709
1710 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1711 tcon->ses->server);
1712
Al Viro7119e222014-10-22 00:25:12 -04001713 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001714 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001715 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001716
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001717 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001718 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1719 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1720 posix_lck = true;
1721 /*
1722 * BB add code here to normalize offset and length to account for
 1723	 * negative length, which we cannot accept over the wire.
1724 */
1725 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001726 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001727 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001728 return rc;
1729 }
1730
1731 if (!lock && !unlock) {
1732 /*
 1733		 * if the request is neither a lock nor an unlock then there is
 1734		 * nothing to do since we do not know what it is
1735 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001736 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001737 return -EOPNOTSUPP;
1738 }
1739
1740 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1741 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001742 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return rc;
1744}
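/*
 * For reference, this entry point is reached via fcntl(2) on a cifs file,
 * e.g. (userspace sketch, illustrative only):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	fcntl(fd, F_SETLKW, &fl);	// -> cifs_lock(), FL_POSIX | FL_SLEEP
 */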
1745
Jeff Layton597b0272012-03-23 14:40:56 -04001746/*
 1747 * Update the file size (if needed) after a write. Should be called with
 1748 * the inode->i_lock held.
1749 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001750void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001751cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1752 unsigned int bytes_written)
1753{
1754 loff_t end_of_write = offset + bytes_written;
1755
1756 if (end_of_write > cifsi->server_eof)
1757 cifsi->server_eof = end_of_write;
1758}
1759
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001760static ssize_t
1761cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1762 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763{
1764 int rc = 0;
1765 unsigned int bytes_written = 0;
1766 unsigned int total_written;
1767 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001768 struct cifs_tcon *tcon;
1769 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001770 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001771 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001772 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001773 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
Jeff Layton7da4b492010-10-15 15:34:00 -04001775 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
Al Viro35c265e2014-08-19 20:25:34 -04001777 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1778 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001780 tcon = tlink_tcon(open_file->tlink);
1781 server = tcon->ses->server;
1782
1783 if (!server->ops->sync_write)
1784 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001785
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001786 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
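	/*
	 * Write the data out in chunks: each pass sends at most
	 * wp_retry_size() bytes, retrying on -EAGAIN and reopening the
	 * handle first if it was invalidated by a reconnect.
	 */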
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 for (total_written = 0; write_size > total_written;
1789 total_written += bytes_written) {
1790 rc = -EAGAIN;
1791 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001792 struct kvec iov[2];
1793 unsigned int len;
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796				/* we could deadlock if we called
 1797				   filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001798				   reopen_file not to flush data to the
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799				   server now */
Jeff Layton15886172010-10-15 15:33:59 -04001800 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 if (rc != 0)
1802 break;
1803 }
Steve French3e844692005-10-03 13:37:24 -07001804
David Howells2b0143b2015-03-17 22:25:59 +00001805 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001806 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001807 /* iov[0] is reserved for smb header */
1808 iov[1].iov_base = (char *)write_data + total_written;
1809 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001810 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001811 io_parms.tcon = tcon;
1812 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001813 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001814 rc = server->ops->sync_write(xid, &open_file->fid,
1815 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 }
1817 if (rc || (bytes_written == 0)) {
1818 if (total_written)
1819 break;
1820 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001821 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 return rc;
1823 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001824 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001825 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001826 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001827 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001828 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001829 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 }
1831
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001832 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
Jeff Layton7da4b492010-10-15 15:34:00 -04001834 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001835 spin_lock(&d_inode(dentry)->i_lock);
1836 if (*offset > d_inode(dentry)->i_size)
1837 i_size_write(d_inode(dentry), *offset);
1838 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 }
David Howells2b0143b2015-03-17 22:25:59 +00001840 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001841 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 return total_written;
1843}
1844
Jeff Layton6508d902010-09-29 19:51:11 -04001845struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1846 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001847{
1848 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001849 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001850 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001851
1852 /* only filter by fsuid on multiuser mounts */
1853 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1854 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001855
Steve French3afca262016-09-22 18:58:16 -05001856 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001857 /* we could simply get the first_list_entry since write-only entries
1858 are always at the end of the list but since the first entry might
1859 have a close pending, we go through the whole list */
1860 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001861 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001862 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001863 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001864 if (!open_file->invalidHandle) {
1865 /* found a good file */
1866 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001867 cifsFileInfo_get(open_file);
1868 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001869 return open_file;
1870 } /* else might as well continue, and look for
1871 another, or simply have the caller reopen it
1872 again rather than trying to fix this handle */
1873 } else /* write only file */
1874 break; /* write only files are last so must be done */
1875 }
Steve French3afca262016-09-22 18:58:16 -05001876 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001877 return NULL;
1878}
Steve French630f3f0c2007-10-25 21:17:17 +00001879
Jeff Layton6508d902010-09-29 19:51:11 -04001880struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1881 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001882{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001883 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001884 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001885 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001886 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001887 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001888 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001889
Steve French60808232006-04-22 15:53:05 +00001890 /* Having a null inode here (because mapping->host was set to zero by
 1891	   the VFS or MM) should not happen but we had reports of an oops (due to
 1892	   it being zero) during stress test cases so we need to check for it */
1893
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001894 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001895 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001896 dump_stack();
1897 return NULL;
1898 }
1899
Jeff Laytond3892292010-11-02 16:22:50 -04001900 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001901 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001902
Jeff Layton6508d902010-09-29 19:51:11 -04001903 /* only filter by fsuid on multiuser mounts */
1904 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1905 fsuid_only = false;
1906
Steve French3afca262016-09-22 18:58:16 -05001907 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001908refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001909 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001910 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001911 return NULL;
1912 }
Steve French6148a742005-10-05 12:23:19 -07001913 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001914 if (!any_available && open_file->pid != current->tgid)
1915 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001916 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001917 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001918 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001919 if (!open_file->invalidHandle) {
1920 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001921 cifsFileInfo_get(open_file);
1922 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001923 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001924 } else {
1925 if (!inv_file)
1926 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001927 }
Steve French6148a742005-10-05 12:23:19 -07001928 }
1929 }
Jeff Layton2846d382008-09-22 21:33:33 -04001930	/* couldn't find a usable FH with the same pid, try any available */
1931 if (!any_available) {
1932 any_available = true;
1933 goto refind_writable;
1934 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001935
1936 if (inv_file) {
1937 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001938 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001939 }
1940
Steve French3afca262016-09-22 18:58:16 -05001941 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001942
1943 if (inv_file) {
1944 rc = cifs_reopen_file(inv_file, false);
1945 if (!rc)
1946 return inv_file;
1947 else {
Steve French3afca262016-09-22 18:58:16 -05001948 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001949 list_move_tail(&inv_file->flist,
1950 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001951 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001952 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001953 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001954 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001955 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001956 goto refind_writable;
1957 }
1958 }
1959
Steve French6148a742005-10-05 12:23:19 -07001960 return NULL;
1961}
1962
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1964{
1965 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001966 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 char *write_data;
1968 int rc = -EFAULT;
1969 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001971 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
1973 if (!mapping || !mapping->host)
1974 return -EFAULT;
1975
1976 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978 offset += (loff_t)from;
1979 write_data = kmap(page);
1980 write_data += from;
1981
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001982 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 kunmap(page);
1984 return -EIO;
1985 }
1986
1987 /* racing with truncate? */
1988 if (offset > mapping->host->i_size) {
1989 kunmap(page);
1990 return 0; /* don't care */
1991 }
1992
1993 /* check to make sure that we are not extending the file */
1994 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001995 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Jeff Layton6508d902010-09-29 19:51:11 -04001997 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001998 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001999 bytes_written = cifs_write(open_file, open_file->pid,
2000 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04002001 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002003 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002004 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002005 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002006 else if (bytes_written < 0)
2007 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07002008 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05002009 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 rc = -EIO;
2011 }
2012
2013 kunmap(page);
2014 return rc;
2015}
2016
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002017static struct cifs_writedata *
2018wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2019 pgoff_t end, pgoff_t *index,
2020 unsigned int *found_pages)
2021{
2022 unsigned int nr_pages;
2023 struct page **pages;
2024 struct cifs_writedata *wdata;
2025
2026 wdata = cifs_writedata_alloc((unsigned int)tofind,
2027 cifs_writev_complete);
2028 if (!wdata)
2029 return NULL;
2030
2031 /*
2032 * find_get_pages_tag seems to return a max of 256 on each
2033 * iteration, so we must call it several times in order to
2034 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03002035 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002036 */
2037 *found_pages = 0;
2038 pages = wdata->pages;
2039 do {
2040 nr_pages = find_get_pages_tag(mapping, index,
2041 PAGECACHE_TAG_DIRTY, tofind,
2042 pages);
2043 *found_pages += nr_pages;
2044 tofind -= nr_pages;
2045 pages += nr_pages;
2046 } while (nr_pages && tofind && *index <= end);
2047
2048 return wdata;
2049}
2050
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002051static unsigned int
2052wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2053 struct address_space *mapping,
2054 struct writeback_control *wbc,
2055 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2056{
2057 unsigned int nr_pages = 0, i;
2058 struct page *page;
2059
2060 for (i = 0; i < found_pages; i++) {
2061 page = wdata->pages[i];
2062 /*
2063 * At this point we hold neither mapping->tree_lock nor
2064 * lock on the page itself: the page may be truncated or
2065 * invalidated (changing page->mapping to NULL), or even
2066 * swizzled back from swapper_space to tmpfs file
2067 * mapping
2068 */
2069
2070 if (nr_pages == 0)
2071 lock_page(page);
2072 else if (!trylock_page(page))
2073 break;
2074
2075 if (unlikely(page->mapping != mapping)) {
2076 unlock_page(page);
2077 break;
2078 }
2079
2080 if (!wbc->range_cyclic && page->index > end) {
2081 *done = true;
2082 unlock_page(page);
2083 break;
2084 }
2085
2086 if (*next && (page->index != *next)) {
2087 /* Not next consecutive page */
2088 unlock_page(page);
2089 break;
2090 }
2091
2092 if (wbc->sync_mode != WB_SYNC_NONE)
2093 wait_on_page_writeback(page);
2094
2095 if (PageWriteback(page) ||
2096 !clear_page_dirty_for_io(page)) {
2097 unlock_page(page);
2098 break;
2099 }
2100
2101 /*
2102 * This actually clears the dirty bit in the radix tree.
2103 * See cifs_writepage() for more commentary.
2104 */
2105 set_page_writeback(page);
2106 if (page_offset(page) >= i_size_read(mapping->host)) {
2107 *done = true;
2108 unlock_page(page);
2109 end_page_writeback(page);
2110 break;
2111 }
2112
2113 wdata->pages[i] = page;
2114 *next = page->index + 1;
2115 ++nr_pages;
2116 }
2117
2118 /* reset index to refind any pages skipped */
2119 if (nr_pages == 0)
2120 *index = wdata->pages[0]->index + 1;
2121
2122 /* put any pages we aren't going to use */
2123 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002124 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002125 wdata->pages[i] = NULL;
2126 }
2127
2128 return nr_pages;
2129}
2130
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002131static int
2132wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2133 struct address_space *mapping, struct writeback_control *wbc)
2134{
2135 int rc = 0;
2136 struct TCP_Server_Info *server;
2137 unsigned int i;
2138
2139 wdata->sync_mode = wbc->sync_mode;
2140 wdata->nr_pages = nr_pages;
2141 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002142 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002143 wdata->tailsz = min(i_size_read(mapping->host) -
2144 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002145 (loff_t)PAGE_SIZE);
2146 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002147
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002148 if (wdata->cfile != NULL)
2149 cifsFileInfo_put(wdata->cfile);
2150 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2151 if (!wdata->cfile) {
2152 cifs_dbg(VFS, "No writable handles for inode\n");
2153 rc = -EBADF;
2154 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002155 wdata->pid = wdata->cfile->pid;
2156 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2157 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002158 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002159
2160 for (i = 0; i < nr_pages; ++i)
2161 unlock_page(wdata->pages[i]);
2162
2163 return rc;
2164}
2165
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002167 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002169 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002170 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002171 bool done = false, scanned = false, range_whole = false;
2172 pgoff_t end, index;
2173 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002174 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00002175
Steve French37c0eb42005-10-05 14:50:29 -07002176 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002177	 * If wsize is smaller than the page cache page size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002178 * one page at a time via cifs_writepage
2179 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002180 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002181 return generic_writepages(mapping, wbc);
2182
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002183 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002184 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002185 end = -1;
2186 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002187 index = wbc->range_start >> PAGE_SHIFT;
2188 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002189 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002190 range_whole = true;
2191 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002192 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002193 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
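	/*
	 * Each iteration reserves up to wsize bytes worth of credits,
	 * gathers that many dirty pages and hands them to one async write;
	 * on any failure the credits are returned via
	 * add_credits_and_wake_if().
	 */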
Steve French37c0eb42005-10-05 14:50:29 -07002194retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002195 while (!done && index <= end) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002196 unsigned int i, nr_pages, found_pages, wsize, credits;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002197 pgoff_t next = 0, tofind, saved_index = index;
Steve French37c0eb42005-10-05 14:50:29 -07002198
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002199 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2200 &wsize, &credits);
2201 if (rc)
2202 break;
Steve French37c0eb42005-10-05 14:50:29 -07002203
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002204 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002205
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002206 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2207 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002208 if (!wdata) {
2209 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002210 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002211 break;
2212 }
2213
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002214 if (found_pages == 0) {
2215 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002216 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002217 break;
2218 }
2219
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002220 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2221 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002222
2223 /* nothing to write? */
2224 if (nr_pages == 0) {
2225 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002226 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002227 continue;
2228 }
2229
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002230 wdata->credits = credits;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002231
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002232 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002233
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002234 /* send failure -- clean up the mess */
2235 if (rc != 0) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002236 add_credits_and_wake_if(server, wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002237 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002238 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002239 redirty_page_for_writepage(wbc,
2240 wdata->pages[i]);
2241 else
2242 SetPageError(wdata->pages[i]);
2243 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002244 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002245 }
Jeff Layton941b8532011-01-11 07:24:01 -05002246 if (rc != -EAGAIN)
2247 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002248 }
2249 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002250
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002251 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2252 index = saved_index;
2253 continue;
2254 }
2255
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002256 wbc->nr_to_write -= nr_pages;
2257 if (wbc->nr_to_write <= 0)
2258 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002259
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002260 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002261 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002262
Steve French37c0eb42005-10-05 14:50:29 -07002263 if (!scanned && !done) {
2264 /*
2265 * We hit the last page and there is more work to be done: wrap
2266 * back to the start of the file
2267 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002268 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002269 index = 0;
2270 goto retry;
2271 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002272
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002273 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002274 mapping->writeback_index = index;
2275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 return rc;
2277}
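/*
 * A minimal sketch (illustrative, not verbatim) of the credit discipline
 * the loop above follows: credits reserved with wait_mtu_credits() must be
 * consumed by exactly one of (a) a successful async send, whose completion
 * path returns them, or (b) add_credits_and_wake_if() on any failure:
 *
 *	rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
 *					   &wsize, &credits);
 *	if (rc)
 *		break;
 *	...
 *	if (rc)		/* alloc, fill or send failed */
 *		add_credits_and_wake_if(server, credits, 0);
 */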
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002279static int
2280cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002282 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002283 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002285 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002287 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002288 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002289 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002290
2291 /*
2292 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2293 *
2294 * A writepage() implementation always needs to do either this,
2295 * or re-dirty the page with "redirty_page_for_writepage()" in
2296 * the case of a failure.
2297 *
2298 * Just unlocking the page will cause the radix tree tag-bits
2299 * to fail to update with the state of the page correctly.
2300 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002301 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002302retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002303 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002304 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2305 goto retry_write;
2306 else if (rc == -EAGAIN)
2307 redirty_page_for_writepage(wbc, page);
2308 else if (rc != 0)
2309 SetPageError(page);
2310 else
2311 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002312 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002313 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002314 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 return rc;
2316}
2317
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002318static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2319{
2320 int rc = cifs_writepage_locked(page, wbc);
2321 unlock_page(page);
2322 return rc;
2323}
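/*
 * Note on the split above: cifs_writepage_locked() expects the page to be
 * locked on entry and leaves unlocking to its caller, so paths that must
 * keep holding the lock (a launder-style writeout, for instance) can call
 * it directly, while the ->writepage entry point simply wraps it and then
 * unlocks. A rough caller sketch (illustrative):
 *
 *	lock_page(page);
 *	rc = cifs_writepage_locked(page, wbc);
 *	unlock_page(page);
 */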
2324
Nick Piggind9414772008-09-24 11:32:59 -04002325static int cifs_write_end(struct file *file, struct address_space *mapping,
2326 loff_t pos, unsigned len, unsigned copied,
2327 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328{
Nick Piggind9414772008-09-24 11:32:59 -04002329 int rc;
2330 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002331 struct cifsFileInfo *cfile = file->private_data;
2332 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2333 __u32 pid;
2334
2335 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2336 pid = cfile->pid;
2337 else
2338 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
Joe Perchesf96637b2013-05-04 22:12:25 -05002340 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002341 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002342
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002343 if (PageChecked(page)) {
2344 if (copied == len)
2345 SetPageUptodate(page);
2346 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002347 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002348 SetPageUptodate(page);
2349
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002351 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002352 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002353 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002354
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002355 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356		/* This is probably better than calling cifs_partialpagewrite
2357		   directly, since here the file handle is already known and we
2358		   might as well leverage it */
2359		/* BB check if anything else is missing from ppw,
2360		   such as updating the last write time */
2361 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002362 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002363 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002365
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002366 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002367 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002368 rc = copied;
2369 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002370 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 }
2372
Nick Piggind9414772008-09-24 11:32:59 -04002373 if (rc > 0) {
2374 spin_lock(&inode->i_lock);
2375 if (pos > inode->i_size)
2376 i_size_write(inode, pos);
2377 spin_unlock(&inode->i_lock);
2378 }
2379
2380 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002381 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002382
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 return rc;
2384}
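/*
 * In short, cifs_write_end() above commits data one of two ways: if the
 * page is uptodate (or the copy just made it so) it only marks the page
 * dirty and defers to writeback; otherwise it writes the copied range
 * through synchronously with cifs_write(). Worked example (values
 * assumed): a 512-byte copy at pos 4096 into a non-uptodate 4K page gives
 * offset (pos & (PAGE_SIZE - 1)) == 0, and exactly those 512 bytes go to
 * the server immediately.
 */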
2385
Josef Bacik02c24a82011-07-16 20:44:56 -04002386int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2387 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002389 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002391 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002392 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002393 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002394 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002395 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
Josef Bacik02c24a82011-07-16 20:44:56 -04002397 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2398 if (rc)
2399 return rc;
Al Viro59551022016-01-22 15:40:57 -05002400 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04002401
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002402 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
Al Viro35c265e2014-08-19 20:25:34 -04002404 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2405 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002406
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002407 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002408 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002409 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002410 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002411 rc = 0; /* don't care about it in fsync */
2412 }
2413 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002414
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002415 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002416 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2417 server = tcon->ses->server;
2418 if (server->ops->flush)
2419 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2420 else
2421 rc = -ENOSYS;
2422 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002423
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002424 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002425 inode_unlock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002426 return rc;
2427}
2428
Josef Bacik02c24a82011-07-16 20:44:56 -04002429int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002430{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002431 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002432 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002433 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002434 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002435 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002436 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002437 struct inode *inode = file->f_mapping->host;
2438
2439 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2440 if (rc)
2441 return rc;
Al Viro59551022016-01-22 15:40:57 -05002442 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002443
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002444 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002445
Al Viro35c265e2014-08-19 20:25:34 -04002446 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2447 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002448
2449 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002450 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2451 server = tcon->ses->server;
2452 if (server->ops->flush)
2453 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2454 else
2455 rc = -ENOSYS;
2456 }
Steve Frenchb298f222009-02-21 21:17:43 +00002457
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002458 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002459 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 return rc;
2461}
2462
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463/*
2464 * As the file closes, flush all cached write data for this inode,
2465 * checking for write-behind errors.
2466 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002467int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468{
Al Viro496ad9a2013-01-23 17:07:38 -05002469 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 int rc = 0;
2471
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002472 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002473 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002474
Joe Perchesf96637b2013-05-04 22:12:25 -05002475 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477 return rc;
2478}
2479
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002480static int
2481cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2482{
2483 int rc = 0;
2484 unsigned long i;
2485
2486 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002487 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002488 if (!pages[i]) {
2489 /*
2490			 * record the number of pages we have already allocated
2491			 * and return with an ENOMEM error
2492 */
2493 num_pages = i;
2494 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002495 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002496 }
2497 }
2498
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002499 if (rc) {
2500 for (i = 0; i < num_pages; i++)
2501 put_page(pages[i]);
2502 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002503 return rc;
2504}
2505
2506static inline
2507size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2508{
2509 size_t num_pages;
2510 size_t clen;
2511
2512 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002513 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002514
2515 if (cur_len)
2516 *cur_len = clen;
2517
2518 return num_pages;
2519}
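/*
 * Worked example (values assumed): with wsize = 65536 and len = 70000,
 * clen = min(70000, 65536) = 65536 and num_pages = DIV_ROUND_UP(65536,
 * 4096) = 16 on a 4K-page system; the remaining 4464 bytes are left for
 * the caller's next pass.
 */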
2520
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002521static void
Steve French4a5c80d2014-02-07 20:45:12 -06002522cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002523{
2524 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002525 struct cifs_writedata *wdata = container_of(refcount,
2526 struct cifs_writedata, refcount);
2527
2528 for (i = 0; i < wdata->nr_pages; i++)
2529 put_page(wdata->pages[i]);
2530 cifs_writedata_release(refcount);
2531}
2532
2533static void
2534cifs_uncached_writev_complete(struct work_struct *work)
2535{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002536 struct cifs_writedata *wdata = container_of(work,
2537 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002538 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002539 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2540
2541 spin_lock(&inode->i_lock);
2542 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2543 if (cifsi->server_eof > inode->i_size)
2544 i_size_write(inode, cifsi->server_eof);
2545 spin_unlock(&inode->i_lock);
2546
2547 complete(&wdata->done);
2548
Steve French4a5c80d2014-02-07 20:45:12 -06002549 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002550}
2551
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002552static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002553wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2554 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002555{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002556 size_t save_len, copied, bytes, cur_len = *len;
2557 unsigned long i, nr_pages = *num_pages;
2558
2559 save_len = cur_len;
2560 for (i = 0; i < nr_pages; i++) {
2561 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2562 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2563 cur_len -= copied;
2564 /*
2565 * If we didn't copy as much as we expected, then that
2566 * may mean we trod into an unmapped area. Stop copying
2567 * at that point. On the next pass through the big
2568 * loop, we'll likely end up getting a zero-length
2569 * write and bailing out of it.
2570 */
2571 if (copied < bytes)
2572 break;
2573 }
2574 cur_len = save_len - cur_len;
2575 *len = cur_len;
2576
2577 /*
2578 * If we have no data to send, then that probably means that
2579 * the copy above failed altogether. That's most likely because
2580 * the address in the iovec was bogus. Return -EFAULT and let
2581 * the caller free anything we allocated and bail out.
2582 */
2583 if (!cur_len)
2584 return -EFAULT;
2585
2586 /*
2587 * i + 1 now represents the number of pages we actually used in
2588 * the copy phase above.
2589 */
2590 *num_pages = i + 1;
2591 return 0;
2592}
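/*
 * Illustrative walk-through of the helper above: with *len = 10000 and
 * nr_pages = 3 on 4K pages, the loop copies 4096 + 4096 + 1808 bytes. If
 * the second copy_page_from_iter() faults after 100 bytes, the loop stops
 * with 5804 bytes uncopied, so *len becomes 10000 - 5804 = 4196 and
 * *num_pages becomes i + 1 = 2; the caller then trims the unused page.
 */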
2593
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002594static int
2595cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2596 struct cifsFileInfo *open_file,
2597 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002598{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002599 int rc = 0;
2600 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002601 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002602 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002603 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002604 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002605 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002606 struct TCP_Server_Info *server;
2607
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002608 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2609 pid = open_file->pid;
2610 else
2611 pid = current->tgid;
2612
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002613 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002614
2615 do {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002616 unsigned int wsize, credits;
2617
2618 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2619 &wsize, &credits);
2620 if (rc)
2621 break;
2622
2623 nr_pages = get_numpages(wsize, len, &cur_len);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002624 wdata = cifs_writedata_alloc(nr_pages,
2625 cifs_uncached_writev_complete);
2626 if (!wdata) {
2627 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002628 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002629 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002630 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002631
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002632 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2633 if (rc) {
2634 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002635 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002636 break;
2637 }
2638
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002639 num_pages = nr_pages;
2640 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2641 if (rc) {
Jeff Layton5d81de82014-02-14 07:20:35 -05002642 for (i = 0; i < nr_pages; i++)
2643 put_page(wdata->pages[i]);
2644 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002645 add_credits_and_wake_if(server, credits, 0);
Jeff Layton5d81de82014-02-14 07:20:35 -05002646 break;
2647 }
2648
2649 /*
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002650 * Bring nr_pages down to the number of pages we actually used,
2651 * and free any pages that we didn't use.
Jeff Layton5d81de82014-02-14 07:20:35 -05002652 */
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002653 for ( ; nr_pages > num_pages; nr_pages--)
Jeff Layton5d81de82014-02-14 07:20:35 -05002654 put_page(wdata->pages[nr_pages - 1]);
2655
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002656 wdata->sync_mode = WB_SYNC_ALL;
2657 wdata->nr_pages = nr_pages;
2658 wdata->offset = (__u64)offset;
2659 wdata->cfile = cifsFileInfo_get(open_file);
2660 wdata->pid = pid;
2661 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002662 wdata->pagesz = PAGE_SIZE;
2663 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002664 wdata->credits = credits;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002665
2666 if (!wdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01002667 !(rc = cifs_reopen_file(wdata->cfile, false)))
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002668 rc = server->ops->async_writev(wdata,
2669 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002670 if (rc) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002671 add_credits_and_wake_if(server, wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002672 kref_put(&wdata->refcount,
2673 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002674 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002675 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002676 iov_iter_advance(from, offset - saved_offset);
2677 continue;
2678 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002679 break;
2680 }
2681
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002682 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002683 offset += cur_len;
2684 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002685 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002686
2687 return rc;
2688}
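/*
 * Roughly, each pass through the loop above turns one wsize-bounded slice
 * of the iterator into an independent request:
 *
 *	reserve credits -> allocate wdata + pages -> copy from the iov ->
 *	async_writev() -> queue the wdata on wdata_list
 *
 * On -EAGAIN the iterator is rewound to the failing slice's starting
 * offset and the slice is resent; any other failure returns the credits
 * and stops the loop.
 */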
2689
Al Viroe9d15932015-04-06 22:44:11 -04002690ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002691{
Al Viroe9d15932015-04-06 22:44:11 -04002692 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002693 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002694 struct cifsFileInfo *open_file;
2695 struct cifs_tcon *tcon;
2696 struct cifs_sb_info *cifs_sb;
2697 struct cifs_writedata *wdata, *tmp;
2698 struct list_head wdata_list;
Al Virofc56b982016-09-21 18:18:23 -04002699 struct iov_iter saved_from = *from;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002700 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002701
Al Viroe9d15932015-04-06 22:44:11 -04002702 /*
2703	 * BB - optimize the case when signing is disabled: we could drop this
2704	 * extra memory-to-memory copying and use iovec buffers to construct
2705	 * the write request.
2706 */
2707
Al Viro3309dd02015-04-09 12:55:47 -04002708 rc = generic_write_checks(iocb, from);
2709 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002710 return rc;
2711
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002712 INIT_LIST_HEAD(&wdata_list);
Al Viro7119e222014-10-22 00:25:12 -04002713 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002714 open_file = file->private_data;
2715 tcon = tlink_tcon(open_file->tlink);
2716
2717 if (!tcon->ses->server->ops->async_writev)
2718 return -ENOSYS;
2719
Al Viro3309dd02015-04-09 12:55:47 -04002720 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2721 open_file, cifs_sb, &wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002722
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002723 /*
2724 * If at least one write was successfully sent, then discard any rc
2725	 * value from the later writes. If a later write succeeds, then
2726 * we'll end up returning whatever was written. If it fails, then
2727 * we'll get a new rc value from that.
2728 */
2729 if (!list_empty(&wdata_list))
2730 rc = 0;
2731
2732 /*
2733 * Wait for and collect replies for any successful sends in order of
2734 * increasing offset. Once an error is hit or we get a fatal signal
2735 * while waiting, then return without waiting for any more replies.
2736 */
2737restart_loop:
2738 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2739 if (!rc) {
2740 /* FIXME: freezable too? */
2741 rc = wait_for_completion_killable(&wdata->done);
2742 if (rc)
2743 rc = -EINTR;
2744 else if (wdata->result)
2745 rc = wdata->result;
2746 else
2747 total_written += wdata->bytes;
2748
2749 /* resend call if it's a retryable error */
2750 if (rc == -EAGAIN) {
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002751 struct list_head tmp_list;
Al Virofc56b982016-09-21 18:18:23 -04002752 struct iov_iter tmp_from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002753
2754 INIT_LIST_HEAD(&tmp_list);
2755 list_del_init(&wdata->list);
2756
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002757 iov_iter_advance(&tmp_from,
Al Viroe9d15932015-04-06 22:44:11 -04002758 wdata->offset - iocb->ki_pos);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002759
2760 rc = cifs_write_from_iter(wdata->offset,
2761 wdata->bytes, &tmp_from,
2762 open_file, cifs_sb, &tmp_list);
2763
2764 list_splice(&tmp_list, &wdata_list);
2765
2766 kref_put(&wdata->refcount,
2767 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002768 goto restart_loop;
2769 }
2770 }
2771 list_del_init(&wdata->list);
Steve French4a5c80d2014-02-07 20:45:12 -06002772 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002773 }
2774
Al Viroe9d15932015-04-06 22:44:11 -04002775 if (unlikely(!total_written))
2776 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002777
Al Viroe9d15932015-04-06 22:44:11 -04002778 iocb->ki_pos += total_written;
2779 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002780 cifs_stats_bytes_written(tcon, total_written);
Al Viroe9d15932015-04-06 22:44:11 -04002781 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002782}
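/*
 * The collection loop above waits in list (i.e. increasing-offset) order:
 * a wdata that failed with -EAGAIN is reissued via cifs_write_from_iter()
 * using a copy of the original iterator advanced by wdata->offset -
 * iocb->ki_pos, and the fresh requests are spliced back into wdata_list
 * so the restarted walk picks them up; total_written counts only
 * completions that reported success.
 */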
2783
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002784static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002785cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002786{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002787 struct file *file = iocb->ki_filp;
2788 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2789 struct inode *inode = file->f_mapping->host;
2790 struct cifsInodeInfo *cinode = CIFS_I(inode);
2791 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002792 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002793
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002794 /*
2795	 * We need to hold the sem to be sure nobody modifies the lock list
2796 * with a brlock that prevents writing.
2797 */
2798 down_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002799 inode_lock(inode);
Al Viro5f380c72015-04-07 11:28:12 -04002800
Al Viro3309dd02015-04-09 12:55:47 -04002801 rc = generic_write_checks(iocb, from);
2802 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002803 goto out;
2804
Al Viro5f380c72015-04-07 11:28:12 -04002805 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002806 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002807 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002808 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002809 else
2810 rc = -EACCES;
2811out:
Al Viro59551022016-01-22 15:40:57 -05002812 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002813
Christoph Hellwige2592212016-04-07 08:52:01 -07002814 if (rc > 0)
2815 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002816 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002817 return rc;
2818}
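/*
 * Ordering note for cifs_writev() above: lock_sem is taken before the
 * inode lock and is dropped only after generic_write_sync() has run, so
 * no conflicting brlock can be added between the
 * cifs_find_lock_conflict() check and the moment the written data is
 * queued and synced.
 */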
2819
2820ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002821cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002822{
Al Viro496ad9a2013-01-23 17:07:38 -05002823 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002824 struct cifsInodeInfo *cinode = CIFS_I(inode);
2825 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2826 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2827 iocb->ki_filp->private_data;
2828 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002829 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002830
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002831 written = cifs_get_writer(cinode);
2832 if (written)
2833 return written;
2834
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002835 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002836 if (cap_unix(tcon->ses) &&
2837 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002838 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04002839 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002840 goto out;
2841 }
Al Viro3dae8752014-04-03 12:05:17 -04002842 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002843 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002844 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002845 /*
2846 * For non-oplocked files in strict cache mode we need to write the data
2847	 * to the server exactly from pos to pos+len-1 rather than flush all
2848	 * affected pages, because that may cause an error with mandatory locks on
2849	 * these pages but not on the region from pos to pos+len-1.
2850 */
Al Viro3dae8752014-04-03 12:05:17 -04002851 written = cifs_user_writev(iocb, from);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002852 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002853 /*
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002854 * We have read level caching and we have just sent a write
2855 * request to the server thus making data in the cache stale.
2856 * Zap the cache and set oplock/lease level to NONE to avoid
2857 * reading stale data from the cache. All subsequent read
2858 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002859 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002860 cifs_zap_mapping(inode);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002861 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05002862 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002863 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002864 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002865out:
2866 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002867 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002868}
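/*
 * Dispatch summary for cifs_strict_writev() above, as the code reads:
 *
 *	CACHE_WRITE + unix fcntl caps and no CIFS_MOUNT_NOPOSIXBRL
 *		-> generic_file_write_iter() (fully cached path)
 *	CACHE_WRITE otherwise
 *		-> cifs_writev() (cached, but brlock-conflict checked)
 *	no CACHE_WRITE
 *		-> cifs_user_writev(); then, if CACHE_READ, zap the mapping
 *		   and drop the oplock so stale cached reads are impossible
 */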
2869
Jeff Layton0471ca32012-05-16 07:13:16 -04002870static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002871cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002872{
2873 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002874
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002875 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2876 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002877 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002878 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002879 INIT_LIST_HEAD(&rdata->list);
2880 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002881 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002882 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002883
Jeff Layton0471ca32012-05-16 07:13:16 -04002884 return rdata;
2885}
2886
Jeff Layton6993f742012-05-16 07:13:17 -04002887void
2888cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002889{
Jeff Layton6993f742012-05-16 07:13:17 -04002890 struct cifs_readdata *rdata = container_of(refcount,
2891 struct cifs_readdata, refcount);
2892
2893 if (rdata->cfile)
2894 cifsFileInfo_put(rdata->cfile);
2895
Jeff Layton0471ca32012-05-16 07:13:16 -04002896 kfree(rdata);
2897}
2898
Jeff Layton2a1bb132012-05-16 07:13:17 -04002899static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002900cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002901{
2902 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002903 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002904 unsigned int i;
2905
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002906 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002907 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2908 if (!page) {
2909 rc = -ENOMEM;
2910 break;
2911 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002912 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002913 }
2914
2915 if (rc) {
Roberto Bergantinos Corpasdf2b6af2019-05-28 09:38:14 +02002916 unsigned int nr_page_failed = i;
2917
2918 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002919 put_page(rdata->pages[i]);
2920 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002921 }
2922 }
2923 return rc;
2924}
2925
2926static void
2927cifs_uncached_readdata_release(struct kref *refcount)
2928{
Jeff Layton1c892542012-05-16 07:13:17 -04002929 struct cifs_readdata *rdata = container_of(refcount,
2930 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002931 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002932
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002933 for (i = 0; i < rdata->nr_pages; i++) {
2934 put_page(rdata->pages[i]);
2935 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002936 }
2937 cifs_readdata_release(refcount);
2938}
2939
Jeff Layton1c892542012-05-16 07:13:17 -04002940/**
2941 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2942 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002943 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002944 *
2945 * This function copies data from a list of pages in a readdata response into
2946 * an array of iovecs. It will first calculate where the data should go
2947 * based on the info in the readdata and then copy the data into that spot.
2948 */
Al Viro7f25bba2014-02-04 14:07:43 -05002949static int
2950cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002951{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002952 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002953 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002954
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002955 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002956 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002957 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovskyc06d74d2017-01-19 13:53:15 -08002958 size_t written;
2959
2960 if (unlikely(iter->type & ITER_PIPE)) {
2961 void *addr = kmap_atomic(page);
2962
2963 written = copy_to_iter(addr, copy, iter);
2964 kunmap_atomic(addr);
2965 } else
2966 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05002967 remaining -= written;
2968 if (written < copy && iov_iter_count(iter) > 0)
2969 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002970 }
Al Viro7f25bba2014-02-04 14:07:43 -05002971 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002972}
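/*
 * On the ITER_PIPE special case above: these pages are private to the
 * rdata rather than page-cache pages, so handing the pipe references to
 * them (as copy_page_to_iter() would) does not work; the kmap_atomic() +
 * copy_to_iter() bounce forces a real data copy instead.
 */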
2973
2974static void
2975cifs_uncached_readv_complete(struct work_struct *work)
2976{
2977 struct cifs_readdata *rdata = container_of(work,
2978 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002979
2980 complete(&rdata->done);
2981 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2982}
2983
2984static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002985cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2986 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002987{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002988 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002989 unsigned int i;
2990 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002991
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002992 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07002993 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002994 for (i = 0; i < nr_pages; i++) {
2995 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05002996 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002997
Al Viro71335662016-01-09 19:54:50 -05002998 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002999 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003000 rdata->pages[i] = NULL;
3001 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04003002 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07003003 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003004 }
Al Viro71335662016-01-09 19:54:50 -05003005 n = len;
3006 if (len >= PAGE_SIZE) {
3007 /* enough data to fill the page */
3008 n = PAGE_SIZE;
3009 len -= n;
3010 } else {
3011 zero_user(page, len, PAGE_SIZE - len);
3012 rdata->tailsz = len;
3013 len = 0;
3014 }
3015 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003016 if (result < 0)
3017 break;
3018
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003019 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003020 }
3021
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003022 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3023 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003024}
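/*
 * Illustrative numbers for the helper above: a 10000-byte read into three
 * 4K pages receives 4096, 4096 and then 1808 bytes; the tail of the last
 * page is zeroed and rdata->tailsz becomes 1808. A transfer that dies
 * midway still returns the partial rdata->got_bytes, unless the failure
 * was -ECONNABORTED.
 */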
3025
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003026static int
3027cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3028 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003030 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003031 unsigned int npages, rsize, credits;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003032 size_t cur_len;
3033 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003034 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003035 struct TCP_Server_Info *server;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003036
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003037 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003038
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003039 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3040 pid = open_file->pid;
3041 else
3042 pid = current->tgid;
3043
Jeff Layton1c892542012-05-16 07:13:17 -04003044 do {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003045 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3046 &rsize, &credits);
3047 if (rc)
3048 break;
3049
3050 cur_len = min_t(const size_t, len, rsize);
Jeff Layton1c892542012-05-16 07:13:17 -04003051 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003052
Jeff Layton1c892542012-05-16 07:13:17 -04003053 /* allocate a readdata struct */
3054 rdata = cifs_readdata_alloc(npages,
3055 cifs_uncached_readv_complete);
3056 if (!rdata) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003057 add_credits_and_wake_if(server, credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003058 rc = -ENOMEM;
Jeff Laytonbae9f742014-04-15 12:48:49 -04003059 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003061
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003062 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04003063 if (rc)
3064 goto error;
3065
3066 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003067 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003068 rdata->offset = offset;
3069 rdata->bytes = cur_len;
3070 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003071 rdata->pagesz = PAGE_SIZE;
3072 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003073 rdata->credits = credits;
Jeff Layton1c892542012-05-16 07:13:17 -04003074
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003075 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003076 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003077 rc = server->ops->async_readv(rdata);
Jeff Layton1c892542012-05-16 07:13:17 -04003078error:
3079 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003080 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003081 kref_put(&rdata->refcount,
3082 cifs_uncached_readdata_release);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003083 if (rc == -EAGAIN)
3084 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003085 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 }
Jeff Layton1c892542012-05-16 07:13:17 -04003087
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003088 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003089 offset += cur_len;
3090 len -= cur_len;
3091 } while (len > 0);
3092
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003093 return rc;
3094}
3095
3096ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3097{
3098 struct file *file = iocb->ki_filp;
3099 ssize_t rc;
3100 size_t len;
3101 ssize_t total_read = 0;
3102 loff_t offset = iocb->ki_pos;
3103 struct cifs_sb_info *cifs_sb;
3104 struct cifs_tcon *tcon;
3105 struct cifsFileInfo *open_file;
3106 struct cifs_readdata *rdata, *tmp;
3107 struct list_head rdata_list;
3108
3109 len = iov_iter_count(to);
3110 if (!len)
3111 return 0;
3112
3113 INIT_LIST_HEAD(&rdata_list);
Al Viro7119e222014-10-22 00:25:12 -04003114 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003115 open_file = file->private_data;
3116 tcon = tlink_tcon(open_file->tlink);
3117
3118 if (!tcon->ses->server->ops->async_readv)
3119 return -ENOSYS;
3120
3121 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3122 cifs_dbg(FYI, "attempting read on write only file instance\n");
3123
3124 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3125
Jeff Layton1c892542012-05-16 07:13:17 -04003126 /* if at least one read request send succeeded, then reset rc */
3127 if (!list_empty(&rdata_list))
3128 rc = 0;
3129
Al Viroe6a7bcb2014-04-02 19:53:36 -04003130 len = iov_iter_count(to);
Jeff Layton1c892542012-05-16 07:13:17 -04003131 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003132again:
Jeff Layton1c892542012-05-16 07:13:17 -04003133 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3134 if (!rc) {
Jeff Layton1c892542012-05-16 07:13:17 -04003135 /* FIXME: freezable sleep too? */
3136 rc = wait_for_completion_killable(&rdata->done);
3137 if (rc)
3138 rc = -EINTR;
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003139 else if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003140 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003141 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003142 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003143
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003144 list_del_init(&rdata->list);
3145 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003146
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003147 /*
3148			 * We got part of the data and then a reconnect
3149 * happened -- fill the buffer and continue
3150 * reading.
3151 */
3152 if (got_bytes && got_bytes < rdata->bytes) {
3153 rc = cifs_readdata_to_iov(rdata, to);
3154 if (rc) {
3155 kref_put(&rdata->refcount,
3156 cifs_uncached_readdata_release);
3157 continue;
3158 }
3159 }
3160
3161 rc = cifs_send_async_read(
3162 rdata->offset + got_bytes,
3163 rdata->bytes - got_bytes,
3164 rdata->cfile, cifs_sb,
3165 &tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003166
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003167 list_splice(&tmp_list, &rdata_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003168
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003169 kref_put(&rdata->refcount,
3170 cifs_uncached_readdata_release);
3171 goto again;
3172 } else if (rdata->result)
3173 rc = rdata->result;
3174 else
Jeff Layton1c892542012-05-16 07:13:17 -04003175 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003176
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003177 /* if there was a short read -- discard anything left */
3178 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3179 rc = -ENODATA;
Jeff Layton1c892542012-05-16 07:13:17 -04003180 }
3181 list_del_init(&rdata->list);
3182 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003184
Al Viroe6a7bcb2014-04-02 19:53:36 -04003185 total_read = len - iov_iter_count(to);
Al Viro7f25bba2014-02-04 14:07:43 -05003186
Jeff Layton1c892542012-05-16 07:13:17 -04003187 cifs_stats_bytes_read(tcon, total_read);
Jeff Layton1c892542012-05-16 07:13:17 -04003188
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003189 /* mask nodata case */
3190 if (rc == -ENODATA)
3191 rc = 0;
3192
Al Viro0165e812014-02-04 14:19:48 -05003193 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003194 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003195 return total_read;
3196 }
3197 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003198}
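/*
 * As on the write side, a request above that ends in -EAGAIN after a
 * partial transfer is not simply retried whole: what already arrived is
 * drained into the iterator first, then cifs_send_async_read() reissues
 * only the missing tail (rdata->offset + got_bytes onward) and splices
 * the new requests back into rdata_list.
 */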
3199
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003200ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003201cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003202{
Al Viro496ad9a2013-01-23 17:07:38 -05003203 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003204 struct cifsInodeInfo *cinode = CIFS_I(inode);
3205 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3206 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3207 iocb->ki_filp->private_data;
3208 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3209 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003210
3211 /*
3212 * In strict cache mode we need to read from the server all the time
3213 * if we don't have a level II oplock, because the server can delay the
3214 * mtime change and so we can't decide whether to invalidate the inode.
3215 * We can also fail at page reading if there are mandatory locks
3216 * on pages affected by this read but not on the region from pos to
3217 * pos+len-1.
3218 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003219 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003220 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003221
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003222 if (cap_unix(tcon->ses) &&
3223 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3224 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003225 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003226
3227 /*
3228	 * We need to hold the sem to be sure nobody modifies the lock list
3229 * with a brlock that prevents reading.
3230 */
3231 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003232 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003233 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04003234 NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003235 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003236 up_read(&cinode->lock_sem);
3237 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003238}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003240static ssize_t
3241cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
3243 int rc = -EACCES;
3244 unsigned int bytes_read = 0;
3245 unsigned int total_read;
3246 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003247 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003249 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003250 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003251 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003252 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003254 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003255 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003256 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003258 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003259 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003261 /* FIXME: set up handlers for larger reads and/or convert to async */
3262 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3263
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303265 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003266 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303267 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003269 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003270 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003271 server = tcon->ses->server;
3272
3273 if (!server->ops->sync_read) {
3274 free_xid(xid);
3275 return -ENOSYS;
3276 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003278 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3279 pid = open_file->pid;
3280 else
3281 pid = current->tgid;
3282
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003284 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003286 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3287 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003288 do {
3289 current_read_size = min_t(uint, read_size - total_read,
3290 rsize);
3291 /*
3292			 * For Windows ME and 9x we do not want to request more
3293			 * than was negotiated, since the server will then refuse
3294			 * the read.
3295 */
3296 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003297 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003298 current_read_size = min_t(uint,
3299 current_read_size, CIFSMaxBufSize);
3300 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003301 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003302 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 if (rc != 0)
3304 break;
3305 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003306 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003307 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003308 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003309 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003310 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003311 &bytes_read, &cur_offset,
3312 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003313 } while (rc == -EAGAIN);
3314
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 if (rc || (bytes_read == 0)) {
3316 if (total_read) {
3317 break;
3318 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003319 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 return rc;
3321 }
3322 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003323 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003324 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325 }
3326 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003327 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 return total_read;
3329}
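/*
 * cifs_read() above is the legacy synchronous fallback (note the FIXME):
 * it loops in chunks of at most min(rsize, CIFSMaxBufSize) bytes,
 * reopening an invalidated handle and repeating a chunk on -EAGAIN, and
 * returns the accumulated count as long as at least one chunk succeeded.
 */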
3330
Jeff Laytonca83ce32011-04-12 09:13:44 -04003331/*
3332 * If the page is mmap'ed into a process' page tables, then we need to make
3333 * sure that it doesn't change while being written back.
3334 */
3335static int
3336cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3337{
3338 struct page *page = vmf->page;
3339
3340 lock_page(page);
3341 return VM_FAULT_LOCKED;
3342}
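/*
 * Returning VM_FAULT_LOCKED above, with the page still locked, is what
 * keeps concurrent writeback from snapshotting the page mid-modification;
 * the fault path that called ->page_mkwrite dirties and unlocks the page
 * afterwards.
 */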
3343
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003344static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003345 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003346 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003347 .page_mkwrite = cifs_page_mkwrite,
3348};
3349
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003350int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3351{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003352 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003353 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003354
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003355 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003356
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003357 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003358 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003359 if (!rc)
3360 rc = generic_file_mmap(file, vma);
3361 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003362 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003363
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003364 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003365 return rc;
3366}
3367
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3369{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 int rc, xid;
3371
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003372 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003373
Jeff Laytonabab0952010-02-12 07:44:18 -05003374 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003375 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003376 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3377 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003378 if (!rc)
3379 rc = generic_file_mmap(file, vma);
3380 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003381 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003382
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003383 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 return rc;
3385}
3386
static void
cifs_readv_complete(struct work_struct *work)
{
        unsigned int i, got_bytes;
        struct cifs_readdata *rdata = container_of(work,
                                                struct cifs_readdata, work);

        got_bytes = rdata->got_bytes;
        for (i = 0; i < rdata->nr_pages; i++) {
                struct page *page = rdata->pages[i];

                lru_cache_add_file(page);

                if (rdata->result == 0 ||
                    (rdata->result == -EAGAIN && got_bytes)) {
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                }

                unlock_page(page);

                if (rdata->result == 0 ||
                    (rdata->result == -EAGAIN && got_bytes))
                        cifs_readpage_to_fscache(rdata->mapping->host, page);

                got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

                put_page(page);
                rdata->pages[i] = NULL;
        }
        kref_put(&rdata->refcount, cifs_readdata_release);
}

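/*
 * Receive side of an async readpages request: read up to @len bytes
 * from the socket into rdata->pages. A trailing partial page has its
 * remainder zeroed; pages beyond the server's EOF are zero-filled and
 * marked uptodate; any other page with no data left is released.
 */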
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
                               struct cifs_readdata *rdata, unsigned int len)
{
        int result = 0;
        unsigned int i;
        u64 eof;
        pgoff_t eof_index;
        unsigned int nr_pages = rdata->nr_pages;

        /* determine the eof that the server (probably) has */
        eof = CIFS_I(rdata->mapping->host)->server_eof;
        eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
        cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

        rdata->got_bytes = 0;
        rdata->tailsz = PAGE_SIZE;
        for (i = 0; i < nr_pages; i++) {
                struct page *page = rdata->pages[i];
                size_t n = PAGE_SIZE;

                if (len >= PAGE_SIZE) {
                        len -= PAGE_SIZE;
                } else if (len > 0) {
                        /* enough for partial page, fill and zero the rest */
                        zero_user(page, len, PAGE_SIZE - len);
                        n = rdata->tailsz = len;
                        len = 0;
                } else if (page->index > eof_index) {
                        /*
                         * The VFS will not try to do readahead past the
                         * i_size, but it's possible that we have outstanding
                         * writes with gaps in the middle and the i_size hasn't
                         * caught up yet. Populate those with zeroed out pages
                         * to prevent the VFS from repeatedly attempting to
                         * fill them until the writes are flushed.
                         */
                        zero_user(page, 0, PAGE_SIZE);
                        lru_cache_add_file(page);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        put_page(page);
                        rdata->pages[i] = NULL;
                        rdata->nr_pages--;
                        continue;
                } else {
                        /* no need to hold page hostage */
                        lru_cache_add_file(page);
                        unlock_page(page);
                        put_page(page);
                        rdata->pages[i] = NULL;
                        rdata->nr_pages--;
                        continue;
                }

                result = cifs_read_page_from_socket(server, page, n);
                if (result < 0)
                        break;

                rdata->got_bytes += result;
        }

        return rdata->got_bytes > 0 && result != -ECONNABORTED ?
                                                rdata->got_bytes : result;
}

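/*
 * Peel a run of consecutively indexed pages off the tail of @page_list
 * and add them, locked, to the page cache and to @tmplist. The run is
 * capped at @rsize bytes; its starting file offset, total byte count
 * and page count are returned via @offset, @bytes and @nr_pages.
 */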
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                    unsigned int rsize, struct list_head *tmplist,
                    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
        struct page *page, *tpage;
        unsigned int expected_index;
        int rc;
        gfp_t gfp = readahead_gfp_mask(mapping);

        INIT_LIST_HEAD(tmplist);

        page = list_entry(page_list->prev, struct page, lru);

        /*
         * Lock the page and put it in the cache. Since no one else
         * should have access to this page, we're safe to simply set
         * PG_locked without checking it first.
         */
        __SetPageLocked(page);
        rc = add_to_page_cache_locked(page, mapping,
                                      page->index, gfp);

        /* give up if we can't stick it in the cache */
        if (rc) {
                __ClearPageLocked(page);
                return rc;
        }

        /* move first page to the tmplist */
        *offset = (loff_t)page->index << PAGE_SHIFT;
        *bytes = PAGE_SIZE;
        *nr_pages = 1;
        list_move_tail(&page->lru, tmplist);

        /* now try and add more pages onto the request */
        expected_index = page->index + 1;
        list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
                /* discontinuity ? */
                if (page->index != expected_index)
                        break;

                /* would this page push the read over the rsize? */
                if (*bytes + PAGE_SIZE > rsize)
                        break;

                __SetPageLocked(page);
                if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
                        __ClearPageLocked(page);
                        break;
                }
                list_move_tail(&page->lru, tmplist);
                (*bytes) += PAGE_SIZE;
                expected_index++;
                (*nr_pages)++;
        }
        return rc;
}

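/*
 * The .readpages address_space operation: satisfy as much as possible
 * from fscache, then batch the remaining pages into rsize-bounded
 * chunks (see readpages_get_pages above) and issue an async read for
 * each chunk, with cifs_readv_complete finishing them off.
 */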
static int cifs_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *page_list, unsigned num_pages)
{
        int rc;
        struct list_head tmplist;
        struct cifsFileInfo *open_file = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
        struct TCP_Server_Info *server;
        pid_t pid;

        /*
         * Reads as many pages as possible from fscache. Returns -ENOBUFS
         * immediately if the cookie is negative
         *
         * After this point, every page in the list might have PG_fscache set,
         * so we will need to clean that up off of every page we don't use.
         */
        rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
                                         &num_pages);
        if (rc == 0)
                return rc;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
        else
                pid = current->tgid;

        rc = 0;
        server = tlink_tcon(open_file->tlink)->ses->server;

        cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
                 __func__, file, mapping, num_pages);

        /*
         * Start with the page at end of list and move it to private
         * list. Do the same with any following pages until we hit
         * the rsize limit, hit an index discontinuity, or run out of
         * pages. Issue the async read and then start the loop again
         * until the list is empty.
         *
         * Note that list order is important. The page_list is in
         * the order of declining indexes. When we put the pages in
         * the rdata->pages, then we want them in increasing order.
         */
        while (!list_empty(page_list)) {
                unsigned int i, nr_pages, bytes, rsize;
                loff_t offset;
                struct page *page, *tpage;
                struct cifs_readdata *rdata;
                unsigned credits;

                rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
                                                   &rsize, &credits);
                if (rc)
                        break;

                /*
                 * Give up immediately if rsize is too small to read an entire
                 * page. The VFS will fall back to readpage. We should never
                 * reach this point however since we set ra_pages to 0 when the
                 * rsize is smaller than a cache page.
                 */
                if (unlikely(rsize < PAGE_SIZE)) {
                        add_credits_and_wake_if(server, credits, 0);
                        return 0;
                }

                rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
                                         &nr_pages, &offset, &bytes);
                if (rc) {
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }

                rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
                if (!rdata) {
                        /* best to give up if we're out of mem */
                        list_for_each_entry_safe(page, tpage, &tmplist, lru) {
                                list_del(&page->lru);
                                lru_cache_add_file(page);
                                unlock_page(page);
                                put_page(page);
                        }
                        rc = -ENOMEM;
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }

                rdata->cfile = cifsFileInfo_get(open_file);
                rdata->mapping = mapping;
                rdata->offset = offset;
                rdata->bytes = bytes;
                rdata->pid = pid;
                rdata->pagesz = PAGE_SIZE;
                rdata->read_into_pages = cifs_readpages_read_into_pages;
                rdata->credits = credits;

                list_for_each_entry_safe(page, tpage, &tmplist, lru) {
                        list_del(&page->lru);
                        rdata->pages[rdata->nr_pages++] = page;
                }

                if (!rdata->cfile->invalidHandle ||
                    !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
                        for (i = 0; i < rdata->nr_pages; i++) {
                                page = rdata->pages[i];
                                lru_cache_add_file(page);
                                unlock_page(page);
                                put_page(page);
                        }
                        /* Fallback to the readpage in error/reconnect cases */
                        kref_put(&rdata->refcount, cifs_readdata_release);
                        break;
                }

                kref_put(&rdata->refcount, cifs_readdata_release);
        }

        /* Any pages that have been shown to fscache but didn't get added to
         * the pagecache must be uncached before they get returned to the
         * allocator.
         */
        cifs_fscache_readpages_cancel(mapping->host, page_list);
        return rc;
}

/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
                                loff_t *poffset)
{
        char *read_data;
        int rc;

        /* Is the page cached? */
        rc = cifs_readpage_from_fscache(file_inode(file), page);
        if (rc == 0)
                goto read_complete;

        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */

        rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

        if (rc < 0)
                goto io_error;
        else
                cifs_dbg(FYI, "Bytes read %d\n", rc);

        file_inode(file)->i_atime =
                current_time(file_inode(file));

        if (PAGE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_SIZE - rc);

        flush_dcache_page(page);
        SetPageUptodate(page);

        /* send this page to the cache */
        cifs_readpage_to_fscache(file_inode(file), page);

        rc = 0;

io_error:
        kunmap(page);
        unlock_page(page);

read_complete:
        return rc;
}

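/*
 * The .readpage address_space operation: synchronously read a single
 * page via cifs_readpage_worker, after checking that the file still
 * has CIFS private data (an open handle) to read with.
 */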
static int cifs_readpage(struct file *file, struct page *page)
{
        loff_t offset = (loff_t)page->index << PAGE_SHIFT;
        int rc = -EACCES;
        unsigned int xid;

        xid = get_xid();

        if (file->private_data == NULL) {
                rc = -EBADF;
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
                 page, (int)offset, (int)offset);

        rc = cifs_readpage_worker(file, page, &offset);

        free_xid(xid);
        return rc;
}

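/*
 * Returns 1 if any file currently open against this inode was opened
 * with write access, 0 otherwise. Used below to decide whether it is
 * safe to take the file size reported by the server.
 */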
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;
        struct cifs_tcon *tcon =
                cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

        spin_lock(&tcon->open_file_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
                        spin_unlock(&tcon->open_file_lock);
                        return 1;
                }
        }
        spin_unlock(&tcon->open_file_lock);
        return 0;
}

/*
 * We do not want to update the file size from the server for inodes
 * open for write - to avoid races with writepage extending the file.
 * In the future we could consider allowing refreshing the inode only
 * on increases in the file size, but this is tricky to do without
 * racing with writebehind page caching in the current Linux kernel
 * design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
        if (!cifsInode)
                return true;

        if (is_inode_writable(cifsInode)) {
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;

                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                           we can change size safely */
                        return true;
                }

                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
                        return true;

                return false;
        } else
                return true;
}

static int cifs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int oncethru = 0;
        pgoff_t index = pos >> PAGE_SHIFT;
        loff_t offset = pos & (PAGE_SIZE - 1);
        loff_t page_start = pos & PAGE_MASK;
        loff_t i_size;
        struct page *page;
        int rc = 0;

        cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                rc = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page))
                goto out;

        /*
         * If we write a full page it will be up to date, no need to read from
         * the server. If the write is short, we'll end up doing a sync write
         * instead.
         */
        if (len == PAGE_SIZE)
                goto out;

        /*
         * optimize away the read when we have an oplock, and we're not
         * expecting to use any of the data we'd be reading in. That
         * is, when the page lies beyond the EOF, or straddles the EOF
         * and the write will cover all of the existing data.
         */
        if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
                i_size = i_size_read(mapping->host);
                if (page_start >= i_size ||
                    (offset == 0 && (pos + len) >= i_size)) {
                        zero_user_segments(page, 0, offset,
                                           offset + len,
                                           PAGE_SIZE);
                        /*
                         * PageChecked means that the parts of the page
                         * to which we're not writing are considered up
                         * to date. Once the data is copied to the
                         * page, it can be set uptodate.
                         */
                        SetPageChecked(page);
                        goto out;
                }
        }

        if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
                /*
                 * might as well read a page, it is fast enough. If we get
                 * an error, we don't need to return it. cifs_write_end will
                 * do a sync write instead since PG_uptodate isn't set.
                 */
                cifs_readpage_worker(file, page, &page_start);
                put_page(page);
                oncethru = 1;
                goto start;
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
                   racing with this read? In any case
                   this will be written out by write_end so is fine */
        }
out:
        *pagep = page;
        return rc;
}

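/*
 * The .releasepage address_space operation: a page carrying private
 * data can never be released; otherwise let fscache decide whether it
 * still needs the page.
 */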
static int cifs_release_page(struct page *page, gfp_t gfp)
{
        if (PagePrivate(page))
                return 0;

        return cifs_fscache_release_page(page, gfp);
}

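/*
 * The .invalidatepage address_space operation: only a whole-page
 * invalidation needs to be propagated to fscache.
 */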
static void cifs_invalidate_page(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

        if (offset == 0 && length == PAGE_SIZE)
                cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

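/*
 * The .launder_page address_space operation: write a dirty page back
 * synchronously before it is invalidated, then drop it from fscache.
 */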
static int cifs_launder_page(struct page *page)
{
        int rc = 0;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };

        cifs_dbg(FYI, "Launder page: %p\n", page);

        if (clear_page_dirty_for_io(page))
                rc = cifs_writepage_locked(page, &wbc);

        cifs_fscache_invalidate_page(page, page->mapping->host);
        return rc;
}

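/*
 * Work handler run when the server breaks our oplock/lease: downgrade
 * the inode's caching level, flush (and possibly invalidate) the page
 * cache, push any cached byte-range locks back to the server, and
 * finally acknowledge the break unless it was cancelled.
 */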
void cifs_oplock_break(struct work_struct *work)
{
        struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
                                                  oplock_break);
        struct inode *inode = d_inode(cfile->dentry);
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        int rc = 0;

        wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
                    TASK_UNINTERRUPTIBLE);

        server->ops->downgrade_oplock(server, cinode,
                test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

        if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
            cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
                         inode);
                cinode->oplock = 0;
        }

        if (inode && S_ISREG(inode->i_mode)) {
                if (CIFS_CACHE_READ(cinode))
                        break_lease(inode, O_RDONLY);
                else
                        break_lease(inode, O_WRONLY);
                rc = filemap_fdatawrite(inode->i_mapping);
                if (!CIFS_CACHE_READ(cinode)) {
                        rc = filemap_fdatawait(inode->i_mapping);
                        mapping_set_error(inode->i_mapping, rc);
                        cifs_zap_mapping(inode);
                }
                cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
        }

        rc = cifs_push_locks(cfile);
        if (rc)
                cifs_dbg(VFS, "Push locks rc = %d\n", rc);

        /*
         * Releasing a stale oplock after a recent reconnect of the SMB
         * session (using a now-incorrect file handle) is not a data
         * integrity issue, but do not bother sending an oplock release
         * if the session to the server is still disconnected, since the
         * oplock has already been released by the server.
         */
        if (!cfile->oplock_break_cancelled) {
                rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
                                                             cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
        _cifsFileInfo_put(cfile, false /* do not wait for ourselves */);
        cifs_done_oplock_break(cinode);
}

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
        /*
         * FIXME
         * Eventually need to support direct IO for non forcedirectio mounts
         */
        return -EINVAL;
}


const struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .direct_IO = cifs_direct_io,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
        .readpage = cifs_readpage,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .write_begin = cifs_write_begin,
        .write_end = cifs_write_end,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .releasepage = cifs_release_page,
        .invalidatepage = cifs_invalidate_page,
        .launder_page = cifs_launder_page,
};