Pavel Shilovskyec2e4522011-12-27 16:12:43 +04001/*
2 * fs/cifs/smb2pdu.c
3 *
Steve French2b80d042013-06-23 18:43:37 -05004 * Copyright (C) International Business Machines Corp., 2009, 2013
Pavel Shilovskyec2e4522011-12-27 16:12:43 +04005 * Etersoft, 2012
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
8 *
9 * Contains the routines for constructing the SMB2 PDUs themselves
10 *
11 * This library is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU Lesser General Public License as published
13 * by the Free Software Foundation; either version 2.1 of the License, or
14 * (at your option) any later version.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
19 * the GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this library; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
27 /* Note that there are handle based routines which must be */
28 /* treated slightly differently for reconnection purposes since we never */
29 /* want to reuse a stale file handle and only the caller knows the file info */
30
31#include <linux/fs.h>
32#include <linux/kernel.h>
33#include <linux/vfs.h>
Pavel Shilovsky09a47072012-09-18 16:20:29 -070034#include <linux/task_io_accounting_ops.h>
Pavel Shilovskyec2e4522011-12-27 16:12:43 +040035#include <linux/uaccess.h>
Pavel Shilovsky33319142012-09-18 16:20:29 -070036#include <linux/pagemap.h>
Pavel Shilovskyec2e4522011-12-27 16:12:43 +040037#include <linux/xattr.h>
38#include "smb2pdu.h"
39#include "cifsglob.h"
40#include "cifsacl.h"
41#include "cifsproto.h"
42#include "smb2proto.h"
43#include "cifs_unicode.h"
44#include "cifs_debug.h"
45#include "ntlmssp.h"
46#include "smb2status.h"
Pavel Shilovsky09a47072012-09-18 16:20:29 -070047#include "smb2glob.h"
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -070048#include "cifspdu.h"
Pavel Shilovskyec2e4522011-12-27 16:12:43 +040049
50/*
51 * The following table defines the expected "StructureSize" of SMB2 requests
52 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
53 *
54 * Note that commands are defined in smb2pdu.h in le16 but the array below is
55 * indexed by command in host byte order.
56 */
57static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
58 /* SMB2_NEGOTIATE */ 36,
59 /* SMB2_SESSION_SETUP */ 25,
60 /* SMB2_LOGOFF */ 4,
61 /* SMB2_TREE_CONNECT */ 9,
62 /* SMB2_TREE_DISCONNECT */ 4,
63 /* SMB2_CREATE */ 57,
64 /* SMB2_CLOSE */ 24,
65 /* SMB2_FLUSH */ 24,
66 /* SMB2_READ */ 49,
67 /* SMB2_WRITE */ 49,
68 /* SMB2_LOCK */ 48,
69 /* SMB2_IOCTL */ 57,
70 /* SMB2_CANCEL */ 4,
71 /* SMB2_ECHO */ 4,
72 /* SMB2_QUERY_DIRECTORY */ 33,
73 /* SMB2_CHANGE_NOTIFY */ 32,
74 /* SMB2_QUERY_INFO */ 41,
75 /* SMB2_SET_INFO */ 33,
76 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
77};
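/*
 * Note: the odd sizes above (e.g. 57 for CREATE, 49 for READ/WRITE) are the
 * MS-SMB2 StructureSize values, which count the single placeholder byte of
 * the variable length Buffer[] at the end of those request structures.
 */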
78
79
80static void
81smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
82 const struct cifs_tcon *tcon)
83{
84 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
85 char *temp = (char *)hdr;
86 /* lookup word count ie StructureSize from table */
87 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
88
89 /*
 90	 * smaller than SMALL_BUFFER_SIZE but bigger than the fixed area of
 91	 * the largest operation (Create)
92 */
93 memset(temp, 0, 256);
94
 95	/* Note this is the only network field converted to big endian */
96 hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
97 - 4 /* RFC 1001 length field itself not counted */);
98
99 hdr->ProtocolId[0] = 0xFE;
100 hdr->ProtocolId[1] = 'S';
101 hdr->ProtocolId[2] = 'M';
102 hdr->ProtocolId[3] = 'B';
103 hdr->StructureSize = cpu_to_le16(64);
104 hdr->Command = smb2_cmd;
105 hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
106 hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
107
108 if (!tcon)
109 goto out;
110
Steve French2b80d042013-06-23 18:43:37 -0500111 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
112 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
Steve French84ceeb92013-06-26 17:52:17 -0500113 if ((tcon->ses) &&
114 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
Steve French2b80d042013-06-23 18:43:37 -0500115 hdr->CreditCharge = cpu_to_le16(1);
116 /* else CreditCharge MBZ */
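	/*
	 * Note: CreditRequest above asks the server to grant us more credits,
	 * while CreditCharge is the number of credits this request consumes
	 * (only meaningful on dialects that negotiated the large MTU /
	 * multi-credit capability, hence the check above).
	 */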
117
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400118 hdr->TreeId = tcon->tid;
119 /* Uid is not converted */
120 if (tcon->ses)
121 hdr->SessionId = tcon->ses->Suid;
Steve Frenchf87ab882013-06-26 19:14:55 -0500122
123 /*
124 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
125 * to pass the path on the Open SMB prefixed by \\server\share.
126 * Not sure when we would need to do the augmented path (if ever) and
127 * setting this flag breaks the SMB2 open operation since it is
128 * illegal to send an empty path name (without \\server\share prefix)
129 * when the DFS flag is set in the SMB open header. We could
130 * consider setting the flag on all operations other than open
 131	 * but it is safer to not set it for now.
132 */
133/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
134 hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
135
Jeff Layton38d77c52013-05-26 07:01:00 -0400136 if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -0700137 hdr->Flags |= SMB2_FLAGS_SIGNED;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400138out:
139 pdu->StructureSize2 = cpu_to_le16(parmsize);
140 return;
141}
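/*
 * After smb2_hdr_assemble() the request buffer starts with:
 *
 *	[ 4 byte RFC1001 style length (big endian, not counting itself) ]
 *	[ 64 byte SMB2 header (StructureSize = 64)                      ]
 *	[ command specific fixed area of StructureSize2 bytes           ]
 *
 * The caller fills in the command specific fields and appends any
 * variable length data before the request is sent.
 */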
142
143static int
144smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
145{
146 int rc = 0;
Pavel Shilovskyaa24d1e2011-12-27 16:23:34 +0400147 struct nls_table *nls_codepage;
148 struct cifs_ses *ses;
149 struct TCP_Server_Info *server;
150
151 /*
 152	 * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so the
 153	 * tcp and smb session status checks for those three are done
 154	 * differently - in the calling routine.
155 */
156 if (tcon == NULL)
157 return rc;
158
159 if (smb2_command == SMB2_TREE_CONNECT)
160 return rc;
161
162 if (tcon->tidStatus == CifsExiting) {
163 /*
164 * only tree disconnect, open, and write,
165 * (and ulogoff which does not have tcon)
 166	 * are allowed once a forced umount has begun.
167 */
168 if ((smb2_command != SMB2_WRITE) &&
169 (smb2_command != SMB2_CREATE) &&
170 (smb2_command != SMB2_TREE_DISCONNECT)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500171 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
172 smb2_command);
Pavel Shilovskyaa24d1e2011-12-27 16:23:34 +0400173 return -ENODEV;
174 }
175 }
176 if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
177 (!tcon->ses->server))
178 return -EIO;
179
180 ses = tcon->ses;
181 server = ses->server;
182
183 /*
184 * Give demultiplex thread up to 10 seconds to reconnect, should be
185 * greater than cifs socket timeout which is 7 seconds
186 */
187 while (server->tcpStatus == CifsNeedReconnect) {
188 /*
189 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
190 * here since they are implicitly done when session drops.
191 */
192 switch (smb2_command) {
193 /*
194 * BB Should we keep oplock break and add flush to exceptions?
195 */
196 case SMB2_TREE_DISCONNECT:
197 case SMB2_CANCEL:
198 case SMB2_CLOSE:
199 case SMB2_OPLOCK_BREAK:
200 return -EAGAIN;
201 }
202
203 wait_event_interruptible_timeout(server->response_q,
204 (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
205
206 /* are we still trying to reconnect? */
207 if (server->tcpStatus != CifsNeedReconnect)
208 break;
209
210 /*
211 * on "soft" mounts we wait once. Hard mounts keep
212 * retrying until process is killed or server comes
213 * back on-line
214 */
215 if (!tcon->retry) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500216 cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
Pavel Shilovskyaa24d1e2011-12-27 16:23:34 +0400217 return -EHOSTDOWN;
218 }
219 }
220
221 if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
222 return rc;
223
224 nls_codepage = load_nls_default();
225
226 /*
227 * need to prevent multiple threads trying to simultaneously reconnect
228 * the same SMB session
229 */
230 mutex_lock(&tcon->ses->session_mutex);
231 rc = cifs_negotiate_protocol(0, tcon->ses);
232 if (!rc && tcon->ses->need_reconnect)
233 rc = cifs_setup_session(0, tcon->ses, nls_codepage);
234
235 if (rc || !tcon->need_reconnect) {
236 mutex_unlock(&tcon->ses->session_mutex);
237 goto out;
238 }
239
240 cifs_mark_open_files_invalid(tcon);
241 rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
242 mutex_unlock(&tcon->ses->session_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500243 cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
Pavel Shilovskyaa24d1e2011-12-27 16:23:34 +0400244 if (rc)
245 goto out;
246 atomic_inc(&tconInfoReconnectCount);
Pavel Shilovskyaa24d1e2011-12-27 16:23:34 +0400247out:
248 /*
249 * Check if handle based operation so we know whether we can continue
250 * or not without returning to caller to reset file handle.
251 */
252 /*
253 * BB Is flush done by server on drop of tcp session? Should we special
254 * case it and skip above?
255 */
256 switch (smb2_command) {
257 case SMB2_FLUSH:
258 case SMB2_READ:
259 case SMB2_WRITE:
260 case SMB2_LOCK:
261 case SMB2_IOCTL:
262 case SMB2_QUERY_DIRECTORY:
263 case SMB2_CHANGE_NOTIFY:
264 case SMB2_QUERY_INFO:
265 case SMB2_SET_INFO:
266 return -EAGAIN;
267 }
268 unload_nls(nls_codepage);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400269 return rc;
270}
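/*
 * Note: returning -EAGAIN above tells the caller that the session/tcon was
 * (or is being) re-established but any file handles are now stale, so
 * handle based requests must be reissued by the caller once it has
 * reopened the file (see the comment at the top of this file).
 */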
271
272/*
273 * Allocate and return pointer to an SMB request hdr, and set basic
274 * SMB information in the SMB header. If the return code is zero, this
 275	 * function must have filled in the request_buf pointer.
276 */
277static int
278small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
279 void **request_buf)
280{
281 int rc = 0;
282
283 rc = smb2_reconnect(smb2_command, tcon);
284 if (rc)
285 return rc;
286
287 /* BB eventually switch this to SMB2 specific small buf size */
288 *request_buf = cifs_small_buf_get();
289 if (*request_buf == NULL) {
290 /* BB should we add a retry in here if not a writepage? */
291 return -ENOMEM;
292 }
293
294 smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
295
296 if (tcon != NULL) {
297#ifdef CONFIG_CIFS_STATS2
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400298 uint16_t com_code = le16_to_cpu(smb2_command);
299 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400300#endif
301 cifs_stats_inc(&tcon->num_smbs_sent);
302 }
303
304 return rc;
305}
306
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400307/*
308 *
309 * SMB2 Worker functions follow:
310 *
311 * The general structure of the worker functions is:
 312	 * 1) Call small_smb2_init (assembles SMB2 header)
313 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
 314	 * 3) Call SendReceive2 (sends request on socket and waits for response)
315 * 4) Decode SMB2 command specific fields in the fixed length area
316 * 5) Decode variable length data area (if any for this SMB2 command type)
317 * 6) Call free smb buffer
318 * 7) return
319 *
320 */
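/*
 * Illustrative sketch only (not compiled): a typical worker following the
 * steps above, where SMB2_FOO and smb2_foo_rsp are placeholder names:
 *
 *	rc = small_smb2_init(SMB2_FOO, tcon, (void **) &req);
 *	if (rc)
 *		return rc;
 *	... set command specific fields in the fixed area of req ...
 *	iov[0].iov_base = (char *)req;
 *	iov[0].iov_len = get_rfc1002_length(req) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
 *	rsp = (struct smb2_foo_rsp *)iov[0].iov_base;
 *	... decode fixed, then variable length, response fields ...
 *	free_rsp_buf(resp_buftype, rsp);
 *	return rc;
 */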
321
322int
323SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
324{
325 struct smb2_negotiate_req *req;
326 struct smb2_negotiate_rsp *rsp;
327 struct kvec iov[1];
328 int rc = 0;
329 int resp_buftype;
Jeff Layton3534b852013-05-24 07:41:01 -0400330 struct TCP_Server_Info *server = ses->server;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400331 int blob_offset, blob_length;
332 char *security_blob;
333 int flags = CIFS_NEG_OP;
334
Joe Perchesf96637b2013-05-04 22:12:25 -0500335 cifs_dbg(FYI, "Negotiate protocol\n");
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400336
Jeff Layton3534b852013-05-24 07:41:01 -0400337 if (!server) {
338 WARN(1, "%s: server is NULL!\n", __func__);
339 return -EIO;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400340 }
341
342 rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
343 if (rc)
344 return rc;
345
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400346 req->hdr.SessionId = 0;
347
Steve Frenche4aa25e2012-10-01 12:26:22 -0500348 req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400349
Steve Frenche4aa25e2012-10-01 12:26:22 -0500350 req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
351 inc_rfc1001_len(req, 2);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400352
353 /* only one of SMB2 signing flags may be set in SMB2 request */
Jeff Layton38d77c52013-05-26 07:01:00 -0400354 if (ses->sign)
Steve French9cd2e622013-06-12 19:59:03 -0500355 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
Jeff Layton38d77c52013-05-26 07:01:00 -0400356 else if (global_secflags & CIFSSEC_MAY_SIGN)
Steve French9cd2e622013-06-12 19:59:03 -0500357 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
Jeff Layton38d77c52013-05-26 07:01:00 -0400358 else
359 req->SecurityMode = 0;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400360
Steve Frenche4aa25e2012-10-01 12:26:22 -0500361 req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400362
Steve French3c5f9be12014-05-13 13:37:45 -0700363 /* ClientGUID must be zero for SMB2.02 dialect */
364 if (ses->server->vals->protocol_id == SMB20_PROT_ID)
365 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
366 else
367 memcpy(req->ClientGUID, server->client_guid,
368 SMB2_CLIENT_GUID_SIZE);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700369
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400370 iov[0].iov_base = (char *)req;
371 /* 4 for rfc1002 length field */
372 iov[0].iov_len = get_rfc1002_length(req) + 4;
373
374 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
375
376 rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
377 /*
378 * No tcon so can't do
379 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
380 */
381 if (rc != 0)
382 goto neg_exit;
383
Joe Perchesf96637b2013-05-04 22:12:25 -0500384 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400385
Steve Frenche4aa25e2012-10-01 12:26:22 -0500386 /* BB we may eventually want to match the negotiated vs. requested
387 dialect, even though we are only requesting one at a time */
388 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
Joe Perchesf96637b2013-05-04 22:12:25 -0500389 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
Steve Frenche4aa25e2012-10-01 12:26:22 -0500390 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
Joe Perchesf96637b2013-05-04 22:12:25 -0500391 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
Steve Frenche4aa25e2012-10-01 12:26:22 -0500392 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
Joe Perchesf96637b2013-05-04 22:12:25 -0500393 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
Steve French20b6d8b2013-06-12 22:48:41 -0500394 else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
395 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400396 else {
Joe Perchesf96637b2013-05-04 22:12:25 -0500397 cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
398 le16_to_cpu(rsp->DialectRevision));
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400399 rc = -EIO;
400 goto neg_exit;
401 }
402 server->dialect = le16_to_cpu(rsp->DialectRevision);
403
Jeff Laytone598d1d82013-05-26 07:00:59 -0400404 /* SMB2 only has an extended negflavor */
405 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
Pavel Shilovsky2365c4e2014-02-14 13:31:02 +0400406 /* set it to the maximum buffer size value we can send with 1 credit */
407 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
408 SMB2_MAX_BUFFER_SIZE);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400409 server->max_read = le32_to_cpu(rsp->MaxReadSize);
410 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
411 /* BB Do we need to validate the SecurityMode? */
412 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
413 server->capabilities = le32_to_cpu(rsp->Capabilities);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400414 /* Internal types */
415 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400416
417 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
418 &rsp->hdr);
Steve French5d875cc2013-06-25 15:33:41 -0500419 /*
420 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
421 * for us will be
422 * ses->sectype = RawNTLMSSP;
 423	 * but for the time being this is our only auth choice so it doesn't matter.
424 * We just found a server which sets blob length to zero expecting raw.
425 */
426 if (blob_length == 0)
427 cifs_dbg(FYI, "missing security blob on negprot\n");
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -0700428
Jeff Layton38d77c52013-05-26 07:01:00 -0400429 rc = cifs_enable_signing(server, ses->sign);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400430#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
Jeff Layton9ddec562013-05-26 07:00:58 -0400431 if (rc)
432 goto neg_exit;
Steve French5d875cc2013-06-25 15:33:41 -0500433 if (blob_length)
Steve Frenchebdd2072014-10-20 12:48:23 -0500434 rc = decode_negTokenInit(security_blob, blob_length, server);
Pavel Shilovskyec2e4522011-12-27 16:12:43 +0400435 if (rc == 1)
436 rc = 0;
437 else if (rc == 0) {
438 rc = -EIO;
439 goto neg_exit;
440 }
441#endif
442
443neg_exit:
444 free_rsp_buf(resp_buftype, rsp);
445 return rc;
446}
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400447
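/*
 * FSCTL_VALIDATE_NEGOTIATE_INFO replays the client's original negotiate
 * parameters over a signed channel so the server can confirm nothing was
 * tampered with (downgraded) during the unsigned negotiate exchange.
 */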
Steve Frenchff1c0382013-11-19 23:44:46 -0600448int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
449{
450 int rc = 0;
451 struct validate_negotiate_info_req vneg_inbuf;
452 struct validate_negotiate_info_rsp *pneg_rsp;
453 u32 rsplen;
454
455 cifs_dbg(FYI, "validate negotiate\n");
456
457 /*
458 * validation ioctl must be signed, so no point sending this if we
459 * can not sign it. We could eventually change this to selectively
460 * sign just this, the first and only signed request on a connection.
461 * This is good enough for now since a user who wants better security
462 * would also enable signing on the mount. Having validation of
 463	 * negotiate info for signed connections helps reduce attack vectors.
464 */
465 if (tcon->ses->server->sign == false)
466 return 0; /* validation requires signing */
467
468 vneg_inbuf.Capabilities =
469 cpu_to_le32(tcon->ses->server->vals->req_capabilities);
Sachin Prabhu39552ea2014-05-13 00:48:12 +0100470 memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
471 SMB2_CLIENT_GUID_SIZE);
Steve Frenchff1c0382013-11-19 23:44:46 -0600472
473 if (tcon->ses->sign)
474 vneg_inbuf.SecurityMode =
475 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
476 else if (global_secflags & CIFSSEC_MAY_SIGN)
477 vneg_inbuf.SecurityMode =
478 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
479 else
480 vneg_inbuf.SecurityMode = 0;
481
482 vneg_inbuf.DialectCount = cpu_to_le16(1);
483 vneg_inbuf.Dialects[0] =
484 cpu_to_le16(tcon->ses->server->vals->protocol_id);
485
486 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
487 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
488 (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
489 (char **)&pneg_rsp, &rsplen);
490
491 if (rc != 0) {
492 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
493 return -EIO;
494 }
495
496 if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
497 cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
498 return -EIO;
499 }
500
501 /* check validate negotiate info response matches what we got earlier */
502 if (pneg_rsp->Dialect !=
503 cpu_to_le16(tcon->ses->server->vals->protocol_id))
504 goto vneg_out;
505
506 if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
507 goto vneg_out;
508
509 /* do not validate server guid because not saved at negprot time yet */
510
511 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
512 SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
513 goto vneg_out;
514
515 /* validate negotiate successful */
516 cifs_dbg(FYI, "validate negotiate info successful\n");
517 return 0;
518
519vneg_out:
520 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
521 return -EIO;
522}
523
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400524int
525SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
526 const struct nls_table *nls_cp)
527{
528 struct smb2_sess_setup_req *req;
529 struct smb2_sess_setup_rsp *rsp = NULL;
530 struct kvec iov[2];
531 int rc = 0;
Namjae Jeon7de975e2014-08-20 19:39:41 +0900532 int resp_buftype = CIFS_NO_BUFFER;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400533 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
Jeff Layton3534b852013-05-24 07:41:01 -0400534 struct TCP_Server_Info *server = ses->server;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400535 u16 blob_length = 0;
536 char *security_blob;
537 char *ntlmssp_blob = NULL;
538 bool use_spnego = false; /* else use raw ntlmssp */
539
Joe Perchesf96637b2013-05-04 22:12:25 -0500540 cifs_dbg(FYI, "Session Setup\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400541
Jeff Layton3534b852013-05-24 07:41:01 -0400542 if (!server) {
543 WARN(1, "%s: server is NULL!\n", __func__);
544 return -EIO;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400545 }
546
547 /*
Shirish Pargaonkard4e63bd2013-08-29 08:35:09 -0500548 * If we are here due to reconnect, free per-smb session key
549 * in case signing was required.
550 */
551 kfree(ses->auth_key.response);
552 ses->auth_key.response = NULL;
553
554 /*
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400555 * If memory allocation is successful, caller of this function
556 * frees it.
557 */
558 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
559 if (!ses->ntlmssp)
560 return -ENOMEM;
Shirish Pargaonkar5c234aa2013-08-29 08:35:10 -0500561 ses->ntlmssp->sesskey_per_smbsess = true;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400562
Jeff Layton3f618222013-06-12 19:52:14 -0500563 /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
564 ses->sectype = RawNTLMSSP;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400565
566ssetup_ntlmssp_authenticate:
567 if (phase == NtLmChallenge)
568 phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
569
570 rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
571 if (rc)
572 return rc;
573
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400574 req->hdr.SessionId = 0; /* First session, not a reauthenticate */
575 req->VcNumber = 0; /* MBZ */
576 /* to enable echos and oplocks */
577 req->hdr.CreditRequest = cpu_to_le16(3);
578
579 /* only one of SMB2 signing flags may be set in SMB2 request */
Jeff Layton38d77c52013-05-26 07:01:00 -0400580 if (server->sign)
581 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
582 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
583 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
584 else
585 req->SecurityMode = 0;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400586
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400587 req->Capabilities = 0;
588 req->Channel = 0; /* MBZ */
589
590 iov[0].iov_base = (char *)req;
591 /* 4 for rfc1002 length field and 1 for pad */
592 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
593 if (phase == NtLmNegotiate) {
594 ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
595 GFP_KERNEL);
596 if (ntlmssp_blob == NULL) {
597 rc = -ENOMEM;
598 goto ssetup_exit;
599 }
600 build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
601 if (use_spnego) {
602 /* blob_length = build_spnego_ntlmssp_blob(
603 &security_blob,
604 sizeof(struct _NEGOTIATE_MESSAGE),
605 ntlmssp_blob); */
606 /* BB eventually need to add this */
Joe Perchesf96637b2013-05-04 22:12:25 -0500607 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400608 rc = -EOPNOTSUPP;
609 kfree(ntlmssp_blob);
610 goto ssetup_exit;
611 } else {
612 blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
613 /* with raw NTLMSSP we don't encapsulate in SPNEGO */
614 security_blob = ntlmssp_blob;
615 }
616 } else if (phase == NtLmAuthenticate) {
617 req->hdr.SessionId = ses->Suid;
618 ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
619 GFP_KERNEL);
620 if (ntlmssp_blob == NULL) {
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400621 rc = -ENOMEM;
622 goto ssetup_exit;
623 }
624 rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
625 nls_cp);
626 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500627 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
628 rc);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400629 goto ssetup_exit; /* BB double check error handling */
630 }
631 if (use_spnego) {
632 /* blob_length = build_spnego_ntlmssp_blob(
633 &security_blob,
634 blob_length,
635 ntlmssp_blob); */
Joe Perchesf96637b2013-05-04 22:12:25 -0500636 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400637 rc = -EOPNOTSUPP;
638 kfree(ntlmssp_blob);
639 goto ssetup_exit;
640 } else {
641 security_blob = ntlmssp_blob;
642 }
643 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -0500644 cifs_dbg(VFS, "illegal ntlmssp phase\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400645 rc = -EIO;
646 goto ssetup_exit;
647 }
648
649 /* Testing shows that buffer offset must be at location of Buffer[0] */
650 req->SecurityBufferOffset =
651 cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
652 1 /* pad */ - 4 /* rfc1001 len */);
653 req->SecurityBufferLength = cpu_to_le16(blob_length);
654 iov[1].iov_base = security_blob;
655 iov[1].iov_len = blob_length;
656
657 inc_rfc1001_len(req, blob_length - 1 /* pad */);
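	/*
	 * Length accounting note: the fixed size request ends with a one byte
	 * Buffer[] placeholder which is not sent (hence the "- 1" on iov[0]
	 * above), and the security blob goes out as iov[1], so the rfc1001
	 * length grows by blob_length - 1 overall.
	 */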
658
659 /* BB add code to build os and lm fields */
660
Steve French6d8b59d2012-12-08 22:36:29 -0600661 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
662 CIFS_LOG_ERROR | CIFS_NEG_OP);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400663
664 kfree(security_blob);
665 rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
Pavel Shilovsky4ca3a992012-09-25 11:00:09 +0400666 if (resp_buftype != CIFS_NO_BUFFER &&
667 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400668 if (phase != NtLmNegotiate) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500669 cifs_dbg(VFS, "Unexpected more processing error\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400670 goto ssetup_exit;
671 }
672 if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
Pavel Shilovsky4ca3a992012-09-25 11:00:09 +0400673 le16_to_cpu(rsp->SecurityBufferOffset)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500674 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
675 le16_to_cpu(rsp->SecurityBufferOffset));
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400676 rc = -EIO;
677 goto ssetup_exit;
678 }
679
680 /* NTLMSSP Negotiate sent now processing challenge (response) */
681 phase = NtLmChallenge; /* process ntlmssp challenge */
682 rc = 0; /* MORE_PROCESSING is not an error here but expected */
683 ses->Suid = rsp->hdr.SessionId;
684 rc = decode_ntlmssp_challenge(rsp->Buffer,
685 le16_to_cpu(rsp->SecurityBufferLength), ses);
686 }
687
688 /*
689 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
690 * but at least the raw NTLMSSP case works.
691 */
692 /*
693 * No tcon so can't do
694 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
695 */
696 if (rc != 0)
697 goto ssetup_exit;
698
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400699 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
Steve French0cbaa532013-11-15 23:50:24 -0600700 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
701 cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400702ssetup_exit:
703 free_rsp_buf(resp_buftype, rsp);
704
705 /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
706 if ((phase == NtLmChallenge) && (rc == 0))
707 goto ssetup_ntlmssp_authenticate;
Shirish Pargaonkard4e63bd2013-08-29 08:35:09 -0500708
709 if (!rc) {
710 mutex_lock(&server->srv_mutex);
Shirish Pargaonkar32811d22013-08-29 08:35:11 -0500711 if (server->sign && server->ops->generate_signingkey) {
712 rc = server->ops->generate_signingkey(ses);
713 kfree(ses->auth_key.response);
714 ses->auth_key.response = NULL;
715 if (rc) {
716 cifs_dbg(FYI,
717 "SMB3 session key generation failed\n");
718 mutex_unlock(&server->srv_mutex);
719 goto keygen_exit;
720 }
721 }
Shirish Pargaonkard4e63bd2013-08-29 08:35:09 -0500722 if (!server->session_estab) {
723 server->sequence_number = 0x2;
724 server->session_estab = true;
Shirish Pargaonkard4e63bd2013-08-29 08:35:09 -0500725 }
726 mutex_unlock(&server->srv_mutex);
727
728 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
729 spin_lock(&GlobalMid_Lock);
730 ses->status = CifsGood;
731 ses->need_reconnect = false;
732 spin_unlock(&GlobalMid_Lock);
733 }
734
Shirish Pargaonkar32811d22013-08-29 08:35:11 -0500735keygen_exit:
Shirish Pargaonkard4e63bd2013-08-29 08:35:09 -0500736 if (!server->sign) {
737 kfree(ses->auth_key.response);
738 ses->auth_key.response = NULL;
739 }
740 kfree(ses->ntlmssp);
741
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400742 return rc;
743}
744
745int
746SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
747{
748 struct smb2_logoff_req *req; /* response is also trivial struct */
749 int rc = 0;
750 struct TCP_Server_Info *server;
751
Joe Perchesf96637b2013-05-04 22:12:25 -0500752 cifs_dbg(FYI, "disconnect session %p\n", ses);
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400753
754 if (ses && (ses->server))
755 server = ses->server;
756 else
757 return -EIO;
758
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -0500759 /* no need to send SMB logoff if uid already closed due to reconnect */
760 if (ses->need_reconnect)
761 goto smb2_session_already_dead;
762
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400763 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
764 if (rc)
765 return rc;
766
767 /* since no tcon, smb2_init can not do this, so do here */
768 req->hdr.SessionId = ses->Suid;
Jeff Layton38d77c52013-05-26 07:01:00 -0400769 if (server->sign)
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -0700770 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400771
772 rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
773 /*
774 * No tcon so can't do
775 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
776 */
Shirish Pargaonkareb4c7df2013-10-03 05:44:45 -0500777
778smb2_session_already_dead:
Pavel Shilovsky5478f9b2011-12-27 16:22:00 +0400779 return rc;
780}
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400781
782static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
783{
Pavel Shilovskyd60622e2012-05-28 15:19:39 +0400784 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400785}
786
787#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
788
Steve Frenchde9f68d2013-11-15 11:26:24 -0600789/* These are similar values to what Windows uses */
790static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
791{
792 tcon->max_chunks = 256;
793 tcon->max_bytes_chunk = 1048576;
794 tcon->max_bytes_copy = 16777216;
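	/* i.e. up to 256 chunks per request, 1MB per chunk, 16MB copied total */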
795}
796
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400797int
798SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
799 struct cifs_tcon *tcon, const struct nls_table *cp)
800{
801 struct smb2_tree_connect_req *req;
802 struct smb2_tree_connect_rsp *rsp = NULL;
803 struct kvec iov[2];
804 int rc = 0;
805 int resp_buftype;
806 int unc_path_len;
807 struct TCP_Server_Info *server;
808 __le16 *unc_path = NULL;
809
Joe Perchesf96637b2013-05-04 22:12:25 -0500810 cifs_dbg(FYI, "TCON\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400811
812 if ((ses->server) && tree)
813 server = ses->server;
814 else
815 return -EIO;
816
817 if (tcon && tcon->bad_network_name)
818 return -ENOENT;
819
820 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
821 if (unc_path == NULL)
822 return -ENOMEM;
823
824 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
825 unc_path_len *= 2;
826 if (unc_path_len < 2) {
827 kfree(unc_path);
828 return -EINVAL;
829 }
830
831 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
832 if (rc) {
833 kfree(unc_path);
834 return rc;
835 }
836
837 if (tcon == NULL) {
838 /* since no tcon, smb2_init can not do this, so do here */
839 req->hdr.SessionId = ses->Suid;
840 /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
841 req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
842 }
843
844 iov[0].iov_base = (char *)req;
845 /* 4 for rfc1002 length field and 1 for pad */
846 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
847
848 /* Testing shows that buffer offset must be at location of Buffer[0] */
849 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
850 - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
851 req->PathLength = cpu_to_le16(unc_path_len - 2);
852 iov[1].iov_base = unc_path;
853 iov[1].iov_len = unc_path_len;
854
855 inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
856
857 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
858 rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
859
860 if (rc != 0) {
861 if (tcon) {
862 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
863 tcon->need_reconnect = true;
864 }
865 goto tcon_error_exit;
866 }
867
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400868 if (tcon == NULL) {
869 ses->ipc_tid = rsp->hdr.TreeId;
870 goto tcon_exit;
871 }
872
873 if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
Joe Perchesf96637b2013-05-04 22:12:25 -0500874 cifs_dbg(FYI, "connection to disk share\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400875 else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
876 tcon->ipc = true;
Joe Perchesf96637b2013-05-04 22:12:25 -0500877 cifs_dbg(FYI, "connection to pipe share\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400878 } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
879 tcon->print = true;
Joe Perchesf96637b2013-05-04 22:12:25 -0500880 cifs_dbg(FYI, "connection to printer\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400881 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -0500882 cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400883 rc = -EOPNOTSUPP;
884 goto tcon_error_exit;
885 }
886
887 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
Steve French769ee6a2013-06-19 14:15:30 -0500888 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400889 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
890 tcon->tidStatus = CifsGood;
891 tcon->need_reconnect = false;
892 tcon->tid = rsp->hdr.TreeId;
Zhao Hongjiang46b51d02013-06-24 01:57:47 -0500893 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400894
895 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
896 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
Joe Perchesf96637b2013-05-04 22:12:25 -0500897 cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
Steve Frenchde9f68d2013-11-15 11:26:24 -0600898 init_copy_chunk_defaults(tcon);
Steve Frenchff1c0382013-11-19 23:44:46 -0600899 if (tcon->ses->server->ops->validate_negotiate)
900 rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400901tcon_exit:
902 free_rsp_buf(resp_buftype, rsp);
903 kfree(unc_path);
904 return rc;
905
906tcon_error_exit:
907 if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500908 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
Steve French18f39e72014-08-17 00:22:24 -0500909 if (tcon)
910 tcon->bad_network_name = true;
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400911 }
912 goto tcon_exit;
913}
914
915int
916SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
917{
918 struct smb2_tree_disconnect_req *req; /* response is trivial */
919 int rc = 0;
920 struct TCP_Server_Info *server;
921 struct cifs_ses *ses = tcon->ses;
922
Joe Perchesf96637b2013-05-04 22:12:25 -0500923 cifs_dbg(FYI, "Tree Disconnect\n");
Pavel Shilovskyfaaf9462011-12-27 16:04:00 +0400924
925 if (ses && (ses->server))
926 server = ses->server;
927 else
928 return -EIO;
929
930 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
931 return 0;
932
933 rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
934 if (rc)
935 return rc;
936
937 rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
938 if (rc)
939 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
940
941 return rc;
942}
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +0400943
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700944
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +0400945static struct create_durable *
946create_durable_buf(void)
947{
948 struct create_durable *buf;
949
950 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
951 if (!buf)
952 return NULL;
953
954 buf->ccontext.DataOffset = cpu_to_le16(offsetof
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400955 (struct create_durable, Data));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +0400956 buf->ccontext.DataLength = cpu_to_le32(16);
957 buf->ccontext.NameOffset = cpu_to_le16(offsetof
958 (struct create_durable, Name));
959 buf->ccontext.NameLength = cpu_to_le16(4);
Steve French12197a72014-05-14 05:29:40 -0700960 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +0400961 buf->Name[0] = 'D';
962 buf->Name[1] = 'H';
963 buf->Name[2] = 'n';
964 buf->Name[3] = 'Q';
965 return buf;
966}
967
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400968static struct create_durable *
969create_reconnect_durable_buf(struct cifs_fid *fid)
970{
971 struct create_durable *buf;
972
973 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
974 if (!buf)
975 return NULL;
976
977 buf->ccontext.DataOffset = cpu_to_le16(offsetof
978 (struct create_durable, Data));
979 buf->ccontext.DataLength = cpu_to_le32(16);
980 buf->ccontext.NameOffset = cpu_to_le16(offsetof
981 (struct create_durable, Name));
982 buf->ccontext.NameLength = cpu_to_le16(4);
983 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
984 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
Steve French12197a72014-05-14 05:29:40 -0700985 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400986 buf->Name[0] = 'D';
987 buf->Name[1] = 'H';
988 buf->Name[2] = 'n';
989 buf->Name[3] = 'C';
990 return buf;
991}
992
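/*
 * Walk the chain of create contexts in the CREATE response (linked via the
 * Next field) looking for the lease response context ("RqLs") and, when
 * found, let the dialect specific handler parse the granted lease state.
 */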
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700993static __u8
Pavel Shilovsky42873b02013-09-05 21:30:16 +0400994parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
995 unsigned int *epoch)
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700996{
997 char *data_offset;
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +0400998 struct create_context *cc;
Pavel Shilovskyfd554392013-07-09 19:44:56 +0400999 unsigned int next = 0;
1000 char *name;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001001
Pavel Shilovskyfd554392013-07-09 19:44:56 +04001002 data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04001003 cc = (struct create_context *)data_offset;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001004 do {
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04001005 cc = (struct create_context *)((char *)cc + next);
1006 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
1007 if (le16_to_cpu(cc->NameLength) != 4 ||
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001008 strncmp(name, "RqLs", 4)) {
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04001009 next = le32_to_cpu(cc->Next);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001010 continue;
1011 }
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001012 return server->ops->parse_lease_buf(cc, epoch);
Pavel Shilovskyfd554392013-07-09 19:44:56 +04001013 } while (next != 0);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001014
Pavel Shilovskyb5c7cde2013-09-05 20:16:45 +04001015 return 0;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001016}
1017
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001018static int
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001019add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
1020 unsigned int *num_iovec, __u8 *oplock)
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001021{
1022 struct smb2_create_req *req = iov[0].iov_base;
1023 unsigned int num = *num_iovec;
1024
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001025 iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001026 if (iov[num].iov_base == NULL)
1027 return -ENOMEM;
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001028 iov[num].iov_len = server->vals->create_lease_size;
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001029 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
1030 if (!req->CreateContextsOffset)
1031 req->CreateContextsOffset = cpu_to_le32(
1032 sizeof(struct smb2_create_req) - 4 +
1033 iov[num - 1].iov_len);
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001034 le32_add_cpu(&req->CreateContextsLength,
1035 server->vals->create_lease_size);
1036 inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001037 *num_iovec = num + 1;
1038 return 0;
1039}
1040
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001041static int
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001042add_durable_context(struct kvec *iov, unsigned int *num_iovec,
1043 struct cifs_open_parms *oparms)
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001044{
1045 struct smb2_create_req *req = iov[0].iov_base;
1046 unsigned int num = *num_iovec;
1047
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001048 if (oparms->reconnect) {
1049 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
1050 /* indicate that we don't need to relock the file */
1051 oparms->reconnect = false;
1052 } else
1053 iov[num].iov_base = create_durable_buf();
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001054 if (iov[num].iov_base == NULL)
1055 return -ENOMEM;
1056 iov[num].iov_len = sizeof(struct create_durable);
1057 if (!req->CreateContextsOffset)
1058 req->CreateContextsOffset =
1059 cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
1060 iov[1].iov_len);
Wei Yongjun31f92e92013-08-26 14:34:46 +08001061 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001062 inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
1063 *num_iovec = num + 1;
1064 return 0;
1065}
1066
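/*
 * Open or create the object named by "path" on this tcon.  The helpers
 * above may append SMB2 create contexts (a "RqLs" lease request and a
 * "DHnQ"/"DHnC" durable handle request) to the CREATE request, adjusting
 * CreateContextsOffset/Length and the rfc1001 length accordingly.
 */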
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001067int
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001068SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04001069 __u8 *oplock, struct smb2_file_all_info *buf,
1070 struct smb2_err_rsp **err_buf)
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001071{
1072 struct smb2_create_req *req;
1073 struct smb2_create_rsp *rsp;
1074 struct TCP_Server_Info *server;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001075 struct cifs_tcon *tcon = oparms->tcon;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001076 struct cifs_ses *ses = tcon->ses;
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001077 struct kvec iov[4];
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001078 int resp_buftype;
1079 int uni_path_len;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001080 __le16 *copy_path = NULL;
1081 int copy_size;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001082 int rc = 0;
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04001083 unsigned int num_iovecs = 2;
Pavel Shilovskyca819832013-07-05 12:21:26 +04001084 __u32 file_attributes = 0;
Pavel Shilovsky663a96212014-05-24 16:42:02 +04001085 char *dhc_buf = NULL, *lc_buf = NULL;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001086
Joe Perchesf96637b2013-05-04 22:12:25 -05001087 cifs_dbg(FYI, "create/open\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001088
1089 if (ses && (ses->server))
1090 server = ses->server;
1091 else
1092 return -EIO;
1093
1094 rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
1095 if (rc)
1096 return rc;
1097
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001098 if (oparms->create_options & CREATE_OPTION_READONLY)
Pavel Shilovskyca819832013-07-05 12:21:26 +04001099 file_attributes |= ATTR_READONLY;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001100 if (oparms->create_options & CREATE_OPTION_SPECIAL)
1101 file_attributes |= ATTR_SYSTEM;
Pavel Shilovskyca819832013-07-05 12:21:26 +04001102
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001103 req->ImpersonationLevel = IL_IMPERSONATION;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001104 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001105 /* File attributes ignored on open (used in create though) */
1106 req->FileAttributes = cpu_to_le32(file_attributes);
1107 req->ShareAccess = FILE_SHARE_ALL_LE;
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001108 req->CreateDisposition = cpu_to_le32(oparms->disposition);
1109 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001110 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04001111 /* do not count rfc1001 len field */
1112 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001113
1114 iov[0].iov_base = (char *)req;
1115 /* 4 for rfc1002 length field */
1116 iov[0].iov_len = get_rfc1002_length(req) + 4;
1117
 1118	/* MUST set path len (NameLength) to 0 when opening root of share */
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04001119 req->NameLength = cpu_to_le16(uni_path_len - 2);
1120 /* -1 since last byte is buf[0] which is sent below (path) */
1121 iov[0].iov_len--;
1122 if (uni_path_len % 8 != 0) {
1123 copy_size = uni_path_len / 8 * 8;
1124 if (copy_size < uni_path_len)
1125 copy_size += 8;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001126
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04001127 copy_path = kzalloc(copy_size, GFP_KERNEL);
1128 if (!copy_path)
1129 return -ENOMEM;
1130 memcpy((char *)copy_path, (const char *)path,
1131 uni_path_len);
1132 uni_path_len = copy_size;
1133 path = copy_path;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001134 }
1135
Pavel Shilovsky59aa3712013-07-04 19:41:24 +04001136 iov[1].iov_len = uni_path_len;
1137 iov[1].iov_base = path;
1138 /* -1 since last byte is buf[0] which was counted in smb2_buf_len */
1139 inc_rfc1001_len(req, uni_path_len - 1);
1140
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001141 if (!server->oplocks)
1142 *oplock = SMB2_OPLOCK_LEVEL_NONE;
1143
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001144 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001145 *oplock == SMB2_OPLOCK_LEVEL_NONE)
1146 req->RequestedOplockLevel = *oplock;
1147 else {
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001148 rc = add_lease_context(server, iov, &num_iovecs, oplock);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001149 if (rc) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001150 cifs_small_buf_release(req);
1151 kfree(copy_path);
Pavel Shilovskyd22cbfe2013-07-04 19:10:00 +04001152 return rc;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001153 }
Pavel Shilovsky663a96212014-05-24 16:42:02 +04001154 lc_buf = iov[num_iovecs-1].iov_base;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001155 }
1156
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001157 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
 1158		/* need to set Next field of lease context if a durable context follows it */
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001159 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001160 struct create_context *ccontext =
1161 (struct create_context *)iov[num_iovecs-1].iov_base;
Steve French1c469432013-07-10 12:50:57 -05001162 ccontext->Next =
Pavel Shilovskya41a28b2013-09-04 13:07:41 +04001163 cpu_to_le32(server->vals->create_lease_size);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001164 }
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001165 rc = add_durable_context(iov, &num_iovecs, oparms);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001166 if (rc) {
1167 cifs_small_buf_release(req);
1168 kfree(copy_path);
Pavel Shilovsky663a96212014-05-24 16:42:02 +04001169 kfree(lc_buf);
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001170 return rc;
1171 }
Pavel Shilovsky663a96212014-05-24 16:42:02 +04001172 dhc_buf = iov[num_iovecs-1].iov_base;
Pavel Shilovsky63eb3de2013-07-04 18:41:09 +04001173 }
1174
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001175 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1176 rsp = (struct smb2_create_rsp *)iov[0].iov_base;
1177
1178 if (rc != 0) {
1179 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
Pavel Shilovskyb42bf882013-08-14 19:25:21 +04001180 if (err_buf)
1181 *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
1182 GFP_KERNEL);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001183 goto creat_exit;
1184 }
1185
Pavel Shilovsky064f6042013-07-09 18:20:30 +04001186 oparms->fid->persistent_fid = rsp->PersistentFileId;
1187 oparms->fid->volatile_fid = rsp->VolatileFileId;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001188
1189 if (buf) {
1190 memcpy(buf, &rsp->CreationTime, 32);
1191 buf->AllocationSize = rsp->AllocationSize;
1192 buf->EndOfFile = rsp->EndofFile;
1193 buf->Attributes = rsp->FileAttributes;
1194 buf->NumberOfLinks = cpu_to_le32(1);
1195 buf->DeletePending = 0;
1196 }
Pavel Shilovsky2e44b282012-09-18 16:20:33 -07001197
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001198 if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
Pavel Shilovsky42873b02013-09-05 21:30:16 +04001199 *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001200 else
1201 *oplock = rsp->OplockLevel;
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001202creat_exit:
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -07001203 kfree(copy_path);
Pavel Shilovsky663a96212014-05-24 16:42:02 +04001204 kfree(lc_buf);
1205 kfree(dhc_buf);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001206 free_rsp_buf(resp_buftype, rsp);
1207 return rc;
1208}
1209
Steve French4a72daf2013-06-25 00:20:49 -05001210/*
1211 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
1212 */
1213int
1214SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1215 u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
1216 u32 indatalen, char **out_data, u32 *plen /* returned data len */)
1217{
1218 struct smb2_ioctl_req *req;
1219 struct smb2_ioctl_rsp *rsp;
1220 struct TCP_Server_Info *server;
1221 struct cifs_ses *ses = tcon->ses;
1222 struct kvec iov[2];
1223 int resp_buftype;
1224 int num_iovecs;
1225 int rc = 0;
1226
1227 cifs_dbg(FYI, "SMB2 IOCTL\n");
1228
Steve French3d1a3742014-08-11 21:05:25 -05001229 if (out_data != NULL)
1230 *out_data = NULL;
1231
Steve French4a72daf2013-06-25 00:20:49 -05001232 /* zero out returned data len, in case of error */
1233 if (plen)
1234 *plen = 0;
1235
1236 if (ses && (ses->server))
1237 server = ses->server;
1238 else
1239 return -EIO;
1240
1241 rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
1242 if (rc)
1243 return rc;
1244
1245 req->CtlCode = cpu_to_le32(opcode);
1246 req->PersistentFileId = persistent_fid;
1247 req->VolatileFileId = volatile_fid;
1248
1249 if (indatalen) {
1250 req->InputCount = cpu_to_le32(indatalen);
1251 /* do not set InputOffset if no input data */
1252 req->InputOffset =
1253 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
1254 iov[1].iov_base = in_data;
1255 iov[1].iov_len = indatalen;
1256 num_iovecs = 2;
1257 } else
1258 num_iovecs = 1;
1259
1260 req->OutputOffset = 0;
1261 req->OutputCount = 0; /* MBZ */
1262
1263 /*
1264 * Could increase MaxOutputResponse, but that would require more
1265 * than one credit. Windows typically sets this smaller, but for some
1266 * ioctls it may be useful to allow server to send more. No point
 1267	 * limiting what the server can send as long as it fits in one credit
1268 */
1269 req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
1270
1271 if (is_fsctl)
1272 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
1273 else
1274 req->Flags = 0;
1275
1276 iov[0].iov_base = (char *)req;
Steve French4a72daf2013-06-25 00:20:49 -05001277
Steve French7ff8d452013-10-14 00:44:19 -05001278 /*
1279 * If no input data, the size of ioctl struct in
1280 * protocol spec still includes a 1 byte data buffer,
1281 * but if input data passed to ioctl, we do not
1282 * want to double count this, so we do not send
1283 * the dummy one byte of data in iovec[0] if sending
1284 * input data (in iovec[1]). We also must add 4 bytes
1285 * in first iovec to allow for rfc1002 length field.
1286 */
1287
1288 if (indatalen) {
1289 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1290 inc_rfc1001_len(req, indatalen - 1);
1291 } else
1292 iov[0].iov_len = get_rfc1002_length(req) + 4;
1293
Steve French4a72daf2013-06-25 00:20:49 -05001294
1295 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1296 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
1297
Steve French9bf0c9c2013-11-16 18:05:28 -06001298 if ((rc != 0) && (rc != -EINVAL)) {
Steve French4a72daf2013-06-25 00:20:49 -05001299 if (tcon)
1300 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1301 goto ioctl_exit;
Steve French9bf0c9c2013-11-16 18:05:28 -06001302 } else if (rc == -EINVAL) {
1303 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
1304 (opcode != FSCTL_SRV_COPYCHUNK)) {
1305 if (tcon)
1306 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1307 goto ioctl_exit;
1308 }
Steve French4a72daf2013-06-25 00:20:49 -05001309 }
1310
1311 /* check if caller wants to look at return data or just return rc */
1312 if ((plen == NULL) || (out_data == NULL))
1313 goto ioctl_exit;
1314
1315 *plen = le32_to_cpu(rsp->OutputCount);
1316
1317 /* We check for obvious errors in the output buffer length and offset */
1318 if (*plen == 0)
1319 goto ioctl_exit; /* server returned no data */
1320 else if (*plen > 0xFF00) {
1321 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
1322 *plen = 0;
1323 rc = -EIO;
1324 goto ioctl_exit;
1325 }
1326
1327 if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
1328 cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
1329 le32_to_cpu(rsp->OutputOffset));
1330 *plen = 0;
1331 rc = -EIO;
1332 goto ioctl_exit;
1333 }
1334
1335 *out_data = kmalloc(*plen, GFP_KERNEL);
1336 if (*out_data == NULL) {
1337 rc = -ENOMEM;
1338 goto ioctl_exit;
1339 }
1340
1341 memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
1342 *plen);
1343ioctl_exit:
1344 free_rsp_buf(resp_buftype, rsp);
1345 return rc;
1346}
1347
Steve French64a5cfa2013-10-14 15:31:32 -05001348/*
1349 * Individual callers to ioctl worker function follow
1350 */
1351
1352int
1353SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1354 u64 persistent_fid, u64 volatile_fid)
1355{
1356 int rc;
Steve French64a5cfa2013-10-14 15:31:32 -05001357 struct compress_ioctl fsctl_input;
1358 char *ret_data = NULL;
1359
1360 fsctl_input.CompressionState =
Fabian Frederickbc09d142014-12-10 15:41:15 -08001361 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
Steve French64a5cfa2013-10-14 15:31:32 -05001362
1363 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1364 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
1365 (char *)&fsctl_input /* data input */,
1366 2 /* in data len */, &ret_data /* out data */, NULL);
1367
1368 cifs_dbg(FYI, "set compression rc %d\n", rc);
Steve French64a5cfa2013-10-14 15:31:32 -05001369
1370 return rc;
1371}
1372
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001373int
1374SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1375 u64 persistent_fid, u64 volatile_fid)
1376{
1377 struct smb2_close_req *req;
1378 struct smb2_close_rsp *rsp;
1379 struct TCP_Server_Info *server;
1380 struct cifs_ses *ses = tcon->ses;
1381 struct kvec iov[1];
1382 int resp_buftype;
1383 int rc = 0;
1384
Joe Perchesf96637b2013-05-04 22:12:25 -05001385 cifs_dbg(FYI, "Close\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001386
1387 if (ses && (ses->server))
1388 server = ses->server;
1389 else
1390 return -EIO;
1391
1392 rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
1393 if (rc)
1394 return rc;
1395
1396 req->PersistentFileId = persistent_fid;
1397 req->VolatileFileId = volatile_fid;
1398
1399 iov[0].iov_base = (char *)req;
1400 /* 4 for rfc1002 length field */
1401 iov[0].iov_len = get_rfc1002_length(req) + 4;
1402
1403 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1404 rsp = (struct smb2_close_rsp *)iov[0].iov_base;
1405
1406 if (rc != 0) {
Namjae Jeond4a029d2014-08-20 19:39:59 +09001407 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001408 goto close_exit;
1409 }
1410
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001411 /* BB FIXME - decode close response, update inode for caching */
1412
1413close_exit:
1414 free_rsp_buf(resp_buftype, rsp);
1415 return rc;
1416}
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001417
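/*
 * Sanity check a variable length response: the returned buffer must meet
 * the minimum expected size, must not exceed the RFC1001 maximum, and its
 * offset/length pair must not point past the end of the SMB.
 */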
1418static int
1419validate_buf(unsigned int offset, unsigned int buffer_length,
1420 struct smb2_hdr *hdr, unsigned int min_buf_size)
1421
1422{
1423 unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
1424 char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
1425 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1426 char *end_of_buf = begin_of_buf + buffer_length;
1427
1428
1429 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001430 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
1431 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001432 return -EINVAL;
1433 }
1434
1435 /* check if beyond RFC1001 maximum length */
1436 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001437 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
1438 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001439 return -EINVAL;
1440 }
1441
1442 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001443 cifs_dbg(VFS, "illegal server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001444 return -EINVAL;
1445 }
1446
1447 return 0;
1448}
1449
1450/*
1451 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
1452 * Caller must free buffer.
1453 */
1454static int
1455validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
1456 struct smb2_hdr *hdr, unsigned int minbufsize,
1457 char *data)
1458
1459{
1460 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1461 int rc;
1462
1463 if (!data)
1464 return -EINVAL;
1465
1466 rc = validate_buf(offset, buffer_length, hdr, minbufsize);
1467 if (rc)
1468 return rc;
1469
1470 memcpy(data, begin_of_buf, buffer_length);
1471
1472 return 0;
1473}
1474
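/*
 * Worker for the SMB2_QUERY_INFO based calls below: queries the given file
 * info class and copies the validated response (at least min_len bytes)
 * into the caller supplied buffer.
 */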
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001475static int
1476query_info(const unsigned int xid, struct cifs_tcon *tcon,
1477 u64 persistent_fid, u64 volatile_fid, u8 info_class,
1478 size_t output_len, size_t min_len, void *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001479{
1480 struct smb2_query_info_req *req;
1481 struct smb2_query_info_rsp *rsp = NULL;
1482 struct kvec iov[2];
1483 int rc = 0;
1484 int resp_buftype;
1485 struct TCP_Server_Info *server;
1486 struct cifs_ses *ses = tcon->ses;
1487
Joe Perchesf96637b2013-05-04 22:12:25 -05001488 cifs_dbg(FYI, "Query Info\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001489
1490 if (ses && (ses->server))
1491 server = ses->server;
1492 else
1493 return -EIO;
1494
1495 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
1496 if (rc)
1497 return rc;
1498
1499 req->InfoType = SMB2_O_INFO_FILE;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001500 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001501 req->PersistentFileId = persistent_fid;
1502 req->VolatileFileId = volatile_fid;
1503 /* 4 for rfc1002 length field and 1 for Buffer */
1504 req->InputBufferOffset =
1505 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001506 req->OutputBufferLength = cpu_to_le32(output_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001507
1508 iov[0].iov_base = (char *)req;
1509 /* 4 for rfc1002 length field */
1510 iov[0].iov_len = get_rfc1002_length(req) + 4;
1511
1512 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001513 rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
1514
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001515 if (rc) {
1516 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
1517 goto qinf_exit;
1518 }
1519
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001520 rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
1521 le32_to_cpu(rsp->OutputBufferLength),
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001522 &rsp->hdr, min_len, data);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001523
1524qinf_exit:
1525 free_rsp_buf(resp_buftype, rsp);
1526 return rc;
1527}
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001528
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001529int
1530SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
1531 u64 persistent_fid, u64 volatile_fid,
1532 struct smb2_file_all_info *data)
1533{
1534 return query_info(xid, tcon, persistent_fid, volatile_fid,
1535 FILE_ALL_INFORMATION,
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04001536 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001537 sizeof(struct smb2_file_all_info), data);
1538}
1539
1540int
1541SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
1542 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
1543{
1544 return query_info(xid, tcon, persistent_fid, volatile_fid,
1545 FILE_INTERNAL_INFORMATION,
1546 sizeof(struct smb2_file_internal_info),
1547 sizeof(struct smb2_file_internal_info), uniqueid);
1548}
1549
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001550/*
1551 * This is a no-op for now. We're not really interested in the reply, but
1552 * rather in the fact that the server sent one and that server->lstrp
1553 * gets updated.
1554 *
1555 * FIXME: maybe we should consider checking that the reply matches the request?
1556 */
1557static void
1558smb2_echo_callback(struct mid_q_entry *mid)
1559{
1560 struct TCP_Server_Info *server = mid->callback_data;
1561 struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
1562 unsigned int credits_received = 1;
1563
1564 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1565 credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
1566
1567 DeleteMidQEntry(mid);
1568 add_credits(server, credits_received, CIFS_ECHO_OP);
1569}
1570
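/*
 * Send an asynchronous echo request to the server. The reply is handled by
 * smb2_echo_callback() above, which frees the mid and returns the credits
 * granted by the server.
 */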
1571int
1572SMB2_echo(struct TCP_Server_Info *server)
1573{
1574 struct smb2_echo_req *req;
1575 int rc = 0;
1576 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001577 struct smb_rqst rqst = { .rq_iov = &iov,
1578 .rq_nvec = 1 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001579
Joe Perchesf96637b2013-05-04 22:12:25 -05001580 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001581
1582 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1583 if (rc)
1584 return rc;
1585
1586 req->hdr.CreditRequest = cpu_to_le16(1);
1587
1588 iov.iov_base = (char *)req;
1589 /* 4 for rfc1002 length field */
1590 iov.iov_len = get_rfc1002_length(req) + 4;
1591
Jeff Laytonfec344e2012-09-18 16:20:35 -07001592 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001593 CIFS_ECHO_OP);
1594 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001595 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001596
1597 cifs_small_buf_release(req);
1598 return rc;
1599}
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001600
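/*
 * Issue an SMB2 FLUSH for the given file handle; only the return code is
 * of interest since the flush response carries no data we use.
 */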
1601int
1602SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1603 u64 volatile_fid)
1604{
1605 struct smb2_flush_req *req;
1606 struct TCP_Server_Info *server;
1607 struct cifs_ses *ses = tcon->ses;
1608 struct kvec iov[1];
1609 int resp_buftype;
1610 int rc = 0;
1611
Joe Perchesf96637b2013-05-04 22:12:25 -05001612 cifs_dbg(FYI, "Flush\n");
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001613
1614 if (ses && (ses->server))
1615 server = ses->server;
1616 else
1617 return -EIO;
1618
1619 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
1620 if (rc)
1621 return rc;
1622
1623 req->PersistentFileId = persistent_fid;
1624 req->VolatileFileId = volatile_fid;
1625
1626 iov[0].iov_base = (char *)req;
1627 /* 4 for rfc1002 length field */
1628 iov[0].iov_len = get_rfc1002_length(req) + 4;
1629
1630 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1631
1632 if ((rc != 0) && tcon)
1633 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1634
1635 free_rsp_buf(resp_buftype, iov[0].iov_base);
1636 return rc;
1637}
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001638
1639/*
1640 * To form a chain of read requests, any read requests after the first should
1641 * have the end_of_chain boolean set to true.
1642 */
1643static int
1644smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
1645 unsigned int remaining_bytes, int request_type)
1646{
1647 int rc = -EACCES;
1648 struct smb2_read_req *req = NULL;
1649
1650 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
1651 if (rc)
1652 return rc;
1653 if (io_parms->tcon->ses->server == NULL)
1654 return -ECONNABORTED;
1655
1656 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1657
1658 req->PersistentFileId = io_parms->persistent_fid;
1659 req->VolatileFileId = io_parms->volatile_fid;
1660 req->ReadChannelInfoOffset = 0; /* reserved */
1661 req->ReadChannelInfoLength = 0; /* reserved */
1662 req->Channel = 0; /* reserved */
1663 req->MinimumCount = 0;
1664 req->Length = cpu_to_le32(io_parms->length);
1665 req->Offset = cpu_to_le64(io_parms->offset);
1666
1667 if (request_type & CHAINED_REQUEST) {
1668 if (!(request_type & END_OF_CHAIN)) {
1669 /* 4 for rfc1002 length field */
1670 req->hdr.NextCommand =
1671 cpu_to_le32(get_rfc1002_length(req) + 4);
1672 } else /* END_OF_CHAIN */
1673 req->hdr.NextCommand = 0;
1674 if (request_type & RELATED_REQUEST) {
1675 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1676 /*
1677 * Related requests use info from previous read request
1678 * in chain.
1679 */
1680 req->hdr.SessionId = 0xFFFFFFFF;
1681 req->hdr.TreeId = 0xFFFFFFFF;
1682 req->PersistentFileId = 0xFFFFFFFF;
1683 req->VolatileFileId = 0xFFFFFFFF;
1684 }
1685 }
1686 if (remaining_bytes > io_parms->length)
1687 req->RemainingBytes = cpu_to_le32(remaining_bytes);
1688 else
1689 req->RemainingBytes = 0;
1690
1691 iov[0].iov_base = (char *)req;
1692 /* 4 for rfc1002 length field */
1693 iov[0].iov_len = get_rfc1002_length(req) + 4;
1694 return rc;
1695}
1696
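/*
 * Completion handler for smb2_async_readv(): verifies the signature when
 * signing is in use, accounts the bytes received, and queues rdata->work
 * so the read completes in process context.
 */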
1697static void
1698smb2_readv_callback(struct mid_q_entry *mid)
1699{
1700 struct cifs_readdata *rdata = mid->callback_data;
1701 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1702 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton58195752012-09-19 06:22:34 -07001703 struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001704 unsigned int credits_received = 1;
Jeff Layton58195752012-09-19 06:22:34 -07001705 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Layton8321fec2012-09-19 06:22:32 -07001706 .rq_nvec = 1,
1707 .rq_pages = rdata->pages,
1708 .rq_npages = rdata->nr_pages,
1709 .rq_pagesz = rdata->pagesz,
1710 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001711
Joe Perchesf96637b2013-05-04 22:12:25 -05001712 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
1713 __func__, mid->mid, mid->mid_state, rdata->result,
1714 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001715
1716 switch (mid->mid_state) {
1717 case MID_RESPONSE_RECEIVED:
1718 credits_received = le16_to_cpu(buf->CreditRequest);
1719 /* result already set, check signature */
Jeff Layton38d77c52013-05-26 07:01:00 -04001720 if (server->sign) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001721 int rc;
1722
Jeff Layton0b688cf2012-09-18 16:20:34 -07001723 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001724 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001725 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
1726 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001727 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001728 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04001729 task_io_account_read(rdata->got_bytes);
1730 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001731 break;
1732 case MID_REQUEST_SUBMITTED:
1733 case MID_RETRY_NEEDED:
1734 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04001735 if (server->sign && rdata->got_bytes)
1736 			/* reset the byte count since we cannot verify the signature */
1737 rdata->got_bytes = 0;
1738 /* FIXME: should this be counted toward the initiating task? */
1739 task_io_account_read(rdata->got_bytes);
1740 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001741 break;
1742 default:
1743 if (rdata->result != -ENODATA)
1744 rdata->result = -EIO;
1745 }
1746
1747 if (rdata->result)
1748 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1749
1750 queue_work(cifsiod_wq, &rdata->work);
1751 DeleteMidQEntry(mid);
1752 add_credits(server, credits_received, 0);
1753}
1754
1755/* smb2_async_readv - send an async read, and set up mid to handle result */
1756int
1757smb2_async_readv(struct cifs_readdata *rdata)
1758{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001759 int rc, flags = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001760 struct smb2_hdr *buf;
1761 struct cifs_io_parms io_parms;
Jeff Layton58195752012-09-19 06:22:34 -07001762 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Laytonfec344e2012-09-18 16:20:35 -07001763 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001764 struct TCP_Server_Info *server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001765
Joe Perchesf96637b2013-05-04 22:12:25 -05001766 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
1767 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001768
1769 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
1770 io_parms.offset = rdata->offset;
1771 io_parms.length = rdata->bytes;
1772 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
1773 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
1774 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001775
1776 server = io_parms.tcon->ses->server;
1777
Jeff Layton58195752012-09-19 06:22:34 -07001778 rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001779 if (rc) {
1780 if (rc == -EAGAIN && rdata->credits) {
1781 /* credits was reset by reconnect */
1782 rdata->credits = 0;
1783 /* reduce in_flight value since we won't send the req */
1784 spin_lock(&server->req_lock);
1785 server->in_flight--;
1786 spin_unlock(&server->req_lock);
1787 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001788 return rc;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001789 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001790
Jeff Layton58195752012-09-19 06:22:34 -07001791 buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001792 /* 4 for rfc1002 length field */
Jeff Layton58195752012-09-19 06:22:34 -07001793 rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001794
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001795 if (rdata->credits) {
1796 buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
1797 SMB2_MAX_BUFFER_SIZE));
1798 spin_lock(&server->req_lock);
1799 server->credits += rdata->credits -
1800 le16_to_cpu(buf->CreditCharge);
1801 spin_unlock(&server->req_lock);
1802 wake_up(&server->request_q);
1803 flags = CIFS_HAS_CREDITS;
1804 }
1805
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001806 kref_get(&rdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001807 rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001808 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001809 rdata, flags);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001810 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001811 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001812 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1813 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001814
1815 cifs_small_buf_release(buf);
1816 return rc;
1817}
Pavel Shilovsky33319142012-09-18 16:20:29 -07001818
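/*
 * Synchronous read. On success either copies the data into *buf (if the
 * caller supplied a buffer) or hands back the response buffer itself,
 * setting *buf_type so the caller knows how to free it later.
 */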
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001819int
1820SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1821 unsigned int *nbytes, char **buf, int *buf_type)
1822{
1823 int resp_buftype, rc = -EACCES;
1824 struct smb2_read_rsp *rsp = NULL;
1825 struct kvec iov[1];
1826
1827 *nbytes = 0;
1828 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1829 if (rc)
1830 return rc;
1831
1832 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1833 &resp_buftype, CIFS_LOG_ERROR);
1834
1835 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1836
1837 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1838 free_rsp_buf(resp_buftype, iov[0].iov_base);
1839 return 0;
1840 }
1841
1842 if (rc) {
1843 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001844 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001845 } else {
1846 *nbytes = le32_to_cpu(rsp->DataLength);
1847 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1848 (*nbytes > io_parms->length)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001849 cifs_dbg(FYI, "bad length %d for count %d\n",
1850 *nbytes, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001851 rc = -EIO;
1852 *nbytes = 0;
1853 }
1854 }
1855
1856 if (*buf) {
1857 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1858 *nbytes);
1859 free_rsp_buf(resp_buftype, iov[0].iov_base);
1860 } else if (resp_buftype != CIFS_NO_BUFFER) {
1861 *buf = iov[0].iov_base;
1862 if (resp_buftype == CIFS_SMALL_BUFFER)
1863 *buf_type = CIFS_SMALL_BUFFER;
1864 else if (resp_buftype == CIFS_LARGE_BUFFER)
1865 *buf_type = CIFS_LARGE_BUFFER;
1866 }
1867 return rc;
1868}
1869
Pavel Shilovsky33319142012-09-18 16:20:29 -07001870/*
1871 * Check the mid_state and signature on received buffer (if any), and queue the
1872 * workqueue completion task.
1873 */
1874static void
1875smb2_writev_callback(struct mid_q_entry *mid)
1876{
1877 struct cifs_writedata *wdata = mid->callback_data;
1878 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1879 unsigned int written;
1880 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1881 unsigned int credits_received = 1;
1882
1883 switch (mid->mid_state) {
1884 case MID_RESPONSE_RECEIVED:
1885 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1886 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1887 if (wdata->result != 0)
1888 break;
1889
1890 written = le32_to_cpu(rsp->DataLength);
1891 /*
1892 * Mask off high 16 bits when bytes written as returned
1893 * by the server is greater than bytes requested by the
1894 * client. OS/2 servers are known to set incorrect
1895 * CountHigh values.
1896 */
1897 if (written > wdata->bytes)
1898 written &= 0xFFFF;
1899
1900 if (written < wdata->bytes)
1901 wdata->result = -ENOSPC;
1902 else
1903 wdata->bytes = written;
1904 break;
1905 case MID_REQUEST_SUBMITTED:
1906 case MID_RETRY_NEEDED:
1907 wdata->result = -EAGAIN;
1908 break;
1909 default:
1910 wdata->result = -EIO;
1911 break;
1912 }
1913
1914 if (wdata->result)
1915 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1916
1917 queue_work(cifsiod_wq, &wdata->work);
1918 DeleteMidQEntry(mid);
1919 add_credits(tcon->ses->server, credits_received, 0);
1920}
1921
1922/* smb2_async_writev - send an async write, and set up mid to handle result */
1923int
Steve French4a5c80d2014-02-07 20:45:12 -06001924smb2_async_writev(struct cifs_writedata *wdata,
1925 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07001926{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001927 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001928 struct smb2_write_req *req = NULL;
1929 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001930 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001931 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001932 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001933
1934 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001935 if (rc) {
1936 if (rc == -EAGAIN && wdata->credits) {
1937 /* credits was reset by reconnect */
1938 wdata->credits = 0;
1939 /* reduce in_flight value since we won't send the req */
1940 spin_lock(&server->req_lock);
1941 server->in_flight--;
1942 spin_unlock(&server->req_lock);
1943 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001944 goto async_writev_out;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001945 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001946
Pavel Shilovsky33319142012-09-18 16:20:29 -07001947 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1948
1949 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1950 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1951 req->WriteChannelInfoOffset = 0;
1952 req->WriteChannelInfoLength = 0;
1953 req->Channel = 0;
1954 req->Offset = cpu_to_le64(wdata->offset);
1955 /* 4 for rfc1002 length field */
1956 req->DataOffset = cpu_to_le16(
1957 offsetof(struct smb2_write_req, Buffer) - 4);
1958 req->RemainingBytes = 0;
1959
1960 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07001961 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1962 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001963
Jeff Laytoneddb0792012-09-18 16:20:35 -07001964 rqst.rq_iov = &iov;
1965 rqst.rq_nvec = 1;
1966 rqst.rq_pages = wdata->pages;
1967 rqst.rq_npages = wdata->nr_pages;
1968 rqst.rq_pagesz = wdata->pagesz;
1969 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001970
Joe Perchesf96637b2013-05-04 22:12:25 -05001971 cifs_dbg(FYI, "async write at %llu %u bytes\n",
1972 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001973
1974 req->Length = cpu_to_le32(wdata->bytes);
1975
1976 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1977
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001978 if (wdata->credits) {
1979 req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
1980 SMB2_MAX_BUFFER_SIZE));
1981 spin_lock(&server->req_lock);
1982 server->credits += wdata->credits -
1983 le16_to_cpu(req->hdr.CreditCharge);
1984 spin_unlock(&server->req_lock);
1985 wake_up(&server->request_q);
1986 flags = CIFS_HAS_CREDITS;
1987 }
1988
Pavel Shilovsky33319142012-09-18 16:20:29 -07001989 kref_get(&wdata->refcount);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001990 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
1991 flags);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001992
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001993 if (rc) {
Steve French4a5c80d2014-02-07 20:45:12 -06001994 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001995 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1996 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001997
Pavel Shilovsky33319142012-09-18 16:20:29 -07001998async_writev_out:
1999 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07002000 return rc;
2001}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002002
2003/*
2004 * SMB2_write gets an iov pointer to a kvec array with n_vec as its length.
2005 * The length field from io_parms must be at least 1 and gives the total
2006 * number of bytes to write; the data to be written starts at position 1 in
2007 * the iov array (iov[0] is reserved for the request header).
2008 */
2009int
2010SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2011 unsigned int *nbytes, struct kvec *iov, int n_vec)
2012{
2013 int rc = 0;
2014 struct smb2_write_req *req = NULL;
2015 struct smb2_write_rsp *rsp = NULL;
2016 int resp_buftype;
2017 *nbytes = 0;
2018
2019 if (n_vec < 1)
2020 return rc;
2021
2022 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
2023 if (rc)
2024 return rc;
2025
2026 if (io_parms->tcon->ses->server == NULL)
2027 return -ECONNABORTED;
2028
2029 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
2030
2031 req->PersistentFileId = io_parms->persistent_fid;
2032 req->VolatileFileId = io_parms->volatile_fid;
2033 req->WriteChannelInfoOffset = 0;
2034 req->WriteChannelInfoLength = 0;
2035 req->Channel = 0;
2036 req->Length = cpu_to_le32(io_parms->length);
2037 req->Offset = cpu_to_le64(io_parms->offset);
2038 /* 4 for rfc1002 length field */
2039 req->DataOffset = cpu_to_le16(
2040 offsetof(struct smb2_write_req, Buffer) - 4);
2041 req->RemainingBytes = 0;
2042
2043 iov[0].iov_base = (char *)req;
2044 /* 4 for rfc1002 length field and 1 for Buffer */
2045 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2046
2047 /* length of entire message including data to be written */
2048 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
2049
2050 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
2051 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002052 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002053
2054 if (rc) {
2055 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002056 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002057 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002058 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002059
2060 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002061 return rc;
2062}
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002063
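/*
 * Count the directory entries returned in a query directory response,
 * checking that each entry fits within the buffer, and remember a pointer
 * to the last complete entry in *lastentry.
 */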
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002064static unsigned int
2065num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
2066{
2067 int len;
2068 unsigned int entrycount = 0;
2069 unsigned int next_offset = 0;
2070 FILE_DIRECTORY_INFO *entryptr;
2071
2072 if (bufstart == NULL)
2073 return 0;
2074
2075 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
2076
2077 while (1) {
2078 entryptr = (FILE_DIRECTORY_INFO *)
2079 ((char *)entryptr + next_offset);
2080
2081 if ((char *)entryptr + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002082 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002083 break;
2084 }
2085
2086 len = le32_to_cpu(entryptr->FileNameLength);
2087 if ((char *)entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002088 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
2089 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002090 break;
2091 }
2092
2093 *lastentry = (char *)entryptr;
2094 entrycount++;
2095
2096 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
2097 if (!next_offset)
2098 break;
2099 }
2100
2101 return entrycount;
2102}
2103
2104/*
2105 * Readdir/FindFirst
2106 */
2107int
2108SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
2109 u64 persistent_fid, u64 volatile_fid, int index,
2110 struct cifs_search_info *srch_inf)
2111{
2112 struct smb2_query_directory_req *req;
2113 struct smb2_query_directory_rsp *rsp = NULL;
2114 struct kvec iov[2];
2115 int rc = 0;
2116 int len;
2117 int resp_buftype;
2118 unsigned char *bufptr;
2119 struct TCP_Server_Info *server;
2120 struct cifs_ses *ses = tcon->ses;
2121 	__le16 asterisk = cpu_to_le16('*');
2122 char *end_of_smb;
2123 unsigned int output_size = CIFSMaxBufSize;
2124 size_t info_buf_size;
2125
2126 if (ses && (ses->server))
2127 server = ses->server;
2128 else
2129 return -EIO;
2130
2131 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
2132 if (rc)
2133 return rc;
2134
2135 switch (srch_inf->info_level) {
2136 case SMB_FIND_FILE_DIRECTORY_INFO:
2137 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
2138 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
2139 break;
2140 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
2141 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
2142 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
2143 break;
2144 default:
Joe Perchesf96637b2013-05-04 22:12:25 -05002145 cifs_dbg(VFS, "info level %u isn't supported\n",
2146 srch_inf->info_level);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002147 rc = -EINVAL;
2148 goto qdir_exit;
2149 }
2150
2151 req->FileIndex = cpu_to_le32(index);
2152 req->PersistentFileId = persistent_fid;
2153 req->VolatileFileId = volatile_fid;
2154
2155 len = 0x2;
2156 bufptr = req->Buffer;
2157 	memcpy(bufptr, &asterisk, len);
2158
2159 req->FileNameOffset =
2160 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
2161 req->FileNameLength = cpu_to_le16(len);
2162 /*
2163 * BB could be 30 bytes or so longer if we used SMB2 specific
2164 * buffer lengths, but this is safe and close enough.
2165 */
2166 output_size = min_t(unsigned int, output_size, server->maxBuf);
2167 output_size = min_t(unsigned int, output_size, 2 << 15);
2168 req->OutputBufferLength = cpu_to_le32(output_size);
2169
2170 iov[0].iov_base = (char *)req;
2171 /* 4 for RFC1001 length and 1 for Buffer */
2172 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2173
2174 iov[1].iov_base = (char *)(req->Buffer);
2175 iov[1].iov_len = len;
2176
2177 inc_rfc1001_len(req, len - 1 /* Buffer */);
2178
2179 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002180 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
2181
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002182 if (rc) {
Pavel Shilovsky52755802014-08-18 20:49:57 +04002183 if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
2184 srch_inf->endOfSearch = true;
2185 rc = 0;
2186 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002187 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
2188 goto qdir_exit;
2189 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002190
2191 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2192 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2193 info_buf_size);
2194 if (rc)
2195 goto qdir_exit;
2196
2197 srch_inf->unicode = true;
2198
2199 if (srch_inf->ntwrk_buf_start) {
2200 if (srch_inf->smallBuf)
2201 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
2202 else
2203 cifs_buf_release(srch_inf->ntwrk_buf_start);
2204 }
2205 srch_inf->ntwrk_buf_start = (char *)rsp;
2206 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
2207 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
2208 /* 4 for rfc1002 length field */
2209 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
2210 srch_inf->entries_in_buffer =
2211 num_entries(srch_inf->srch_entries_start, end_of_smb,
2212 &srch_inf->last_entry, info_buf_size);
2213 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
Joe Perchesf96637b2013-05-04 22:12:25 -05002214 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
2215 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
2216 srch_inf->srch_entries_start, srch_inf->last_entry);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002217 if (resp_buftype == CIFS_LARGE_BUFFER)
2218 srch_inf->smallBuf = false;
2219 else if (resp_buftype == CIFS_SMALL_BUFFER)
2220 srch_inf->smallBuf = true;
2221 else
Joe Perchesf96637b2013-05-04 22:12:25 -05002222 cifs_dbg(VFS, "illegal search buffer type\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002223
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002224 return rc;
2225
2226qdir_exit:
2227 free_rsp_buf(resp_buftype, rsp);
2228 return rc;
2229}
2230
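/*
 * Worker for the SMB2_SET_INFO based setters below. The first of the num
 * data buffers is copied directly after the fixed part of the request;
 * any additional buffers are sent as extra iovecs.
 */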
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002231static int
2232send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002233 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002234 unsigned int num, void **data, unsigned int *size)
2235{
2236 struct smb2_set_info_req *req;
2237 struct smb2_set_info_rsp *rsp = NULL;
2238 struct kvec *iov;
2239 int rc = 0;
2240 int resp_buftype;
2241 unsigned int i;
2242 struct TCP_Server_Info *server;
2243 struct cifs_ses *ses = tcon->ses;
2244
2245 if (ses && (ses->server))
2246 server = ses->server;
2247 else
2248 return -EIO;
2249
2250 if (!num)
2251 return -EINVAL;
2252
2253 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
2254 if (!iov)
2255 return -ENOMEM;
2256
2257 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
2258 if (rc) {
2259 kfree(iov);
2260 return rc;
2261 }
2262
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002263 req->hdr.ProcessId = cpu_to_le32(pid);
2264
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002265 req->InfoType = SMB2_O_INFO_FILE;
2266 req->FileInfoClass = info_class;
2267 req->PersistentFileId = persistent_fid;
2268 req->VolatileFileId = volatile_fid;
2269
2270 /* 4 for RFC1001 length and 1 for Buffer */
2271 req->BufferOffset =
2272 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
2273 req->BufferLength = cpu_to_le32(*size);
2274
2275 inc_rfc1001_len(req, *size - 1 /* Buffer */);
2276
2277 memcpy(req->Buffer, *data, *size);
2278
2279 iov[0].iov_base = (char *)req;
2280 /* 4 for RFC1001 length */
2281 iov[0].iov_len = get_rfc1002_length(req) + 4;
2282
2283 for (i = 1; i < num; i++) {
2284 inc_rfc1001_len(req, size[i]);
2285 le32_add_cpu(&req->BufferLength, size[i]);
2286 iov[i].iov_base = (char *)data[i];
2287 iov[i].iov_len = size[i];
2288 }
2289
2290 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2291 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2292
Steve French7d3fb242013-11-18 09:56:28 -06002293 if (rc != 0)
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002294 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
Steve French7d3fb242013-11-18 09:56:28 -06002295
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002296 free_rsp_buf(resp_buftype, rsp);
2297 kfree(iov);
2298 return rc;
2299}
2300
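/* Rename an open file to target_file, replacing any existing target. */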
2301int
2302SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
2303 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2304{
2305 struct smb2_file_rename_info info;
2306 void **data;
2307 unsigned int size[2];
2308 int rc;
2309 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2310
2311 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2312 if (!data)
2313 return -ENOMEM;
2314
2315 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
2316 /* 0 = fail if target already exists */
2317 	info.RootDirectory = 0; /* MBZ for network ops (why does the spec require this?) */
2318 info.FileNameLength = cpu_to_le32(len);
2319
2320 data[0] = &info;
2321 size[0] = sizeof(struct smb2_file_rename_info);
2322
2323 data[1] = target_file;
2324 size[1] = len + 2 /* null */;
2325
2326 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002327 current->tgid, FILE_RENAME_INFORMATION, 2, data,
2328 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002329 kfree(data);
2330 return rc;
2331}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002332
2333int
2334SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
2335 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2336{
2337 struct smb2_file_link_info info;
2338 void **data;
2339 unsigned int size[2];
2340 int rc;
2341 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2342
2343 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2344 if (!data)
2345 return -ENOMEM;
2346
2347 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
2348 /* 0 = fail if link already exists */
2349 	info.RootDirectory = 0; /* MBZ for network ops (why does the spec require this?) */
2350 info.FileNameLength = cpu_to_le32(len);
2351
2352 data[0] = &info;
2353 size[0] = sizeof(struct smb2_file_link_info);
2354
2355 data[1] = target_file;
2356 size[1] = len + 2 /* null */;
2357
2358 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002359 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002360 kfree(data);
2361 return rc;
2362}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002363
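/*
 * Update the file size: FILE_ALLOCATION_INFORMATION is used when is_falloc
 * is set, FILE_END_OF_FILE_INFORMATION otherwise. Illustrative call (names
 * are placeholders):
 *	__le64 eof = cpu_to_le64(new_size);
 *	SMB2_set_eof(xid, tcon, pfid, vfid, current->tgid, &eof, false);
 */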
2364int
2365SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
Steve Frenchf29ebb42014-07-19 21:44:58 -05002366 u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002367{
2368 struct smb2_file_eof_info info;
2369 void *data;
2370 unsigned int size;
2371
2372 info.EndOfFile = *eof;
2373
2374 data = &info;
2375 size = sizeof(struct smb2_file_eof_info);
2376
Steve Frenchf29ebb42014-07-19 21:44:58 -05002377 if (is_falloc)
2378 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2379 pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
2380 else
2381 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2382 pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002383}
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07002384
2385int
2386SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2387 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
2388{
2389 unsigned int size;
2390 size = sizeof(FILE_BASIC_INFO);
2391 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2392 current->tgid, FILE_BASIC_INFORMATION, 1,
2393 (void **)&buf, &size);
2394}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002395
2396int
2397SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
2398 const u64 persistent_fid, const u64 volatile_fid,
2399 __u8 oplock_level)
2400{
2401 int rc;
2402 struct smb2_oplock_break *req = NULL;
2403
Joe Perchesf96637b2013-05-04 22:12:25 -05002404 cifs_dbg(FYI, "SMB2_oplock_break\n");
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002405 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2406
2407 if (rc)
2408 return rc;
2409
2410 req->VolatileFid = volatile_fid;
2411 req->PersistentFid = persistent_fid;
2412 req->OplockLevel = oplock_level;
2413 req->hdr.CreditRequest = cpu_to_le16(1);
2414
2415 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2416 /* SMB2 buffer freed by function above */
2417
2418 if (rc) {
2419 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002420 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002421 }
2422
2423 return rc;
2424}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002425
2426static void
2427copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2428 struct kstatfs *kst)
2429{
2430 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2431 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2432 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2433 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2434 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2435 return;
2436}
2437
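/*
 * Build (but do not send) a filesystem information query of the given
 * level, filling in the single iovec that the QFS callers below pass to
 * SendReceive2().
 */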
2438static int
2439build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2440 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2441{
2442 int rc;
2443 struct smb2_query_info_req *req;
2444
Joe Perchesf96637b2013-05-04 22:12:25 -05002445 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002446
2447 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2448 return -EIO;
2449
2450 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2451 if (rc)
2452 return rc;
2453
2454 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2455 req->FileInfoClass = level;
2456 req->PersistentFileId = persistent_fid;
2457 req->VolatileFileId = volatile_fid;
2458 /* 4 for rfc1002 length field and 1 for pad */
2459 req->InputBufferOffset =
2460 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2461 req->OutputBufferLength = cpu_to_le32(
2462 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2463
2464 iov->iov_base = (char *)req;
2465 /* 4 for rfc1002 length field */
2466 iov->iov_len = get_rfc1002_length(req) + 4;
2467 return 0;
2468}
2469
2470int
2471SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2472 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2473{
2474 struct smb2_query_info_rsp *rsp = NULL;
2475 struct kvec iov;
2476 int rc = 0;
2477 int resp_buftype;
2478 struct cifs_ses *ses = tcon->ses;
2479 struct smb2_fs_full_size_info *info = NULL;
2480
2481 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2482 sizeof(struct smb2_fs_full_size_info),
2483 persistent_fid, volatile_fid);
2484 if (rc)
2485 return rc;
2486
2487 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2488 if (rc) {
2489 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve French34f62642013-10-09 02:07:00 -05002490 goto qfsinf_exit;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002491 }
2492 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2493
2494 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2495 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2496 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2497 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2498 sizeof(struct smb2_fs_full_size_info));
2499 if (!rc)
2500 copy_fs_info_to_kstatfs(info, fsdata);
2501
Steve French34f62642013-10-09 02:07:00 -05002502qfsinf_exit:
2503 free_rsp_buf(resp_buftype, iov.iov_base);
2504 return rc;
2505}
2506
2507int
2508SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
Steven French21671142013-10-09 13:36:35 -05002509 u64 persistent_fid, u64 volatile_fid, int level)
Steve French34f62642013-10-09 02:07:00 -05002510{
2511 struct smb2_query_info_rsp *rsp = NULL;
2512 struct kvec iov;
2513 int rc = 0;
Steven French21671142013-10-09 13:36:35 -05002514 int resp_buftype, max_len, min_len;
Steve French34f62642013-10-09 02:07:00 -05002515 struct cifs_ses *ses = tcon->ses;
2516 unsigned int rsp_len, offset;
2517
Steven French21671142013-10-09 13:36:35 -05002518 if (level == FS_DEVICE_INFORMATION) {
2519 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2520 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2521 } else if (level == FS_ATTRIBUTE_INFORMATION) {
2522 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
2523 min_len = MIN_FS_ATTR_INFO_SIZE;
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002524 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
2525 max_len = sizeof(struct smb3_fs_ss_info);
2526 min_len = sizeof(struct smb3_fs_ss_info);
Steven French21671142013-10-09 13:36:35 -05002527 } else {
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002528 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
Steven French21671142013-10-09 13:36:35 -05002529 return -EINVAL;
2530 }
2531
2532 rc = build_qfs_info_req(&iov, tcon, level, max_len,
Steve French34f62642013-10-09 02:07:00 -05002533 persistent_fid, volatile_fid);
2534 if (rc)
2535 return rc;
2536
2537 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2538 if (rc) {
2539 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2540 goto qfsattr_exit;
2541 }
2542 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2543
2544 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
2545 offset = le16_to_cpu(rsp->OutputBufferOffset);
Steven French21671142013-10-09 13:36:35 -05002546 rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
2547 if (rc)
2548 goto qfsattr_exit;
2549
2550 if (level == FS_ATTRIBUTE_INFORMATION)
Steve French34f62642013-10-09 02:07:00 -05002551 memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
2552 + (char *)&rsp->hdr, min_t(unsigned int,
Steven French21671142013-10-09 13:36:35 -05002553 rsp_len, max_len));
2554 else if (level == FS_DEVICE_INFORMATION)
2555 memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
2556 + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002557 else if (level == FS_SECTOR_SIZE_INFORMATION) {
2558 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
2559 (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
2560 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
2561 tcon->perf_sector_size =
2562 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
2563 }
Steve French34f62642013-10-09 02:07:00 -05002564
2565qfsattr_exit:
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002566 free_rsp_buf(resp_buftype, iov.iov_base);
2567 return rc;
2568}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002569
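/*
 * Send an array of num_lock byte-range lock elements for the given file
 * handle in a single SMB2 LOCK request; the lock elements go out as a
 * second iovec following the fixed size request.
 */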
2570int
2571smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2572 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2573 const __u32 num_lock, struct smb2_lock_element *buf)
2574{
2575 int rc = 0;
2576 struct smb2_lock_req *req = NULL;
2577 struct kvec iov[2];
2578 int resp_buf_type;
2579 unsigned int count;
2580
Joe Perchesf96637b2013-05-04 22:12:25 -05002581 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002582
2583 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2584 if (rc)
2585 return rc;
2586
2587 req->hdr.ProcessId = cpu_to_le32(pid);
2588 req->LockCount = cpu_to_le16(num_lock);
2589
2590 req->PersistentFileId = persist_fid;
2591 req->VolatileFileId = volatile_fid;
2592
2593 count = num_lock * sizeof(struct smb2_lock_element);
2594 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2595
2596 iov[0].iov_base = (char *)req;
2597 /* 4 for rfc1002 length field and count for all locks */
2598 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2599 iov[1].iov_base = (char *)buf;
2600 iov[1].iov_len = count;
2601
2602 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2603 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2604 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002605 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002606 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2607 }
2608
2609 return rc;
2610}
2611
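/*
 * Convenience wrapper around smb2_lockv() for a single byte-range lock or
 * unlock; non-blocking requests get SMB2_LOCKFLAG_FAIL_IMMEDIATELY added.
 * Illustrative call (names are placeholders):
 *	rc = SMB2_lock(xid, tcon, pfid, vfid, current->tgid, len, offset,
 *		       SMB2_LOCKFLAG_UNLOCK, false);
 */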
2612int
2613SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2614 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2615 const __u64 length, const __u64 offset, const __u32 lock_flags,
2616 const bool wait)
2617{
2618 struct smb2_lock_element lock;
2619
2620 lock.Offset = cpu_to_le64(offset);
2621 lock.Length = cpu_to_le64(length);
2622 lock.Flags = cpu_to_le32(lock_flags);
2623 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2624 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2625
2626 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2627}
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002628
2629int
2630SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2631 __u8 *lease_key, const __le32 lease_state)
2632{
2633 int rc;
2634 struct smb2_lease_ack *req = NULL;
2635
Joe Perchesf96637b2013-05-04 22:12:25 -05002636 cifs_dbg(FYI, "SMB2_lease_break\n");
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002637 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2638
2639 if (rc)
2640 return rc;
2641
2642 req->hdr.CreditRequest = cpu_to_le16(1);
2643 req->StructureSize = cpu_to_le16(36);
2644 inc_rfc1001_len(req, 12);
2645
2646 memcpy(req->LeaseKey, lease_key, 16);
2647 req->LeaseState = lease_state;
2648
2649 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2650 /* SMB2 buffer freed by function above */
2651
2652 if (rc) {
2653 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002654 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002655 }
2656
2657 return rc;
2658}