/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 *  The following table defines the expected "StructureSize" of SMB2 requests
 *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 *  Note that commands are defined in smb2pdu.h in le16 but the array below is
 *  indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
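
/*
 * Illustrative arithmetic only (not part of the protocol definitions):
 * smb2_hdr_assemble() below looks the command up in this table and sets the
 * RFC1001 length to parmsize + sizeof(struct smb2_hdr) - 4.  Assuming
 * struct smb2_hdr is the 64-byte wire header plus the 4-byte length field,
 * an SMB2_NEGOTIATE request therefore starts out as 36 + 64 = 100 bytes,
 * before variable-length data such as the Dialects array is appended via
 * inc_rfc1001_len().
 */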


static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is the only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if ((tcon->ses) && (tcon->ses->server) &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		hdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}

static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2 NegProt, SessSetup and Logoff do not have a tcon yet, so the
	 * tcp and smb session status checks are done differently for those
	 * three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start a forced umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

#ifdef CONFIG_CIFS_SMB311
/* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */
#define OFFSET_OF_NEG_CONTEXT 0x68  /* sizeof(struct smb2_negotiate_req) - 4 */


#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES	cpu_to_le16(1)
#define SMB2_ENCRYPTION_CAPABILITIES		cpu_to_le16(2)

static void
build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(38);
	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
}

static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
	pneg_ctxt->DataLength = cpu_to_le16(6);
	pneg_ctxt->CipherCount = cpu_to_le16(2);
	pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
	pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
}

static void
assemble_neg_contexts(struct smb2_negotiate_req *req)
{

	/* +4 is to account for the RFC1001 len field */
	char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4;

	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
	/* Add 2 to size to round to 8 byte boundary */
	pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context);
	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
	req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
	req->NegotiateContextCount = cpu_to_le16(2);
	inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
			+ sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
}
#else
static void assemble_neg_contexts(struct smb2_negotiate_req *req)
{
	return;
}
#endif /* SMB311 */


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */
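
/*
 * Condensed, illustrative skeleton of the pattern described above (adapted
 * from the simpler workers below, e.g. SMB2_close(); SMB2_SOME_CMD and
 * smb2_xxx_rsp are placeholders, and real workers add command specific
 * fields, extra iovecs and error handling):
 *
 *	rc = small_smb2_init(SMB2_SOME_CMD, tcon, (void **) &req);
 *	if (rc)
 *		return rc;
 *	... fill in command specific fields in the fixed length area ...
 *	iov[0].iov_base = (char *)req;
 *	iov[0].iov_len = get_rfc1002_length(req) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
 *	rsp = (struct smb2_xxx_rsp *)iov[0].iov_base;
 *	... decode fixed and variable length response areas ...
 *	free_rsp_buf(resp_buftype, rsp);
 *	return rc;
 */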

int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = ses->server;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
	inc_rfc1001_len(req, 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (ses->server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else {
		memcpy(req->ClientGUID, server->client_guid,
			SMB2_CLIENT_GUID_SIZE);
		if (ses->server->vals->protocol_id == SMB311_PROT_ID)
			assemble_neg_contexts(req);
	}
	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	/* BB we may eventually want to match the negotiated vs. requested
	   dialect, even though we are only requesting one at a time */
	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
#ifdef CONFIG_CIFS_SMB311
	else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
#endif /* SMB311 */
	else {
		cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
			 le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0)
		cifs_dbg(FYI, "missing security blob on negprot\n");

	rc = cifs_enable_signing(server, ses->sign);
#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
	if (rc)
		goto neg_exit;
	if (blob_length)
		rc = decode_negTokenInit(security_blob, blob_length, server);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct validate_negotiate_info_req vneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp;
	u32 rsplen;

	cifs_dbg(FYI, "validate negotiate\n");

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * can not sign it.  We could eventually change this to selectively
	 * sign just this, the first and only signed request on a connection.
	 * This is good enough for now since a user who wants better security
	 * would also enable signing on the mount. Having validation of
	 * negotiate info for signed connections helps reduce attack vectors
	 */
	if (tcon->ses->server->sign == false)
		return 0; /* validation requires signing */

	vneg_inbuf.Capabilities =
			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
					SMB2_CLIENT_GUID_SIZE);

	if (tcon->ses->sign)
		vneg_inbuf.SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		vneg_inbuf.SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		vneg_inbuf.SecurityMode = 0;

	vneg_inbuf.DialectCount = cpu_to_le16(1);
	vneg_inbuf.Dialects[0] =
		cpu_to_le16(tcon->ses->server->vals->protocol_id);

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
		FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
		(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
		(char **)&pneg_rsp, &rsplen);

	if (rc != 0) {
		cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
		return -EIO;
	}

	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
		cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
		return -EIO;
	}

	/* check validate negotiate info response matches what we got earlier */
	if (pneg_rsp->Dialect !=
			cpu_to_le16(tcon->ses->server->vals->protocol_id))
		goto vneg_out;

	if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
		goto vneg_out;

	/* do not validate server guid because not saved at negprot time yet */

	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
	      SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
		goto vneg_out;

	/* validate negotiate successful */
	cifs_dbg(FYI, "validate negotiate info successful\n");
	return 0;

vneg_out:
	cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
	return -EIO;
}

int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype = CIFS_NO_BUFFER;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server = ses->server;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cifs_dbg(FYI, "Session Setup\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	/*
	 * If we are here due to reconnect, free per-smb session key
	 * in case signing was required.
	 */
	kfree(ses->auth_key.response);
	ses->auth_key.response = NULL;

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;
	ses->ntlmssp->sesskey_per_smbsess = true;

	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
	ses->sectype = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->Flags = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
				 rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					blob_length,
					ntlmssp_blob); */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cifs_dbg(VFS, "illegal ntlmssp phase\n");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
			    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);
	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
			  CIFS_LOG_ERROR | CIFS_NEG_OP);

	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (resp_buftype != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cifs_dbg(VFS, "Unexpected more processing error\n");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cifs_dbg(VFS, "Invalid security buffer offset %d\n",
				 le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
		cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;

	if (!rc) {
		mutex_lock(&server->srv_mutex);
		if (server->sign && server->ops->generate_signingkey) {
			rc = server->ops->generate_signingkey(ses);
			kfree(ses->auth_key.response);
			ses->auth_key.response = NULL;
			if (rc) {
				cifs_dbg(FYI,
					"SMB3 session key generation failed\n");
				mutex_unlock(&server->srv_mutex);
				goto keygen_exit;
			}
		}
		if (!server->session_estab) {
			server->sequence_number = 0x2;
			server->session_estab = true;
		}
		mutex_unlock(&server->srv_mutex);

		cifs_dbg(FYI, "SMB2/3 session established successfully\n");
		spin_lock(&GlobalMid_Lock);
		ses->status = CifsGood;
		ses->need_reconnect = false;
		spin_unlock(&GlobalMid_Lock);
	}

keygen_exit:
	if (!server->sign) {
		kfree(ses->auth_key.response);
		ses->auth_key.response = NULL;
	}
	kfree(ses->ntlmssp);

	return rc;
}

int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "disconnect session %p\n", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	/* no need to send SMB logoff if uid already closed due to reconnect */
	if (ses->need_reconnect)
		goto smb2_session_already_dead;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	 /* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sign)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */

smb2_session_already_dead:
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

/* These are similar values to what Windows uses */
static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
{
	tcon->max_chunks = 256;
	tcon->max_bytes_chunk = 1048576;
	tcon->max_bytes_copy = 16777216;
}
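
/*
 * Illustrative note, derived from the constants above: a server-side copy
 * is limited to 256 chunks of at most 1 MiB (1048576 bytes) each, and to at
 * most 16 MiB (16777216 bytes) total per copy request.
 */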

int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cifs_dbg(FYI, "TCON\n");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cifs_dbg(FYI, "connection to disk share\n");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cifs_dbg(FYI, "connection to pipe share\n");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cifs_dbg(FYI, "connection to printer\n");
	} else {
		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
	init_copy_chunk_defaults(tcon);
	if (tcon->ses->server->ops->validate_negotiate)
		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
		if (tcon)
			tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Tree Disconnect\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}


static struct create_durable *
create_durable_buf(void)
{
	struct create_durable *buf;

	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_durable, Data));
	buf->ccontext.DataLength = cpu_to_le32(16);
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct create_durable, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = 'n';
	buf->Name[3] = 'Q';
	return buf;
}

static struct create_durable *
create_reconnect_durable_buf(struct cifs_fid *fid)
{
	struct create_durable *buf;

	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_durable, Data));
	buf->ccontext.DataLength = cpu_to_le32(16);
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct create_durable, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Data.Fid.PersistentFileId = fid->persistent_fid;
	buf->Data.Fid.VolatileFileId = fid->volatile_fid;
	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = 'n';
	buf->Name[3] = 'C';
	return buf;
}

static __u8
parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
		  unsigned int *epoch)
{
	char *data_offset;
	struct create_context *cc;
	unsigned int next = 0;
	char *name;

	data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
	cc = (struct create_context *)data_offset;
	do {
		cc = (struct create_context *)((char *)cc + next);
		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
		if (le16_to_cpu(cc->NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			next = le32_to_cpu(cc->Next);
			continue;
		}
		return server->ops->parse_lease_buf(cc, epoch);
	} while (next != 0);

	return 0;
}

static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
		  unsigned int *num_iovec, __u8 *oplock)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = server->vals->create_lease_size;
	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
				sizeof(struct smb2_create_req) - 4 +
				iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
		     server->vals->create_lease_size);
	inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
	*num_iovec = num + 1;
	return 0;
}

static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	if (oparms->reconnect) {
		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
		/* indicate that we don't need to relock the file */
		oparms->reconnect = false;
	} else
		iov[num].iov_base = create_durable_buf();
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
	inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
	*num_iovec = num + 1;
	return 0;
}

int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
	  __u8 *oplock, struct smb2_file_all_info *buf,
	  struct smb2_err_rsp **err_buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon = oparms->tcon;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[4];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	unsigned int num_iovecs = 2;
	__u32 file_attributes = 0;
	char *dhc_buf = NULL, *lc_buf = NULL;

	cifs_dbg(FYI, "create/open\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;
	if (oparms->create_options & CREATE_OPTION_SPECIAL)
		file_attributes |= ATTR_SYSTEM;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(oparms->disposition);
	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	/* do not count rfc1001 len field */
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	req->NameLength = cpu_to_le16(uni_path_len - 2);
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len--;
	if (uni_path_len % 8 != 0) {
		copy_size = uni_path_len / 8 * 8;
		if (copy_size < uni_path_len)
			copy_size += 8;

		copy_path = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_path)
			return -ENOMEM;
		memcpy((char *)copy_path, (const char *)path,
		       uni_path_len);
		uni_path_len = copy_size;
		path = copy_path;
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = path;
	/* -1 since last byte is buf[0] which was counted in smb2_buf_len */
	inc_rfc1001_len(req, uni_path_len - 1);

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		rc = add_lease_context(server, iov, &num_iovecs, oplock);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return rc;
		}
		lc_buf = iov[num_iovecs-1].iov_base;
	}

	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
		/* need to set Next field of lease context if we request it */
		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
			struct create_context *ccontext =
			    (struct create_context *)iov[num_iovecs-1].iov_base;
			ccontext->Next =
				cpu_to_le32(server->vals->create_lease_size);
		}
		rc = add_durable_context(iov, &num_iovecs, oparms);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			kfree(lc_buf);
			return rc;
		}
		dhc_buf = iov[num_iovecs-1].iov_base;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		if (err_buf)
			*err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
					   GFP_KERNEL);
		goto creat_exit;
	}

	oparms->fid->persistent_fid = rsp->PersistentFileId;
	oparms->fid->volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	kfree(lc_buf);
	kfree(dhc_buf);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
 */
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
	   u32 indatalen, char **out_data, u32 *plen /* returned data len */)
{
	struct smb2_ioctl_req *req;
	struct smb2_ioctl_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct kvec iov[2];
	int resp_buftype;
	int num_iovecs;
	int rc = 0;

	cifs_dbg(FYI, "SMB2 IOCTL\n");

	if (out_data != NULL)
		*out_data = NULL;

	/* zero out returned data len, in case of error */
	if (plen)
		*plen = 0;

	if (tcon)
		ses = tcon->ses;
	else
		return -EIO;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
	if (rc)
		return rc;

	req->CtlCode = cpu_to_le32(opcode);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	if (indatalen) {
		req->InputCount = cpu_to_le32(indatalen);
		/* do not set InputOffset if no input data */
		req->InputOffset =
		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
		iov[1].iov_base = in_data;
		iov[1].iov_len = indatalen;
		num_iovecs = 2;
	} else
		num_iovecs = 1;

	req->OutputOffset = 0;
	req->OutputCount = 0; /* MBZ */

	/*
	 * Could increase MaxOutputResponse, but that would require more
	 * than one credit. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as it fits in one credit
	 */
	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */

	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
	else
		req->Flags = 0;

	iov[0].iov_base = (char *)req;

	/*
	 * If no input data, the size of ioctl struct in
	 * protocol spec still includes a 1 byte data buffer,
	 * but if input data passed to ioctl, we do not
	 * want to double count this, so we do not send
	 * the dummy one byte of data in iovec[0] if sending
	 * input data (in iovec[1]). We also must add 4 bytes
	 * in first iovec to allow for rfc1002 length field.
	 */

	if (indatalen) {
		iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
		inc_rfc1001_len(req, indatalen - 1);
	} else
		iov[0].iov_len = get_rfc1002_length(req) + 4;


	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;

	if ((rc != 0) && (rc != -EINVAL)) {
		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
		goto ioctl_exit;
	} else if (rc == -EINVAL) {
		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
		    (opcode != FSCTL_SRV_COPYCHUNK)) {
			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
			goto ioctl_exit;
		}
	}

	/* check if caller wants to look at return data or just return rc */
	if ((plen == NULL) || (out_data == NULL))
		goto ioctl_exit;

	*plen = le32_to_cpu(rsp->OutputCount);

	/* We check for obvious errors in the output buffer length and offset */
	if (*plen == 0)
		goto ioctl_exit; /* server returned no data */
	else if (*plen > 0xFF00) {
		cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
		cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
			le32_to_cpu(rsp->OutputOffset));
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	*out_data = kmalloc(*plen, GFP_KERNEL);
	if (*out_data == NULL) {
		rc = -ENOMEM;
		goto ioctl_exit;
	}

	memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
	       *plen);
ioctl_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

Steve French64a5cfa2013-10-14 15:31:32 -05001410/*
 1411 * Individual callers of the ioctl worker function follow
1412 */
1413
1414int
1415SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1416 u64 persistent_fid, u64 volatile_fid)
1417{
1418 int rc;
Steve French64a5cfa2013-10-14 15:31:32 -05001419 struct compress_ioctl fsctl_input;
1420 char *ret_data = NULL;
1421
1422 fsctl_input.CompressionState =
Fabian Frederickbc09d142014-12-10 15:41:15 -08001423 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
Steve French64a5cfa2013-10-14 15:31:32 -05001424
1425 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1426 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
1427 (char *)&fsctl_input /* data input */,
1428 2 /* in data len */, &ret_data /* out data */, NULL);
1429
1430 cifs_dbg(FYI, "set compression rc %d\n", rc);
Steve French64a5cfa2013-10-14 15:31:32 -05001431
1432 return rc;
1433}
1434
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001435int
1436SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1437 u64 persistent_fid, u64 volatile_fid)
1438{
1439 struct smb2_close_req *req;
1440 struct smb2_close_rsp *rsp;
1441 struct TCP_Server_Info *server;
1442 struct cifs_ses *ses = tcon->ses;
1443 struct kvec iov[1];
1444 int resp_buftype;
1445 int rc = 0;
1446
Joe Perchesf96637b2013-05-04 22:12:25 -05001447 cifs_dbg(FYI, "Close\n");
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001448
1449 if (ses && (ses->server))
1450 server = ses->server;
1451 else
1452 return -EIO;
1453
1454 rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
1455 if (rc)
1456 return rc;
1457
1458 req->PersistentFileId = persistent_fid;
1459 req->VolatileFileId = volatile_fid;
1460
1461 iov[0].iov_base = (char *)req;
1462 /* 4 for rfc1002 length field */
1463 iov[0].iov_len = get_rfc1002_length(req) + 4;
1464
1465 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1466 rsp = (struct smb2_close_rsp *)iov[0].iov_base;
1467
1468 if (rc != 0) {
Namjae Jeond4a029d2014-08-20 19:39:59 +09001469 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001470 goto close_exit;
1471 }
1472
Pavel Shilovsky2503a0d2011-12-26 22:58:46 +04001473 /* BB FIXME - decode close response, update inode for caching */
1474
1475close_exit:
1476 free_rsp_buf(resp_buftype, rsp);
1477 return rc;
1478}
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001479
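/*
 * Sanity check a variable length field in an SMB2 response: it must be at
 * least min_buf_size bytes long and must lie entirely within the frame.
 */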
1480static int
1481validate_buf(unsigned int offset, unsigned int buffer_length,
1482 struct smb2_hdr *hdr, unsigned int min_buf_size)
1483
1484{
1485 unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
1486 char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
1487 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1488 char *end_of_buf = begin_of_buf + buffer_length;
1489
1490
1491 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001492 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
1493 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001494 return -EINVAL;
1495 }
1496
1497 /* check if beyond RFC1001 maximum length */
1498 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001499 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
1500 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001501 return -EINVAL;
1502 }
1503
1504 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001505 cifs_dbg(VFS, "illegal server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001506 return -EINVAL;
1507 }
1508
1509 return 0;
1510}
1511
1512/*
1513 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
1514 * Caller must free buffer.
1515 */
1516static int
1517validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
1518 struct smb2_hdr *hdr, unsigned int minbufsize,
1519 char *data)
1520
1521{
1522 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1523 int rc;
1524
1525 if (!data)
1526 return -EINVAL;
1527
1528 rc = validate_buf(offset, buffer_length, hdr, minbufsize);
1529 if (rc)
1530 return rc;
1531
1532 memcpy(data, begin_of_buf, buffer_length);
1533
1534 return 0;
1535}
1536
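/*
 * Worker shared by the query wrappers below: send an SMB2 QUERY_INFO request
 * for the given file info class and copy the validated response into the
 * caller-supplied buffer.
 */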
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001537static int
1538query_info(const unsigned int xid, struct cifs_tcon *tcon,
1539 u64 persistent_fid, u64 volatile_fid, u8 info_class,
1540 size_t output_len, size_t min_len, void *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001541{
1542 struct smb2_query_info_req *req;
1543 struct smb2_query_info_rsp *rsp = NULL;
1544 struct kvec iov[2];
1545 int rc = 0;
1546 int resp_buftype;
1547 struct TCP_Server_Info *server;
1548 struct cifs_ses *ses = tcon->ses;
1549
Joe Perchesf96637b2013-05-04 22:12:25 -05001550 cifs_dbg(FYI, "Query Info\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001551
1552 if (ses && (ses->server))
1553 server = ses->server;
1554 else
1555 return -EIO;
1556
1557 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
1558 if (rc)
1559 return rc;
1560
1561 req->InfoType = SMB2_O_INFO_FILE;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001562 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001563 req->PersistentFileId = persistent_fid;
1564 req->VolatileFileId = volatile_fid;
1565 /* 4 for rfc1002 length field and 1 for Buffer */
1566 req->InputBufferOffset =
1567 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001568 req->OutputBufferLength = cpu_to_le32(output_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001569
1570 iov[0].iov_base = (char *)req;
1571 /* 4 for rfc1002 length field */
1572 iov[0].iov_len = get_rfc1002_length(req) + 4;
1573
1574 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001575 rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
1576
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001577 if (rc) {
1578 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
1579 goto qinf_exit;
1580 }
1581
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001582 rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
1583 le32_to_cpu(rsp->OutputBufferLength),
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001584 &rsp->hdr, min_len, data);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001585
1586qinf_exit:
1587 free_rsp_buf(resp_buftype, rsp);
1588 return rc;
1589}
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001590
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001591int
1592SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
1593 u64 persistent_fid, u64 volatile_fid,
1594 struct smb2_file_all_info *data)
1595{
1596 return query_info(xid, tcon, persistent_fid, volatile_fid,
1597 FILE_ALL_INFORMATION,
Pavel Shilovsky1bbe4992014-08-22 13:32:11 +04001598 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001599 sizeof(struct smb2_file_all_info), data);
1600}
1601
1602int
1603SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
1604 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
1605{
1606 return query_info(xid, tcon, persistent_fid, volatile_fid,
1607 FILE_INTERNAL_INFORMATION,
1608 sizeof(struct smb2_file_internal_info),
1609 sizeof(struct smb2_file_internal_info), uniqueid);
1610}
1611
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001612/*
1613 * This is a no-op for now. We're not really interested in the reply, but
1614 * rather in the fact that the server sent one and that server->lstrp
1615 * gets updated.
1616 *
 1617 * FIXME: maybe we should consider checking that the reply matches the request?
1618 */
1619static void
1620smb2_echo_callback(struct mid_q_entry *mid)
1621{
1622 struct TCP_Server_Info *server = mid->callback_data;
1623 struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
1624 unsigned int credits_received = 1;
1625
1626 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1627 credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
1628
1629 DeleteMidQEntry(mid);
1630 add_credits(server, credits_received, CIFS_ECHO_OP);
1631}
1632
1633int
1634SMB2_echo(struct TCP_Server_Info *server)
1635{
1636 struct smb2_echo_req *req;
1637 int rc = 0;
1638 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001639 struct smb_rqst rqst = { .rq_iov = &iov,
1640 .rq_nvec = 1 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001641
Joe Perchesf96637b2013-05-04 22:12:25 -05001642 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001643
1644 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1645 if (rc)
1646 return rc;
1647
1648 req->hdr.CreditRequest = cpu_to_le16(1);
1649
1650 iov.iov_base = (char *)req;
1651 /* 4 for rfc1002 length field */
1652 iov.iov_len = get_rfc1002_length(req) + 4;
1653
Jeff Laytonfec344e2012-09-18 16:20:35 -07001654 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001655 CIFS_ECHO_OP);
1656 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001657 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001658
1659 cifs_small_buf_release(req);
1660 return rc;
1661}
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001662
1663int
1664SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1665 u64 volatile_fid)
1666{
1667 struct smb2_flush_req *req;
1668 struct TCP_Server_Info *server;
1669 struct cifs_ses *ses = tcon->ses;
1670 struct kvec iov[1];
1671 int resp_buftype;
1672 int rc = 0;
1673
Joe Perchesf96637b2013-05-04 22:12:25 -05001674 cifs_dbg(FYI, "Flush\n");
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001675
1676 if (ses && (ses->server))
1677 server = ses->server;
1678 else
1679 return -EIO;
1680
1681 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
1682 if (rc)
1683 return rc;
1684
1685 req->PersistentFileId = persistent_fid;
1686 req->VolatileFileId = volatile_fid;
1687
1688 iov[0].iov_base = (char *)req;
1689 /* 4 for rfc1002 length field */
1690 iov[0].iov_len = get_rfc1002_length(req) + 4;
1691
1692 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1693
Steve Frenchdfebe402015-03-27 01:00:06 -05001694 if (rc != 0)
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001695 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1696
1697 free_rsp_buf(resp_buftype, iov[0].iov_base);
1698 return rc;
1699}
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001700
1701/*
1702 * To form a chain of read requests, any read requests after the first should
1703 * have the end_of_chain boolean set to true.
1704 */
1705static int
1706smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
1707 unsigned int remaining_bytes, int request_type)
1708{
1709 int rc = -EACCES;
1710 struct smb2_read_req *req = NULL;
1711
1712 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
1713 if (rc)
1714 return rc;
1715 if (io_parms->tcon->ses->server == NULL)
1716 return -ECONNABORTED;
1717
1718 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1719
1720 req->PersistentFileId = io_parms->persistent_fid;
1721 req->VolatileFileId = io_parms->volatile_fid;
1722 req->ReadChannelInfoOffset = 0; /* reserved */
1723 req->ReadChannelInfoLength = 0; /* reserved */
1724 req->Channel = 0; /* reserved */
1725 req->MinimumCount = 0;
1726 req->Length = cpu_to_le32(io_parms->length);
1727 req->Offset = cpu_to_le64(io_parms->offset);
1728
1729 if (request_type & CHAINED_REQUEST) {
1730 if (!(request_type & END_OF_CHAIN)) {
1731 /* 4 for rfc1002 length field */
1732 req->hdr.NextCommand =
1733 cpu_to_le32(get_rfc1002_length(req) + 4);
1734 } else /* END_OF_CHAIN */
1735 req->hdr.NextCommand = 0;
1736 if (request_type & RELATED_REQUEST) {
1737 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1738 /*
1739 * Related requests use info from previous read request
1740 * in chain.
1741 */
1742 req->hdr.SessionId = 0xFFFFFFFF;
1743 req->hdr.TreeId = 0xFFFFFFFF;
1744 req->PersistentFileId = 0xFFFFFFFF;
1745 req->VolatileFileId = 0xFFFFFFFF;
1746 }
1747 }
1748 if (remaining_bytes > io_parms->length)
1749 req->RemainingBytes = cpu_to_le32(remaining_bytes);
1750 else
1751 req->RemainingBytes = 0;
1752
1753 iov[0].iov_base = (char *)req;
1754 /* 4 for rfc1002 length field */
1755 iov[0].iov_len = get_rfc1002_length(req) + 4;
1756 return rc;
1757}
1758
1759static void
1760smb2_readv_callback(struct mid_q_entry *mid)
1761{
1762 struct cifs_readdata *rdata = mid->callback_data;
1763 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1764 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton58195752012-09-19 06:22:34 -07001765 struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001766 unsigned int credits_received = 1;
Jeff Layton58195752012-09-19 06:22:34 -07001767 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Layton8321fec2012-09-19 06:22:32 -07001768 .rq_nvec = 1,
1769 .rq_pages = rdata->pages,
1770 .rq_npages = rdata->nr_pages,
1771 .rq_pagesz = rdata->pagesz,
1772 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001773
Joe Perchesf96637b2013-05-04 22:12:25 -05001774 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
1775 __func__, mid->mid, mid->mid_state, rdata->result,
1776 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001777
1778 switch (mid->mid_state) {
1779 case MID_RESPONSE_RECEIVED:
1780 credits_received = le16_to_cpu(buf->CreditRequest);
1781 /* result already set, check signature */
Jeff Layton38d77c52013-05-26 07:01:00 -04001782 if (server->sign) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001783 int rc;
1784
Jeff Layton0b688cf2012-09-18 16:20:34 -07001785 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001786 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001787 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
1788 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001789 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001790 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04001791 task_io_account_read(rdata->got_bytes);
1792 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001793 break;
1794 case MID_REQUEST_SUBMITTED:
1795 case MID_RETRY_NEEDED:
1796 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04001797 if (server->sign && rdata->got_bytes)
 1798			/* reset the byte count since we cannot verify the signature */
1799 rdata->got_bytes = 0;
1800 /* FIXME: should this be counted toward the initiating task? */
1801 task_io_account_read(rdata->got_bytes);
1802 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001803 break;
1804 default:
1805 if (rdata->result != -ENODATA)
1806 rdata->result = -EIO;
1807 }
1808
1809 if (rdata->result)
1810 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1811
1812 queue_work(cifsiod_wq, &rdata->work);
1813 DeleteMidQEntry(mid);
1814 add_credits(server, credits_received, 0);
1815}
1816
1817/* smb2_async_readv - send an async read, and set up mid to handle result */
1818int
1819smb2_async_readv(struct cifs_readdata *rdata)
1820{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001821 int rc, flags = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001822 struct smb2_hdr *buf;
1823 struct cifs_io_parms io_parms;
Jeff Layton58195752012-09-19 06:22:34 -07001824 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Laytonfec344e2012-09-18 16:20:35 -07001825 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001826 struct TCP_Server_Info *server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001827
Joe Perchesf96637b2013-05-04 22:12:25 -05001828 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
1829 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001830
1831 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
1832 io_parms.offset = rdata->offset;
1833 io_parms.length = rdata->bytes;
1834 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
1835 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
1836 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001837
1838 server = io_parms.tcon->ses->server;
1839
Jeff Layton58195752012-09-19 06:22:34 -07001840 rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001841 if (rc) {
1842 if (rc == -EAGAIN && rdata->credits) {
1843 /* credits was reset by reconnect */
1844 rdata->credits = 0;
1845 /* reduce in_flight value since we won't send the req */
1846 spin_lock(&server->req_lock);
1847 server->in_flight--;
1848 spin_unlock(&server->req_lock);
1849 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001850 return rc;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001851 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001852
Jeff Layton58195752012-09-19 06:22:34 -07001853 buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001854 /* 4 for rfc1002 length field */
Jeff Layton58195752012-09-19 06:22:34 -07001855 rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001856
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001857 if (rdata->credits) {
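		/*
		 * Credits are charged per SMB2_MAX_BUFFER_SIZE chunk of the
		 * read; return any excess credits we reserved to the pool.
		 */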
1858 buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
1859 SMB2_MAX_BUFFER_SIZE));
1860 spin_lock(&server->req_lock);
1861 server->credits += rdata->credits -
1862 le16_to_cpu(buf->CreditCharge);
1863 spin_unlock(&server->req_lock);
1864 wake_up(&server->request_q);
1865 flags = CIFS_HAS_CREDITS;
1866 }
1867
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001868 kref_get(&rdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001869 rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001870 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001871 rdata, flags);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001872 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001873 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001874 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1875 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001876
1877 cifs_small_buf_release(buf);
1878 return rc;
1879}
Pavel Shilovsky33319142012-09-18 16:20:29 -07001880
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001881int
1882SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1883 unsigned int *nbytes, char **buf, int *buf_type)
1884{
1885 int resp_buftype, rc = -EACCES;
1886 struct smb2_read_rsp *rsp = NULL;
1887 struct kvec iov[1];
1888
1889 *nbytes = 0;
1890 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1891 if (rc)
1892 return rc;
1893
1894 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1895 &resp_buftype, CIFS_LOG_ERROR);
1896
1897 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1898
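	/* reading at or past end of file is not an error: report 0 bytes read */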
1899 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1900 free_rsp_buf(resp_buftype, iov[0].iov_base);
1901 return 0;
1902 }
1903
1904 if (rc) {
1905 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001906 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001907 } else {
1908 *nbytes = le32_to_cpu(rsp->DataLength);
1909 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1910 (*nbytes > io_parms->length)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001911 cifs_dbg(FYI, "bad length %d for count %d\n",
1912 *nbytes, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001913 rc = -EIO;
1914 *nbytes = 0;
1915 }
1916 }
1917
1918 if (*buf) {
1919 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1920 *nbytes);
1921 free_rsp_buf(resp_buftype, iov[0].iov_base);
1922 } else if (resp_buftype != CIFS_NO_BUFFER) {
1923 *buf = iov[0].iov_base;
1924 if (resp_buftype == CIFS_SMALL_BUFFER)
1925 *buf_type = CIFS_SMALL_BUFFER;
1926 else if (resp_buftype == CIFS_LARGE_BUFFER)
1927 *buf_type = CIFS_LARGE_BUFFER;
1928 }
1929 return rc;
1930}
1931
Pavel Shilovsky33319142012-09-18 16:20:29 -07001932/*
 1933 * Check the mid_state and signature on the received buffer (if any), and
 1934 * queue the workqueue completion task.
1935 */
1936static void
1937smb2_writev_callback(struct mid_q_entry *mid)
1938{
1939 struct cifs_writedata *wdata = mid->callback_data;
1940 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1941 unsigned int written;
1942 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1943 unsigned int credits_received = 1;
1944
1945 switch (mid->mid_state) {
1946 case MID_RESPONSE_RECEIVED:
1947 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1948 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1949 if (wdata->result != 0)
1950 break;
1951
1952 written = le32_to_cpu(rsp->DataLength);
1953 /*
1954 * Mask off high 16 bits when bytes written as returned
1955 * by the server is greater than bytes requested by the
1956 * client. OS/2 servers are known to set incorrect
1957 * CountHigh values.
1958 */
1959 if (written > wdata->bytes)
1960 written &= 0xFFFF;
1961
1962 if (written < wdata->bytes)
1963 wdata->result = -ENOSPC;
1964 else
1965 wdata->bytes = written;
1966 break;
1967 case MID_REQUEST_SUBMITTED:
1968 case MID_RETRY_NEEDED:
1969 wdata->result = -EAGAIN;
1970 break;
1971 default:
1972 wdata->result = -EIO;
1973 break;
1974 }
1975
1976 if (wdata->result)
1977 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1978
1979 queue_work(cifsiod_wq, &wdata->work);
1980 DeleteMidQEntry(mid);
1981 add_credits(tcon->ses->server, credits_received, 0);
1982}
1983
1984/* smb2_async_writev - send an async write, and set up mid to handle result */
1985int
Steve French4a5c80d2014-02-07 20:45:12 -06001986smb2_async_writev(struct cifs_writedata *wdata,
1987 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07001988{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001989 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001990 struct smb2_write_req *req = NULL;
1991 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001992 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001993 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001994 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001995
1996 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001997 if (rc) {
1998 if (rc == -EAGAIN && wdata->credits) {
1999 /* credits was reset by reconnect */
2000 wdata->credits = 0;
2001 /* reduce in_flight value since we won't send the req */
2002 spin_lock(&server->req_lock);
2003 server->in_flight--;
2004 spin_unlock(&server->req_lock);
2005 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07002006 goto async_writev_out;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002007 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07002008
Pavel Shilovsky33319142012-09-18 16:20:29 -07002009 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
2010
2011 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
2012 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
2013 req->WriteChannelInfoOffset = 0;
2014 req->WriteChannelInfoLength = 0;
2015 req->Channel = 0;
2016 req->Offset = cpu_to_le64(wdata->offset);
2017 /* 4 for rfc1002 length field */
2018 req->DataOffset = cpu_to_le16(
2019 offsetof(struct smb2_write_req, Buffer) - 4);
2020 req->RemainingBytes = 0;
2021
2022 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07002023 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
2024 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07002025
Jeff Laytoneddb0792012-09-18 16:20:35 -07002026 rqst.rq_iov = &iov;
2027 rqst.rq_nvec = 1;
2028 rqst.rq_pages = wdata->pages;
2029 rqst.rq_npages = wdata->nr_pages;
2030 rqst.rq_pagesz = wdata->pagesz;
2031 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07002032
Joe Perchesf96637b2013-05-04 22:12:25 -05002033 cifs_dbg(FYI, "async write at %llu %u bytes\n",
2034 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07002035
2036 req->Length = cpu_to_le32(wdata->bytes);
2037
2038 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
2039
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002040 if (wdata->credits) {
2041 req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
2042 SMB2_MAX_BUFFER_SIZE));
2043 spin_lock(&server->req_lock);
2044 server->credits += wdata->credits -
2045 le16_to_cpu(req->hdr.CreditCharge);
2046 spin_unlock(&server->req_lock);
2047 wake_up(&server->request_q);
2048 flags = CIFS_HAS_CREDITS;
2049 }
2050
Pavel Shilovsky33319142012-09-18 16:20:29 -07002051 kref_get(&wdata->refcount);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002052 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
2053 flags);
Pavel Shilovsky33319142012-09-18 16:20:29 -07002054
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002055 if (rc) {
Steve French4a5c80d2014-02-07 20:45:12 -06002056 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002057 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
2058 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07002059
Pavel Shilovsky33319142012-09-18 16:20:29 -07002060async_writev_out:
2061 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07002062 return rc;
2063}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002064
2065/*
 2066 * SMB2_write is passed an iov pointer to a kvec array with n_vec as its
 2067 * length. n_vec must be at least 1 and gives the number of elements with
 2068 * data to write, beginning at position 1 in the iov array (position 0 holds
 2069 * the request itself). The total data length is given by io_parms->length.
2070 */
2071int
2072SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2073 unsigned int *nbytes, struct kvec *iov, int n_vec)
2074{
2075 int rc = 0;
2076 struct smb2_write_req *req = NULL;
2077 struct smb2_write_rsp *rsp = NULL;
2078 int resp_buftype;
2079 *nbytes = 0;
2080
2081 if (n_vec < 1)
2082 return rc;
2083
2084 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
2085 if (rc)
2086 return rc;
2087
2088 if (io_parms->tcon->ses->server == NULL)
2089 return -ECONNABORTED;
2090
2091 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
2092
2093 req->PersistentFileId = io_parms->persistent_fid;
2094 req->VolatileFileId = io_parms->volatile_fid;
2095 req->WriteChannelInfoOffset = 0;
2096 req->WriteChannelInfoLength = 0;
2097 req->Channel = 0;
2098 req->Length = cpu_to_le32(io_parms->length);
2099 req->Offset = cpu_to_le64(io_parms->offset);
2100 /* 4 for rfc1002 length field */
2101 req->DataOffset = cpu_to_le16(
2102 offsetof(struct smb2_write_req, Buffer) - 4);
2103 req->RemainingBytes = 0;
2104
2105 iov[0].iov_base = (char *)req;
2106 /* 4 for rfc1002 length field and 1 for Buffer */
2107 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2108
2109 /* length of entire message including data to be written */
2110 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
2111
2112 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
2113 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002114 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002115
2116 if (rc) {
2117 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002118 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002119 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002120 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002121
2122 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002123 return rc;
2124}
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002125
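/*
 * Count the directory entries in a query directory response by walking the
 * NextEntryOffset links, stopping before any entry that would run past
 * end_of_buf. On return *lastentry points to the last entry counted.
 */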
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002126static unsigned int
2127num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
2128{
2129 int len;
2130 unsigned int entrycount = 0;
2131 unsigned int next_offset = 0;
2132 FILE_DIRECTORY_INFO *entryptr;
2133
2134 if (bufstart == NULL)
2135 return 0;
2136
2137 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
2138
2139 while (1) {
2140 entryptr = (FILE_DIRECTORY_INFO *)
2141 ((char *)entryptr + next_offset);
2142
2143 if ((char *)entryptr + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002144 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002145 break;
2146 }
2147
2148 len = le32_to_cpu(entryptr->FileNameLength);
2149 if ((char *)entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002150 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
2151 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002152 break;
2153 }
2154
2155 *lastentry = (char *)entryptr;
2156 entrycount++;
2157
2158 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
2159 if (!next_offset)
2160 break;
2161 }
2162
2163 return entrycount;
2164}
2165
2166/*
2167 * Readdir/FindFirst
2168 */
2169int
2170SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
2171 u64 persistent_fid, u64 volatile_fid, int index,
2172 struct cifs_search_info *srch_inf)
2173{
2174 struct smb2_query_directory_req *req;
2175 struct smb2_query_directory_rsp *rsp = NULL;
2176 struct kvec iov[2];
2177 int rc = 0;
2178 int len;
Steve French75fdfc82015-03-25 18:51:57 -05002179 int resp_buftype = CIFS_NO_BUFFER;
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002180 unsigned char *bufptr;
2181 struct TCP_Server_Info *server;
2182 struct cifs_ses *ses = tcon->ses;
 2183	__le16 asterisk = cpu_to_le16('*');
2184 char *end_of_smb;
2185 unsigned int output_size = CIFSMaxBufSize;
2186 size_t info_buf_size;
2187
2188 if (ses && (ses->server))
2189 server = ses->server;
2190 else
2191 return -EIO;
2192
2193 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
2194 if (rc)
2195 return rc;
2196
2197 switch (srch_inf->info_level) {
2198 case SMB_FIND_FILE_DIRECTORY_INFO:
2199 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
2200 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
2201 break;
2202 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
2203 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
2204 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
2205 break;
2206 default:
Joe Perchesf96637b2013-05-04 22:12:25 -05002207 cifs_dbg(VFS, "info level %u isn't supported\n",
2208 srch_inf->info_level);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002209 rc = -EINVAL;
2210 goto qdir_exit;
2211 }
2212
2213 req->FileIndex = cpu_to_le32(index);
2214 req->PersistentFileId = persistent_fid;
2215 req->VolatileFileId = volatile_fid;
2216
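	/* the search pattern is a single UTF-16 '*' (2 bytes) */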
2217 len = 0x2;
2218 bufptr = req->Buffer;
 2219	memcpy(bufptr, &asterisk, len);
2220
2221 req->FileNameOffset =
2222 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
2223 req->FileNameLength = cpu_to_le16(len);
2224 /*
2225 * BB could be 30 bytes or so longer if we used SMB2 specific
2226 * buffer lengths, but this is safe and close enough.
2227 */
2228 output_size = min_t(unsigned int, output_size, server->maxBuf);
2229 output_size = min_t(unsigned int, output_size, 2 << 15);
2230 req->OutputBufferLength = cpu_to_le32(output_size);
2231
2232 iov[0].iov_base = (char *)req;
2233 /* 4 for RFC1001 length and 1 for Buffer */
2234 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2235
2236 iov[1].iov_base = (char *)(req->Buffer);
2237 iov[1].iov_len = len;
2238
2239 inc_rfc1001_len(req, len - 1 /* Buffer */);
2240
2241 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002242 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
2243
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002244 if (rc) {
Pavel Shilovsky52755802014-08-18 20:49:57 +04002245 if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
2246 srch_inf->endOfSearch = true;
2247 rc = 0;
2248 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002249 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
2250 goto qdir_exit;
2251 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002252
2253 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2254 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2255 info_buf_size);
2256 if (rc)
2257 goto qdir_exit;
2258
2259 srch_inf->unicode = true;
2260
2261 if (srch_inf->ntwrk_buf_start) {
2262 if (srch_inf->smallBuf)
2263 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
2264 else
2265 cifs_buf_release(srch_inf->ntwrk_buf_start);
2266 }
2267 srch_inf->ntwrk_buf_start = (char *)rsp;
2268 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
2269 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
2270 /* 4 for rfc1002 length field */
2271 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
2272 srch_inf->entries_in_buffer =
2273 num_entries(srch_inf->srch_entries_start, end_of_smb,
2274 &srch_inf->last_entry, info_buf_size);
2275 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
Joe Perchesf96637b2013-05-04 22:12:25 -05002276 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
2277 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
2278 srch_inf->srch_entries_start, srch_inf->last_entry);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002279 if (resp_buftype == CIFS_LARGE_BUFFER)
2280 srch_inf->smallBuf = false;
2281 else if (resp_buftype == CIFS_SMALL_BUFFER)
2282 srch_inf->smallBuf = true;
2283 else
Joe Perchesf96637b2013-05-04 22:12:25 -05002284 cifs_dbg(VFS, "illegal search buffer type\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002285
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002286 return rc;
2287
2288qdir_exit:
2289 free_rsp_buf(resp_buftype, rsp);
2290 return rc;
2291}
2292
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002293static int
2294send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002295 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002296 unsigned int num, void **data, unsigned int *size)
2297{
2298 struct smb2_set_info_req *req;
2299 struct smb2_set_info_rsp *rsp = NULL;
2300 struct kvec *iov;
2301 int rc = 0;
2302 int resp_buftype;
2303 unsigned int i;
2304 struct TCP_Server_Info *server;
2305 struct cifs_ses *ses = tcon->ses;
2306
2307 if (ses && (ses->server))
2308 server = ses->server;
2309 else
2310 return -EIO;
2311
2312 if (!num)
2313 return -EINVAL;
2314
2315 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
2316 if (!iov)
2317 return -ENOMEM;
2318
2319 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
2320 if (rc) {
2321 kfree(iov);
2322 return rc;
2323 }
2324
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002325 req->hdr.ProcessId = cpu_to_le32(pid);
2326
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002327 req->InfoType = SMB2_O_INFO_FILE;
2328 req->FileInfoClass = info_class;
2329 req->PersistentFileId = persistent_fid;
2330 req->VolatileFileId = volatile_fid;
2331
2332 /* 4 for RFC1001 length and 1 for Buffer */
2333 req->BufferOffset =
2334 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
2335 req->BufferLength = cpu_to_le32(*size);
2336
2337 inc_rfc1001_len(req, *size - 1 /* Buffer */);
2338
2339 memcpy(req->Buffer, *data, *size);
2340
2341 iov[0].iov_base = (char *)req;
2342 /* 4 for RFC1001 length */
2343 iov[0].iov_len = get_rfc1002_length(req) + 4;
2344
2345 for (i = 1; i < num; i++) {
2346 inc_rfc1001_len(req, size[i]);
2347 le32_add_cpu(&req->BufferLength, size[i]);
2348 iov[i].iov_base = (char *)data[i];
2349 iov[i].iov_len = size[i];
2350 }
2351
2352 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2353 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2354
Steve French7d3fb242013-11-18 09:56:28 -06002355 if (rc != 0)
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002356 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
Steve French7d3fb242013-11-18 09:56:28 -06002357
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002358 free_rsp_buf(resp_buftype, rsp);
2359 kfree(iov);
2360 return rc;
2361}
2362
2363int
2364SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
2365 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2366{
2367 struct smb2_file_rename_info info;
2368 void **data;
2369 unsigned int size[2];
2370 int rc;
2371 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2372
2373 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2374 if (!data)
2375 return -ENOMEM;
2376
2377 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
2378 /* 0 = fail if target already exists */
2379 info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
2380 info.FileNameLength = cpu_to_le32(len);
2381
2382 data[0] = &info;
2383 size[0] = sizeof(struct smb2_file_rename_info);
2384
2385 data[1] = target_file;
2386 size[1] = len + 2 /* null */;
2387
2388 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002389 current->tgid, FILE_RENAME_INFORMATION, 2, data,
2390 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002391 kfree(data);
2392 return rc;
2393}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002394
2395int
2396SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
2397 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2398{
2399 struct smb2_file_link_info info;
2400 void **data;
2401 unsigned int size[2];
2402 int rc;
2403 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2404
2405 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2406 if (!data)
2407 return -ENOMEM;
2408
2409 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
2410 /* 0 = fail if link already exists */
2411 info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
2412 info.FileNameLength = cpu_to_le32(len);
2413
2414 data[0] = &info;
2415 size[0] = sizeof(struct smb2_file_link_info);
2416
2417 data[1] = target_file;
2418 size[1] = len + 2 /* null */;
2419
2420 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002421 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002422 kfree(data);
2423 return rc;
2424}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002425
2426int
2427SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
Steve Frenchf29ebb42014-07-19 21:44:58 -05002428 u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002429{
2430 struct smb2_file_eof_info info;
2431 void *data;
2432 unsigned int size;
2433
2434 info.EndOfFile = *eof;
2435
2436 data = &info;
2437 size = sizeof(struct smb2_file_eof_info);
2438
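	/* for fallocate, set the allocation size; otherwise set end of file */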
Steve Frenchf29ebb42014-07-19 21:44:58 -05002439 if (is_falloc)
2440 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2441 pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
2442 else
2443 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2444 pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002445}
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07002446
2447int
2448SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2449 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
2450{
2451 unsigned int size;
2452 size = sizeof(FILE_BASIC_INFO);
2453 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2454 current->tgid, FILE_BASIC_INFORMATION, 1,
2455 (void **)&buf, &size);
2456}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002457
2458int
2459SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
2460 const u64 persistent_fid, const u64 volatile_fid,
2461 __u8 oplock_level)
2462{
2463 int rc;
2464 struct smb2_oplock_break *req = NULL;
2465
Joe Perchesf96637b2013-05-04 22:12:25 -05002466 cifs_dbg(FYI, "SMB2_oplock_break\n");
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002467 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2468
2469 if (rc)
2470 return rc;
2471
2472 req->VolatileFid = volatile_fid;
2473 req->PersistentFid = persistent_fid;
2474 req->OplockLevel = oplock_level;
2475 req->hdr.CreditRequest = cpu_to_le16(1);
2476
2477 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2478 /* SMB2 buffer freed by function above */
2479
2480 if (rc) {
2481 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002482 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002483 }
2484
2485 return rc;
2486}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002487
2488static void
2489copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2490 struct kstatfs *kst)
2491{
2492 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2493 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2494 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2495 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2496 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2497 return;
2498}
2499
2500static int
2501build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2502 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2503{
2504 int rc;
2505 struct smb2_query_info_req *req;
2506
Joe Perchesf96637b2013-05-04 22:12:25 -05002507 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002508
2509 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2510 return -EIO;
2511
2512 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2513 if (rc)
2514 return rc;
2515
2516 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2517 req->FileInfoClass = level;
2518 req->PersistentFileId = persistent_fid;
2519 req->VolatileFileId = volatile_fid;
2520 /* 4 for rfc1002 length field and 1 for pad */
2521 req->InputBufferOffset =
2522 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2523 req->OutputBufferLength = cpu_to_le32(
2524 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2525
2526 iov->iov_base = (char *)req;
2527 /* 4 for rfc1002 length field */
2528 iov->iov_len = get_rfc1002_length(req) + 4;
2529 return 0;
2530}
2531
2532int
2533SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2534 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2535{
2536 struct smb2_query_info_rsp *rsp = NULL;
2537 struct kvec iov;
2538 int rc = 0;
2539 int resp_buftype;
2540 struct cifs_ses *ses = tcon->ses;
2541 struct smb2_fs_full_size_info *info = NULL;
2542
2543 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2544 sizeof(struct smb2_fs_full_size_info),
2545 persistent_fid, volatile_fid);
2546 if (rc)
2547 return rc;
2548
2549 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2550 if (rc) {
2551 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve French34f62642013-10-09 02:07:00 -05002552 goto qfsinf_exit;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002553 }
2554 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2555
2556 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2557 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2558 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2559 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2560 sizeof(struct smb2_fs_full_size_info));
2561 if (!rc)
2562 copy_fs_info_to_kstatfs(info, fsdata);
2563
Steve French34f62642013-10-09 02:07:00 -05002564qfsinf_exit:
2565 free_rsp_buf(resp_buftype, iov.iov_base);
2566 return rc;
2567}
2568
2569int
2570SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
Steven French21671142013-10-09 13:36:35 -05002571 u64 persistent_fid, u64 volatile_fid, int level)
Steve French34f62642013-10-09 02:07:00 -05002572{
2573 struct smb2_query_info_rsp *rsp = NULL;
2574 struct kvec iov;
2575 int rc = 0;
Steven French21671142013-10-09 13:36:35 -05002576 int resp_buftype, max_len, min_len;
Steve French34f62642013-10-09 02:07:00 -05002577 struct cifs_ses *ses = tcon->ses;
2578 unsigned int rsp_len, offset;
2579
Steven French21671142013-10-09 13:36:35 -05002580 if (level == FS_DEVICE_INFORMATION) {
2581 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2582 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2583 } else if (level == FS_ATTRIBUTE_INFORMATION) {
2584 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
2585 min_len = MIN_FS_ATTR_INFO_SIZE;
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002586 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
2587 max_len = sizeof(struct smb3_fs_ss_info);
2588 min_len = sizeof(struct smb3_fs_ss_info);
Steven French21671142013-10-09 13:36:35 -05002589 } else {
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002590 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
Steven French21671142013-10-09 13:36:35 -05002591 return -EINVAL;
2592 }
2593
2594 rc = build_qfs_info_req(&iov, tcon, level, max_len,
Steve French34f62642013-10-09 02:07:00 -05002595 persistent_fid, volatile_fid);
2596 if (rc)
2597 return rc;
2598
2599 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2600 if (rc) {
2601 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2602 goto qfsattr_exit;
2603 }
2604 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2605
2606 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
2607 offset = le16_to_cpu(rsp->OutputBufferOffset);
Steven French21671142013-10-09 13:36:35 -05002608 rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
2609 if (rc)
2610 goto qfsattr_exit;
2611
2612 if (level == FS_ATTRIBUTE_INFORMATION)
Steve French34f62642013-10-09 02:07:00 -05002613 memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
2614 + (char *)&rsp->hdr, min_t(unsigned int,
Steven French21671142013-10-09 13:36:35 -05002615 rsp_len, max_len));
2616 else if (level == FS_DEVICE_INFORMATION)
2617 memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
2618 + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002619 else if (level == FS_SECTOR_SIZE_INFORMATION) {
2620 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
2621 (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
2622 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
2623 tcon->perf_sector_size =
2624 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
2625 }
Steve French34f62642013-10-09 02:07:00 -05002626
2627qfsattr_exit:
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002628 free_rsp_buf(resp_buftype, iov.iov_base);
2629 return rc;
2630}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002631
2632int
2633smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2634 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2635 const __u32 num_lock, struct smb2_lock_element *buf)
2636{
2637 int rc = 0;
2638 struct smb2_lock_req *req = NULL;
2639 struct kvec iov[2];
2640 int resp_buf_type;
2641 unsigned int count;
2642
Joe Perchesf96637b2013-05-04 22:12:25 -05002643 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002644
2645 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2646 if (rc)
2647 return rc;
2648
2649 req->hdr.ProcessId = cpu_to_le32(pid);
2650 req->LockCount = cpu_to_le16(num_lock);
2651
2652 req->PersistentFileId = persist_fid;
2653 req->VolatileFileId = volatile_fid;
2654
2655 count = num_lock * sizeof(struct smb2_lock_element);
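	/* the request as built already accounts for one lock element */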
2656 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2657
2658 iov[0].iov_base = (char *)req;
2659 /* 4 for rfc1002 length field and count for all locks */
2660 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2661 iov[1].iov_base = (char *)buf;
2662 iov[1].iov_len = count;
2663
2664 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2665 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2666 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002667 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002668 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2669 }
2670
2671 return rc;
2672}
2673
2674int
2675SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2676 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2677 const __u64 length, const __u64 offset, const __u32 lock_flags,
2678 const bool wait)
2679{
2680 struct smb2_lock_element lock;
2681
2682 lock.Offset = cpu_to_le64(offset);
2683 lock.Length = cpu_to_le64(length);
2684 lock.Flags = cpu_to_le32(lock_flags);
2685 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2686 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2687
2688 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2689}
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002690
2691int
2692SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2693 __u8 *lease_key, const __le32 lease_state)
2694{
2695 int rc;
2696 struct smb2_lease_ack *req = NULL;
2697
Joe Perchesf96637b2013-05-04 22:12:25 -05002698 cifs_dbg(FYI, "SMB2_lease_break\n");
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002699 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2700
2701 if (rc)
2702 return rc;
2703
2704 req->hdr.CreditRequest = cpu_to_le16(1);
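	/*
	 * The lease ack body is 36 bytes, 12 more than the 24 byte oplock
	 * break this buffer was sized for, so grow the frame length to match.
	 */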
2705 req->StructureSize = cpu_to_le16(36);
2706 inc_rfc1001_len(req, 12);
2707
2708 memcpy(req->LeaseKey, lease_key, 16);
2709 req->LeaseState = lease_state;
2710
2711 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2712 /* SMB2 buffer freed by function above */
2713
2714 if (rc) {
2715 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002716 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002717 }
2718
2719 return rc;
2720}