/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2013
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be                   */
 /* treated slightly differently for reconnection purposes since we never     */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 *  The following table defines the expected "StructureSize" of SMB2 requests
 *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 *  Note that commands are defined in smb2pdu.h in le16 but the array below is
 *  indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

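/*
 * Fill in the fixed part of the SMB2 header for the given command: protocol
 * id, structure size, credits requested, process id, and (when a tcon is
 * supplied) the tree and session ids plus the signing flag.
 */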
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is the only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
	if ((tcon->ses) &&
	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		hdr->CreditCharge = cpu_to_le16(1);
	/* else CreditCharge MBZ */

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;

	/*
	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
	 * to pass the path on the Open SMB prefixed by \\server\share.
	 * Not sure when we would need to do the augmented path (if ever) and
	 * setting this flag breaks the SMB2 open operation since it is
	 * illegal to send an empty path name (without \\server\share prefix)
	 * when the DFS flag is set in the SMB open header. We could
	 * consider setting the flag on all operations other than open
	 * but it is safer to not set it for now.
	 */
/*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */

	if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}

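/*
 * Before building a request, make sure the transport and SMB session are
 * usable: wait for the demultiplex thread to reconnect the socket if needed,
 * then redo negotiate, session setup and tree connect as required.  Handle
 * based commands return -EAGAIN so the caller can reopen the file first.
 */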
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = ses->server;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
	inc_rfc1001_len(req, 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

	/* ClientGUID must be zero for SMB2.02 dialect */
	if (ses->server->vals->protocol_id == SMB20_PROT_ID)
		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
	else
		memcpy(req->ClientGUID, server->client_guid,
			SMB2_CLIENT_GUID_SIZE);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	/* BB we may eventually want to match the negotiated vs. requested
	   dialect, even though we are only requesting one at a time */
	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
	else {
		cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
			 le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	/* set it to the maximum buffer size value we can send with 1 credit */
	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
			       SMB2_MAX_BUFFER_SIZE);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	/*
	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
	 * for us will be
	 *	ses->sectype = RawNTLMSSP;
	 * but for time being this is our only auth choice so doesn't matter.
	 * We just found a server which sets blob length to zero expecting raw.
	 */
	if (blob_length == 0)
		cifs_dbg(FYI, "missing security blob on negprot\n");

	rc = cifs_enable_signing(server, ses->sign);
#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
	if (rc)
		goto neg_exit;
	if (blob_length)
		rc = decode_neg_token_init(security_blob, blob_length,
					   &server->sec_type);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct validate_negotiate_info_req vneg_inbuf;
	struct validate_negotiate_info_rsp *pneg_rsp;
	u32 rsplen;

	cifs_dbg(FYI, "validate negotiate\n");

	/*
	 * validation ioctl must be signed, so no point sending this if we
	 * can not sign it.  We could eventually change this to selectively
	 * sign just this, the first and only signed request on a connection.
	 * This is good enough for now since a user who wants better security
	 * would also enable signing on the mount. Having validation of
	 * negotiate info for signed connections helps reduce attack vectors
	 */
	if (tcon->ses->server->sign == false)
		return 0; /* validation requires signing */

	vneg_inbuf.Capabilities =
			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
					SMB2_CLIENT_GUID_SIZE);

	if (tcon->ses->sign)
		vneg_inbuf.SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		vneg_inbuf.SecurityMode =
			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		vneg_inbuf.SecurityMode = 0;

	vneg_inbuf.DialectCount = cpu_to_le16(1);
	vneg_inbuf.Dialects[0] =
		cpu_to_le16(tcon->ses->server->vals->protocol_id);

	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
		FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
		(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
		(char **)&pneg_rsp, &rsplen);

	if (rc != 0) {
		cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
		return -EIO;
	}

	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
		cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
		return -EIO;
	}

	/* check validate negotiate info response matches what we got earlier */
	if (pneg_rsp->Dialect !=
			cpu_to_le16(tcon->ses->server->vals->protocol_id))
		goto vneg_out;

	if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
		goto vneg_out;

	/* do not validate server guid because not saved at negprot time yet */

	if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
	      SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
		goto vneg_out;

	/* validate negotiate successful */
	cifs_dbg(FYI, "validate negotiate info successful\n");
	return 0;

vneg_out:
	cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
	return -EIO;
}

int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server = ses->server;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cifs_dbg(FYI, "Session Setup\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	/*
	 * If we are here due to reconnect, free per-smb session key
	 * in case signing was required.
	 */
	kfree(ses->auth_key.response);
	ses->auth_key.response = NULL;

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;
	ses->ntlmssp->sesskey_per_smbsess = true;

	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
	ses->sectype = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->VcNumber = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
				 rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
							&security_blob,
							blob_length,
							ntlmssp_blob); */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cifs_dbg(VFS, "illegal ntlmssp phase\n");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
					    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);
	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
			  CIFS_LOG_ERROR | CIFS_NEG_OP);

	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (resp_buftype != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cifs_dbg(VFS, "Unexpected more processing error\n");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cifs_dbg(VFS, "Invalid security buffer offset %d\n",
				 le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
		cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;

	if (!rc) {
		mutex_lock(&server->srv_mutex);
		if (server->sign && server->ops->generate_signingkey) {
			rc = server->ops->generate_signingkey(ses);
			kfree(ses->auth_key.response);
			ses->auth_key.response = NULL;
			if (rc) {
				cifs_dbg(FYI,
					"SMB3 session key generation failed\n");
				mutex_unlock(&server->srv_mutex);
				goto keygen_exit;
			}
		}
		if (!server->session_estab) {
			server->sequence_number = 0x2;
			server->session_estab = true;
		}
		mutex_unlock(&server->srv_mutex);

		cifs_dbg(FYI, "SMB2/3 session established successfully\n");
		spin_lock(&GlobalMid_Lock);
		ses->status = CifsGood;
		ses->need_reconnect = false;
		spin_unlock(&GlobalMid_Lock);
	}

keygen_exit:
	if (!server->sign) {
		kfree(ses->auth_key.response);
		ses->auth_key.response = NULL;
	}
	kfree(ses->ntlmssp);

	return rc;
}

int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "disconnect session %p\n", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	/* no need to send SMB logoff if uid already closed due to reconnect */
	if (ses->need_reconnect)
		goto smb2_session_already_dead;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	/* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sign)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */

smb2_session_already_dead:
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

/* These are similar values to what Windows uses */
static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
{
	tcon->max_chunks = 256;
	tcon->max_bytes_chunk = 1048576;
	tcon->max_bytes_copy = 16777216;
}

int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cifs_dbg(FYI, "TCON\n");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cifs_dbg(FYI, "connection to disk share\n");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cifs_dbg(FYI, "connection to pipe share\n");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cifs_dbg(FYI, "connection to printer\n");
	} else {
		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
	init_copy_chunk_defaults(tcon);
	if (tcon->ses->server->ops->validate_negotiate)
		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
		tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Tree Disconnect\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}


static struct create_durable *
create_durable_buf(void)
{
	struct create_durable *buf;

	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_durable, Data));
	buf->ccontext.DataLength = cpu_to_le32(16);
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct create_durable, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = 'n';
	buf->Name[3] = 'Q';
	return buf;
}

static struct create_durable *
create_reconnect_durable_buf(struct cifs_fid *fid)
{
	struct create_durable *buf;

	buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_durable, Data));
	buf->ccontext.DataLength = cpu_to_le32(16);
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
				(struct create_durable, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Data.Fid.PersistentFileId = fid->persistent_fid;
	buf->Data.Fid.VolatileFileId = fid->volatile_fid;
	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = 'n';
	buf->Name[3] = 'C';
	return buf;
}

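/*
 * Walk the create contexts returned in an SMB2 create response looking for
 * the lease context ("RqLs") and let the dialect specific handler parse it.
 */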
static __u8
parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
		  unsigned int *epoch)
{
	char *data_offset;
	struct create_context *cc;
	unsigned int next = 0;
	char *name;

	data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
	cc = (struct create_context *)data_offset;
	do {
		cc = (struct create_context *)((char *)cc + next);
		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
		if (le16_to_cpu(cc->NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			next = le32_to_cpu(cc->Next);
			continue;
		}
		return server->ops->parse_lease_buf(cc, epoch);
	} while (next != 0);

	return 0;
}

static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
		  unsigned int *num_iovec, __u8 *oplock)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = server->vals->create_lease_size;
	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset = cpu_to_le32(
				sizeof(struct smb2_create_req) - 4 +
				iov[num - 1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
		     server->vals->create_lease_size);
	inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
	*num_iovec = num + 1;
	return 0;
}

static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	if (oparms->reconnect) {
		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
		/* indicate that we don't need to relock the file */
		oparms->reconnect = false;
	} else
		iov[num].iov_base = create_durable_buf();
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
	inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
	*num_iovec = num + 1;
	return 0;
}

int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
	  __u8 *oplock, struct smb2_file_all_info *buf,
	  struct smb2_err_rsp **err_buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon = oparms->tcon;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[4];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	unsigned int num_iovecs = 2;
	__u32 file_attributes = 0;
	char *dhc_buf = NULL, *lc_buf = NULL;

	cifs_dbg(FYI, "create/open\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(oparms->disposition);
	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	/* do not count rfc1001 len field */
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	req->NameLength = cpu_to_le16(uni_path_len - 2);
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len--;
	if (uni_path_len % 8 != 0) {
		copy_size = uni_path_len / 8 * 8;
		if (copy_size < uni_path_len)
			copy_size += 8;

		copy_path = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_path)
			return -ENOMEM;
		memcpy((char *)copy_path, (const char *)path,
		       uni_path_len);
		uni_path_len = copy_size;
		path = copy_path;
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = path;
	/* -1 since last byte is buf[0] which was counted in smb2_buf_len */
	inc_rfc1001_len(req, uni_path_len - 1);

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		rc = add_lease_context(server, iov, &num_iovecs, oplock);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return rc;
		}
		lc_buf = iov[num_iovecs-1].iov_base;
	}

	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
		/* need to set Next field of lease context if we request it */
		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
			struct create_context *ccontext =
			    (struct create_context *)iov[num_iovecs-1].iov_base;
			ccontext->Next =
				cpu_to_le32(server->vals->create_lease_size);
		}
		rc = add_durable_context(iov, &num_iovecs, oparms);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			kfree(lc_buf);
			return rc;
		}
		dhc_buf = iov[num_iovecs-1].iov_base;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		if (err_buf)
			*err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
					   GFP_KERNEL);
		goto creat_exit;
	}

	oparms->fid->persistent_fid = rsp->PersistentFileId;
	oparms->fid->volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	kfree(lc_buf);
	kfree(dhc_buf);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
 */
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
	   u32 indatalen, char **out_data, u32 *plen /* returned data len */)
{
	struct smb2_ioctl_req *req;
	struct smb2_ioctl_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[2];
	int resp_buftype;
	int num_iovecs;
	int rc = 0;

	cifs_dbg(FYI, "SMB2 IOCTL\n");

	*out_data = NULL;
	/* zero out returned data len, in case of error */
	if (plen)
		*plen = 0;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
	if (rc)
		return rc;

	req->CtlCode = cpu_to_le32(opcode);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	if (indatalen) {
		req->InputCount = cpu_to_le32(indatalen);
		/* do not set InputOffset if no input data */
		req->InputOffset =
		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
		iov[1].iov_base = in_data;
		iov[1].iov_len = indatalen;
		num_iovecs = 2;
	} else
		num_iovecs = 1;

	req->OutputOffset = 0;
	req->OutputCount = 0; /* MBZ */

	/*
	 * Could increase MaxOutputResponse, but that would require more
	 * than one credit. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as fits in one credit
	 */
	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */

	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
	else
		req->Flags = 0;

	iov[0].iov_base = (char *)req;

	/*
	 * If no input data, the size of ioctl struct in
	 * protocol spec still includes a 1 byte data buffer,
	 * but if input data passed to ioctl, we do not
	 * want to double count this, so we do not send
	 * the dummy one byte of data in iovec[0] if sending
	 * input data (in iovec[1]). We also must add 4 bytes
	 * in first iovec to allow for rfc1002 length field.
	 */

	if (indatalen) {
		iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
		inc_rfc1001_len(req, indatalen - 1);
	} else
		iov[0].iov_len = get_rfc1002_length(req) + 4;


	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;

	if ((rc != 0) && (rc != -EINVAL)) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
		goto ioctl_exit;
	} else if (rc == -EINVAL) {
		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
		    (opcode != FSCTL_SRV_COPYCHUNK)) {
			if (tcon)
				cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
			goto ioctl_exit;
		}
	}

	/* check if caller wants to look at return data or just return rc */
	if ((plen == NULL) || (out_data == NULL))
		goto ioctl_exit;

	*plen = le32_to_cpu(rsp->OutputCount);

	/* We check for obvious errors in the output buffer length and offset */
	if (*plen == 0)
		goto ioctl_exit; /* server returned no data */
	else if (*plen > 0xFF00) {
		cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
		cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
			le32_to_cpu(rsp->OutputOffset));
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	*out_data = kmalloc(*plen, GFP_KERNEL);
	if (*out_data == NULL) {
		rc = -ENOMEM;
		goto ioctl_exit;
	}

	memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
	       *plen);
ioctl_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *   Individual callers to ioctl worker function follow
 */

int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct compress_ioctl fsctl_input;
	char *ret_data = NULL;

	fsctl_input.CompressionState =
			__constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SET_COMPRESSION, true /* is_fsctl */,
			(char *)&fsctl_input /* data input */,
			2 /* in data len */, &ret_data /* out data */, NULL);

	cifs_dbg(FYI, "set compression rc %d\n", rc);

	return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

1415static int
1416validate_buf(unsigned int offset, unsigned int buffer_length,
1417 struct smb2_hdr *hdr, unsigned int min_buf_size)
1419{
1420 unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
1421 char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
1422 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1423 char *end_of_buf = begin_of_buf + buffer_length;
1424
1426 if (buffer_length < min_buf_size) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001427 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
1428 buffer_length, min_buf_size);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001429 return -EINVAL;
1430 }
1431
1432 /* check if beyond RFC1001 maximum length */
1433 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001434 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
1435 buffer_length, smb_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001436 return -EINVAL;
1437 }
1438
1439 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001440 cifs_dbg(VFS, "illegal server response, bad offset to data\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001441 return -EINVAL;
1442 }
1443
1444 return 0;
1445}
1446
1447/*
1448 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
1449 * Caller must free buffer.
1450 */
1451static int
1452validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
1453 struct smb2_hdr *hdr, unsigned int minbufsize,
1454 char *data)
1456{
1457 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1458 int rc;
1459
1460 if (!data)
1461 return -EINVAL;
1462
1463 rc = validate_buf(offset, buffer_length, hdr, minbufsize);
1464 if (rc)
1465 return rc;
1466
1467 memcpy(data, begin_of_buf, buffer_length);
1468
1469 return 0;
1470}
1471
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001472static int
1473query_info(const unsigned int xid, struct cifs_tcon *tcon,
1474 u64 persistent_fid, u64 volatile_fid, u8 info_class,
1475 size_t output_len, size_t min_len, void *data)
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001476{
1477 struct smb2_query_info_req *req;
1478 struct smb2_query_info_rsp *rsp = NULL;
1479 struct kvec iov[2];
1480 int rc = 0;
1481 int resp_buftype;
1482 struct TCP_Server_Info *server;
1483 struct cifs_ses *ses = tcon->ses;
1484
Joe Perchesf96637b2013-05-04 22:12:25 -05001485 cifs_dbg(FYI, "Query Info\n");
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001486
1487 if (ses && (ses->server))
1488 server = ses->server;
1489 else
1490 return -EIO;
1491
1492 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
1493 if (rc)
1494 return rc;
1495
1496 req->InfoType = SMB2_O_INFO_FILE;
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001497 req->FileInfoClass = info_class;
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001498 req->PersistentFileId = persistent_fid;
1499 req->VolatileFileId = volatile_fid;
1500 /* 4 for rfc1002 length field and 1 for Buffer */
1501 req->InputBufferOffset =
1502 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001503 req->OutputBufferLength = cpu_to_le32(output_len);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001504
1505 iov[0].iov_base = (char *)req;
1506 /* 4 for rfc1002 length field */
1507 iov[0].iov_len = get_rfc1002_length(req) + 4;
1508
1509 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001510 rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
1511
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001512 if (rc) {
1513 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
1514 goto qinf_exit;
1515 }
1516
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001517 rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
1518 le32_to_cpu(rsp->OutputBufferLength),
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001519 &rsp->hdr, min_len, data);
Pavel Shilovskybe4cb9e2011-12-29 17:06:33 +04001520
1521qinf_exit:
1522 free_rsp_buf(resp_buftype, rsp);
1523 return rc;
1524}
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001525
Pavel Shilovskyf0df7372012-09-18 16:20:26 -07001526int
1527SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
1528 u64 persistent_fid, u64 volatile_fid,
1529 struct smb2_file_all_info *data)
1530{
1531 return query_info(xid, tcon, persistent_fid, volatile_fid,
1532 FILE_ALL_INFORMATION,
1533 sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
1534 sizeof(struct smb2_file_all_info), data);
1535}
1536
1537int
1538SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
1539 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
1540{
1541 return query_info(xid, tcon, persistent_fid, volatile_fid,
1542 FILE_INTERNAL_INFORMATION,
1543 sizeof(struct smb2_file_internal_info),
1544 sizeof(struct smb2_file_internal_info), uniqueid);
1545}
1546
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001547/*
1548 * This is a no-op for now. We're not really interested in the reply, but
1549 * rather in the fact that the server sent one and that server->lstrp
1550 * gets updated.
1551 *
 1552 * FIXME: maybe we should consider checking that the reply matches the request?
1553 */
1554static void
1555smb2_echo_callback(struct mid_q_entry *mid)
1556{
1557 struct TCP_Server_Info *server = mid->callback_data;
1558 struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
1559 unsigned int credits_received = 1;
1560
1561 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1562 credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
1563
1564 DeleteMidQEntry(mid);
1565 add_credits(server, credits_received, CIFS_ECHO_OP);
1566}
1567
1568int
1569SMB2_echo(struct TCP_Server_Info *server)
1570{
1571 struct smb2_echo_req *req;
1572 int rc = 0;
1573 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001574 struct smb_rqst rqst = { .rq_iov = &iov,
1575 .rq_nvec = 1 };
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001576
Joe Perchesf96637b2013-05-04 22:12:25 -05001577 cifs_dbg(FYI, "In echo request\n");
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001578
1579 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1580 if (rc)
1581 return rc;
1582
1583 req->hdr.CreditRequest = cpu_to_le16(1);
1584
1585 iov.iov_base = (char *)req;
1586 /* 4 for rfc1002 length field */
1587 iov.iov_len = get_rfc1002_length(req) + 4;
1588
Jeff Laytonfec344e2012-09-18 16:20:35 -07001589 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001590 CIFS_ECHO_OP);
1591 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001592 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
Pavel Shilovsky9094fad2012-07-12 18:30:44 +04001593
1594 cifs_small_buf_release(req);
1595 return rc;
1596}
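
/*
 * Illustrative sketch (not part of the original file): one way SMB2_echo()
 * could be driven from a delayed work item so that server->lstrp keeps being
 * refreshed on an otherwise idle connection. The "echo" delayed_work member
 * and the one-minute interval are assumptions for illustration only.
 */
#if 0
static void example_echo_worker(struct work_struct *work)
{
	struct TCP_Server_Info *server =
		container_of(work, struct TCP_Server_Info, echo.work);

	if (SMB2_echo(server))
		cifs_dbg(FYI, "echo request failed, will retry\n");

	queue_delayed_work(cifsiod_wq, &server->echo, 60 * HZ);
}
#endif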
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001597
1598int
1599SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1600 u64 volatile_fid)
1601{
1602 struct smb2_flush_req *req;
1603 struct TCP_Server_Info *server;
1604 struct cifs_ses *ses = tcon->ses;
1605 struct kvec iov[1];
1606 int resp_buftype;
1607 int rc = 0;
1608
Joe Perchesf96637b2013-05-04 22:12:25 -05001609 cifs_dbg(FYI, "Flush\n");
Pavel Shilovsky7a5cfb12012-09-18 16:20:28 -07001610
1611 if (ses && (ses->server))
1612 server = ses->server;
1613 else
1614 return -EIO;
1615
1616 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
1617 if (rc)
1618 return rc;
1619
1620 req->PersistentFileId = persistent_fid;
1621 req->VolatileFileId = volatile_fid;
1622
1623 iov[0].iov_base = (char *)req;
1624 /* 4 for rfc1002 length field */
1625 iov[0].iov_len = get_rfc1002_length(req) + 4;
1626
1627 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1628
1629 if ((rc != 0) && tcon)
1630 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1631
1632 free_rsp_buf(resp_buftype, iov[0].iov_base);
1633 return rc;
1634}
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001635
1636/*
 1637 * To form a chain of read requests, pass CHAINED_REQUEST on every request,
 1638 * add END_OF_CHAIN on the last one, and add RELATED_REQUEST on requests after
 * the first so they reuse the ids of the preceding request (see the
 * illustrative sketch after this function).
1639 */
1640static int
1641smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
1642 unsigned int remaining_bytes, int request_type)
1643{
1644 int rc = -EACCES;
1645 struct smb2_read_req *req = NULL;
1646
1647 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
1648 if (rc)
1649 return rc;
1650 if (io_parms->tcon->ses->server == NULL)
1651 return -ECONNABORTED;
1652
1653 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1654
1655 req->PersistentFileId = io_parms->persistent_fid;
1656 req->VolatileFileId = io_parms->volatile_fid;
1657 req->ReadChannelInfoOffset = 0; /* reserved */
1658 req->ReadChannelInfoLength = 0; /* reserved */
1659 req->Channel = 0; /* reserved */
1660 req->MinimumCount = 0;
1661 req->Length = cpu_to_le32(io_parms->length);
1662 req->Offset = cpu_to_le64(io_parms->offset);
1663
1664 if (request_type & CHAINED_REQUEST) {
1665 if (!(request_type & END_OF_CHAIN)) {
1666 /* 4 for rfc1002 length field */
1667 req->hdr.NextCommand =
1668 cpu_to_le32(get_rfc1002_length(req) + 4);
1669 } else /* END_OF_CHAIN */
1670 req->hdr.NextCommand = 0;
1671 if (request_type & RELATED_REQUEST) {
1672 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1673 /*
1674 * Related requests use info from previous read request
1675 * in chain.
1676 */
1677 req->hdr.SessionId = 0xFFFFFFFF;
1678 req->hdr.TreeId = 0xFFFFFFFF;
1679 req->PersistentFileId = 0xFFFFFFFF;
1680 req->VolatileFileId = 0xFFFFFFFF;
1681 }
1682 }
1683 if (remaining_bytes > io_parms->length)
1684 req->RemainingBytes = cpu_to_le32(remaining_bytes);
1685 else
1686 req->RemainingBytes = 0;
1687
1688 iov[0].iov_base = (char *)req;
1689 /* 4 for rfc1002 length field */
1690 iov[0].iov_len = get_rfc1002_length(req) + 4;
1691 return rc;
1692}
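
/*
 * Illustrative sketch (not part of the original file): building a two-request
 * read chain with the flags handled above. "chunk", "rc" and the caller's
 * io_parms are placeholders for illustration.
 */
#if 0
	struct kvec iov[2];
	struct cifs_io_parms first = *io_parms, second = *io_parms;

	first.length = chunk;
	second.offset += chunk;
	second.length = io_parms->length - chunk;

	/* first request in the chain; another request follows */
	rc = smb2_new_read_req(&iov[0], &first, 0, CHAINED_REQUEST);
	if (!rc)
		/* last request: related to the first and ends the chain */
		rc = smb2_new_read_req(&iov[1], &second, 0,
				       CHAINED_REQUEST | RELATED_REQUEST |
				       END_OF_CHAIN);
#endif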
1693
1694static void
1695smb2_readv_callback(struct mid_q_entry *mid)
1696{
1697 struct cifs_readdata *rdata = mid->callback_data;
1698 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1699 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton58195752012-09-19 06:22:34 -07001700 struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001701 unsigned int credits_received = 1;
Jeff Layton58195752012-09-19 06:22:34 -07001702 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Layton8321fec2012-09-19 06:22:32 -07001703 .rq_nvec = 1,
1704 .rq_pages = rdata->pages,
1705 .rq_npages = rdata->nr_pages,
1706 .rq_pagesz = rdata->pagesz,
1707 .rq_tailsz = rdata->tailsz };
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001708
Joe Perchesf96637b2013-05-04 22:12:25 -05001709 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
1710 __func__, mid->mid, mid->mid_state, rdata->result,
1711 rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001712
1713 switch (mid->mid_state) {
1714 case MID_RESPONSE_RECEIVED:
1715 credits_received = le16_to_cpu(buf->CreditRequest);
1716 /* result already set, check signature */
Jeff Layton38d77c52013-05-26 07:01:00 -04001717 if (server->sign) {
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001718 int rc;
1719
Jeff Layton0b688cf2012-09-18 16:20:34 -07001720 rc = smb2_verify_signature(&rqst, server);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001721 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05001722 cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
1723 rc);
Pavel Shilovsky3c1bf7e2012-09-18 16:20:30 -07001724 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001725 /* FIXME: should this be counted toward the initiating task? */
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04001726 task_io_account_read(rdata->got_bytes);
1727 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001728 break;
1729 case MID_REQUEST_SUBMITTED:
1730 case MID_RETRY_NEEDED:
1731 rdata->result = -EAGAIN;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04001732 if (server->sign && rdata->got_bytes)
 1733 /* reset the byte count since we cannot verify the signature */
1734 rdata->got_bytes = 0;
1735 /* FIXME: should this be counted toward the initiating task? */
1736 task_io_account_read(rdata->got_bytes);
1737 cifs_stats_bytes_read(tcon, rdata->got_bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001738 break;
1739 default:
1740 if (rdata->result != -ENODATA)
1741 rdata->result = -EIO;
1742 }
1743
1744 if (rdata->result)
1745 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1746
1747 queue_work(cifsiod_wq, &rdata->work);
1748 DeleteMidQEntry(mid);
1749 add_credits(server, credits_received, 0);
1750}
1751
 1752/* smb2_async_readv - send an async read, and set up mid to handle result */
1753int
1754smb2_async_readv(struct cifs_readdata *rdata)
1755{
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001756 int rc, flags = 0;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001757 struct smb2_hdr *buf;
1758 struct cifs_io_parms io_parms;
Jeff Layton58195752012-09-19 06:22:34 -07001759 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
Jeff Laytonfec344e2012-09-18 16:20:35 -07001760 .rq_nvec = 1 };
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001761 struct TCP_Server_Info *server;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001762
Joe Perchesf96637b2013-05-04 22:12:25 -05001763 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
1764 __func__, rdata->offset, rdata->bytes);
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001765
1766 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
1767 io_parms.offset = rdata->offset;
1768 io_parms.length = rdata->bytes;
1769 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
1770 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
1771 io_parms.pid = rdata->pid;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001772
1773 server = io_parms.tcon->ses->server;
1774
Jeff Layton58195752012-09-19 06:22:34 -07001775 rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001776 if (rc) {
1777 if (rc == -EAGAIN && rdata->credits) {
1778 /* credits was reset by reconnect */
1779 rdata->credits = 0;
1780 /* reduce in_flight value since we won't send the req */
1781 spin_lock(&server->req_lock);
1782 server->in_flight--;
1783 spin_unlock(&server->req_lock);
1784 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001785 return rc;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001786 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001787
Jeff Layton58195752012-09-19 06:22:34 -07001788 buf = (struct smb2_hdr *)rdata->iov.iov_base;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001789 /* 4 for rfc1002 length field */
Jeff Layton58195752012-09-19 06:22:34 -07001790 rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001791
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001792 if (rdata->credits) {
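 /* e.g. with a 64KB SMB2_MAX_BUFFER_SIZE, a 192KB read is charged 3 credits */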
1793 buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
1794 SMB2_MAX_BUFFER_SIZE));
1795 spin_lock(&server->req_lock);
1796 server->credits += rdata->credits -
1797 le16_to_cpu(buf->CreditCharge);
1798 spin_unlock(&server->req_lock);
1799 wake_up(&server->request_q);
1800 flags = CIFS_HAS_CREDITS;
1801 }
1802
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001803 kref_get(&rdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001804 rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001805 cifs_readv_receive, smb2_readv_callback,
Pavel Shilovskybed9da02014-06-25 11:28:57 +04001806 rdata, flags);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001807 if (rc) {
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001808 kref_put(&rdata->refcount, cifs_readdata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001809 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1810 }
Pavel Shilovsky09a47072012-09-18 16:20:29 -07001811
1812 cifs_small_buf_release(buf);
1813 return rc;
1814}
Pavel Shilovsky33319142012-09-18 16:20:29 -07001815
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001816int
1817SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1818 unsigned int *nbytes, char **buf, int *buf_type)
1819{
1820 int resp_buftype, rc = -EACCES;
1821 struct smb2_read_rsp *rsp = NULL;
1822 struct kvec iov[1];
1823
1824 *nbytes = 0;
1825 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1826 if (rc)
1827 return rc;
1828
1829 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1830 &resp_buftype, CIFS_LOG_ERROR);
1831
1832 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1833
1834 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1835 free_rsp_buf(resp_buftype, iov[0].iov_base);
1836 return 0;
1837 }
1838
1839 if (rc) {
1840 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001841 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001842 } else {
1843 *nbytes = le32_to_cpu(rsp->DataLength);
1844 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1845 (*nbytes > io_parms->length)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001846 cifs_dbg(FYI, "bad length %d for count %d\n",
1847 *nbytes, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001848 rc = -EIO;
1849 *nbytes = 0;
1850 }
1851 }
1852
1853 if (*buf) {
1854 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1855 *nbytes);
1856 free_rsp_buf(resp_buftype, iov[0].iov_base);
1857 } else if (resp_buftype != CIFS_NO_BUFFER) {
1858 *buf = iov[0].iov_base;
1859 if (resp_buftype == CIFS_SMALL_BUFFER)
1860 *buf_type = CIFS_SMALL_BUFFER;
1861 else if (resp_buftype == CIFS_LARGE_BUFFER)
1862 *buf_type = CIFS_LARGE_BUFFER;
1863 }
1864 return rc;
1865}
1866
Pavel Shilovsky33319142012-09-18 16:20:29 -07001867/*
1868 * Check the mid_state and signature on received buffer (if any), and queue the
1869 * workqueue completion task.
1870 */
1871static void
1872smb2_writev_callback(struct mid_q_entry *mid)
1873{
1874 struct cifs_writedata *wdata = mid->callback_data;
1875 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1876 unsigned int written;
1877 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1878 unsigned int credits_received = 1;
1879
1880 switch (mid->mid_state) {
1881 case MID_RESPONSE_RECEIVED:
1882 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1883 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1884 if (wdata->result != 0)
1885 break;
1886
1887 written = le32_to_cpu(rsp->DataLength);
1888 /*
 1889 * Mask off the high 16 bits when the byte count returned
 1890 * by the server is greater than the number of bytes
 1891 * requested by the client. OS/2 servers are known to set incorrect
1892 * CountHigh values.
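 * (e.g. a returned count of 0x0001fff0 for a 0xfff0 byte
 * request is masked down to 0xfff0)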
1893 */
1894 if (written > wdata->bytes)
1895 written &= 0xFFFF;
1896
1897 if (written < wdata->bytes)
1898 wdata->result = -ENOSPC;
1899 else
1900 wdata->bytes = written;
1901 break;
1902 case MID_REQUEST_SUBMITTED:
1903 case MID_RETRY_NEEDED:
1904 wdata->result = -EAGAIN;
1905 break;
1906 default:
1907 wdata->result = -EIO;
1908 break;
1909 }
1910
1911 if (wdata->result)
1912 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1913
1914 queue_work(cifsiod_wq, &wdata->work);
1915 DeleteMidQEntry(mid);
1916 add_credits(tcon->ses->server, credits_received, 0);
1917}
1918
1919/* smb2_async_writev - send an async write, and set up mid to handle result */
1920int
Steve French4a5c80d2014-02-07 20:45:12 -06001921smb2_async_writev(struct cifs_writedata *wdata,
1922 void (*release)(struct kref *kref))
Pavel Shilovsky33319142012-09-18 16:20:29 -07001923{
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001924 int rc = -EACCES, flags = 0;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001925 struct smb2_write_req *req = NULL;
1926 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001927 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001928 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001929 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001930
1931 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001932 if (rc) {
1933 if (rc == -EAGAIN && wdata->credits) {
1934 /* credits was reset by reconnect */
1935 wdata->credits = 0;
1936 /* reduce in_flight value since we won't send the req */
1937 spin_lock(&server->req_lock);
1938 server->in_flight--;
1939 spin_unlock(&server->req_lock);
1940 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001941 goto async_writev_out;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001942 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001943
Pavel Shilovsky33319142012-09-18 16:20:29 -07001944 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1945
1946 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1947 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1948 req->WriteChannelInfoOffset = 0;
1949 req->WriteChannelInfoLength = 0;
1950 req->Channel = 0;
1951 req->Offset = cpu_to_le64(wdata->offset);
1952 /* 4 for rfc1002 length field */
1953 req->DataOffset = cpu_to_le16(
1954 offsetof(struct smb2_write_req, Buffer) - 4);
1955 req->RemainingBytes = 0;
1956
1957 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07001958 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1959 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001960
Jeff Laytoneddb0792012-09-18 16:20:35 -07001961 rqst.rq_iov = &iov;
1962 rqst.rq_nvec = 1;
1963 rqst.rq_pages = wdata->pages;
1964 rqst.rq_npages = wdata->nr_pages;
1965 rqst.rq_pagesz = wdata->pagesz;
1966 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001967
Joe Perchesf96637b2013-05-04 22:12:25 -05001968 cifs_dbg(FYI, "async write at %llu %u bytes\n",
1969 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001970
1971 req->Length = cpu_to_le32(wdata->bytes);
1972
1973 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1974
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001975 if (wdata->credits) {
1976 req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
1977 SMB2_MAX_BUFFER_SIZE));
1978 spin_lock(&server->req_lock);
1979 server->credits += wdata->credits -
1980 le16_to_cpu(req->hdr.CreditCharge);
1981 spin_unlock(&server->req_lock);
1982 wake_up(&server->request_q);
1983 flags = CIFS_HAS_CREDITS;
1984 }
1985
Pavel Shilovsky33319142012-09-18 16:20:29 -07001986 kref_get(&wdata->refcount);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001987 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
1988 flags);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001989
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001990 if (rc) {
Steve French4a5c80d2014-02-07 20:45:12 -06001991 kref_put(&wdata->refcount, release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001992 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1993 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001994
Pavel Shilovsky33319142012-09-18 16:20:29 -07001995async_writev_out:
1996 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001997 return rc;
1998}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001999
2000/*
 2001 * SMB2_write is passed a kvec array in iov together with n_vec. n_vec must
 2002 * be at least 1 and is the number of elements that carry data to write; the
 2003 * data elements begin at position 1 in the iov array (iov[0] is filled in by
 2004 * SMB2_write with the request itself). The total amount of data to write is
 * given by io_parms->length (a caller sketch follows this function).
2005 */
2006int
2007SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
2008 unsigned int *nbytes, struct kvec *iov, int n_vec)
2009{
2010 int rc = 0;
2011 struct smb2_write_req *req = NULL;
2012 struct smb2_write_rsp *rsp = NULL;
2013 int resp_buftype;
2014 *nbytes = 0;
2015
2016 if (n_vec < 1)
2017 return rc;
2018
2019 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
2020 if (rc)
2021 return rc;
2022
2023 if (io_parms->tcon->ses->server == NULL)
2024 return -ECONNABORTED;
2025
2026 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
2027
2028 req->PersistentFileId = io_parms->persistent_fid;
2029 req->VolatileFileId = io_parms->volatile_fid;
2030 req->WriteChannelInfoOffset = 0;
2031 req->WriteChannelInfoLength = 0;
2032 req->Channel = 0;
2033 req->Length = cpu_to_le32(io_parms->length);
2034 req->Offset = cpu_to_le64(io_parms->offset);
2035 /* 4 for rfc1002 length field */
2036 req->DataOffset = cpu_to_le16(
2037 offsetof(struct smb2_write_req, Buffer) - 4);
2038 req->RemainingBytes = 0;
2039
2040 iov[0].iov_base = (char *)req;
2041 /* 4 for rfc1002 length field and 1 for Buffer */
2042 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2043
2044 /* length of entire message including data to be written */
2045 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
2046
2047 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
2048 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002049 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002050
2051 if (rc) {
2052 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002053 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002054 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002055 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002056
2057 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07002058 return rc;
2059}
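
/*
 * Illustrative caller sketch (not part of the original file): writing "len"
 * bytes from "buf" with a single data vector. iov[0] is reserved for the
 * request itself and is filled in by SMB2_write(); the data vectors start at
 * index 1. "buf", "len" and the populated io_parms are placeholders.
 */
#if 0
	struct kvec iov[2];
	unsigned int nbytes;

	io_parms.length = len;
	iov[1].iov_base = buf;
	iov[1].iov_len = len;

	rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
	if (!rc)
		cifs_dbg(FYI, "wrote %u bytes\n", nbytes);
#endif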
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002060
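/*
 * Walk the search entries in a query directory response buffer, counting how
 * many complete entries fit before end_of_buf and remembering a pointer to
 * the last complete entry in *lastentry.
 */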
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002061static unsigned int
2062num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
2063{
2064 int len;
2065 unsigned int entrycount = 0;
2066 unsigned int next_offset = 0;
2067 FILE_DIRECTORY_INFO *entryptr;
2068
2069 if (bufstart == NULL)
2070 return 0;
2071
2072 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
2073
2074 while (1) {
2075 entryptr = (FILE_DIRECTORY_INFO *)
2076 ((char *)entryptr + next_offset);
2077
2078 if ((char *)entryptr + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002079 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002080 break;
2081 }
2082
2083 len = le32_to_cpu(entryptr->FileNameLength);
2084 if ((char *)entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002085 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
2086 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002087 break;
2088 }
2089
2090 *lastentry = (char *)entryptr;
2091 entrycount++;
2092
2093 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
2094 if (!next_offset)
2095 break;
2096 }
2097
2098 return entrycount;
2099}
2100
2101/*
2102 * Readdir/FindFirst
2103 */
2104int
2105SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
2106 u64 persistent_fid, u64 volatile_fid, int index,
2107 struct cifs_search_info *srch_inf)
2108{
2109 struct smb2_query_directory_req *req;
2110 struct smb2_query_directory_rsp *rsp = NULL;
2111 struct kvec iov[2];
2112 int rc = 0;
2113 int len;
2114 int resp_buftype;
2115 unsigned char *bufptr;
2116 struct TCP_Server_Info *server;
2117 struct cifs_ses *ses = tcon->ses;
 2118 __le16 asterisk = cpu_to_le16('*');
2119 char *end_of_smb;
2120 unsigned int output_size = CIFSMaxBufSize;
2121 size_t info_buf_size;
2122
2123 if (ses && (ses->server))
2124 server = ses->server;
2125 else
2126 return -EIO;
2127
2128 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
2129 if (rc)
2130 return rc;
2131
2132 switch (srch_inf->info_level) {
2133 case SMB_FIND_FILE_DIRECTORY_INFO:
2134 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
2135 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
2136 break;
2137 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
2138 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
2139 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
2140 break;
2141 default:
Joe Perchesf96637b2013-05-04 22:12:25 -05002142 cifs_dbg(VFS, "info level %u isn't supported\n",
2143 srch_inf->info_level);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002144 rc = -EINVAL;
2145 goto qdir_exit;
2146 }
2147
2148 req->FileIndex = cpu_to_le32(index);
2149 req->PersistentFileId = persistent_fid;
2150 req->VolatileFileId = volatile_fid;
2151
2152 len = 0x2;
2153 bufptr = req->Buffer;
 2154 memcpy(bufptr, &asterisk, len);
2155
2156 req->FileNameOffset =
2157 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
2158 req->FileNameLength = cpu_to_le16(len);
2159 /*
2160 * BB could be 30 bytes or so longer if we used SMB2 specific
2161 * buffer lengths, but this is safe and close enough.
2162 */
2163 output_size = min_t(unsigned int, output_size, server->maxBuf);
2164 output_size = min_t(unsigned int, output_size, 2 << 15);
2165 req->OutputBufferLength = cpu_to_le32(output_size);
2166
2167 iov[0].iov_base = (char *)req;
2168 /* 4 for RFC1001 length and 1 for Buffer */
2169 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
2170
2171 iov[1].iov_base = (char *)(req->Buffer);
2172 iov[1].iov_len = len;
2173
2174 inc_rfc1001_len(req, len - 1 /* Buffer */);
2175
2176 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04002177 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
2178
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002179 if (rc) {
2180 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
2181 goto qdir_exit;
2182 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002183
2184 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2185 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2186 info_buf_size);
2187 if (rc)
2188 goto qdir_exit;
2189
2190 srch_inf->unicode = true;
2191
2192 if (srch_inf->ntwrk_buf_start) {
2193 if (srch_inf->smallBuf)
2194 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
2195 else
2196 cifs_buf_release(srch_inf->ntwrk_buf_start);
2197 }
2198 srch_inf->ntwrk_buf_start = (char *)rsp;
2199 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
2200 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
2201 /* 4 for rfc1002 length field */
2202 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
2203 srch_inf->entries_in_buffer =
2204 num_entries(srch_inf->srch_entries_start, end_of_smb,
2205 &srch_inf->last_entry, info_buf_size);
2206 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
Joe Perchesf96637b2013-05-04 22:12:25 -05002207 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
2208 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
2209 srch_inf->srch_entries_start, srch_inf->last_entry);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002210 if (resp_buftype == CIFS_LARGE_BUFFER)
2211 srch_inf->smallBuf = false;
2212 else if (resp_buftype == CIFS_SMALL_BUFFER)
2213 srch_inf->smallBuf = true;
2214 else
Joe Perchesf96637b2013-05-04 22:12:25 -05002215 cifs_dbg(VFS, "illegal search buffer type\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07002216
2217 if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
2218 srch_inf->endOfSearch = 1;
2219 else
2220 srch_inf->endOfSearch = 0;
2221
2222 return rc;
2223
2224qdir_exit:
2225 free_rsp_buf(resp_buftype, rsp);
2226 return rc;
2227}
2228
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002229static int
2230send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002231 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002232 unsigned int num, void **data, unsigned int *size)
2233{
2234 struct smb2_set_info_req *req;
2235 struct smb2_set_info_rsp *rsp = NULL;
2236 struct kvec *iov;
2237 int rc = 0;
2238 int resp_buftype;
2239 unsigned int i;
2240 struct TCP_Server_Info *server;
2241 struct cifs_ses *ses = tcon->ses;
2242
2243 if (ses && (ses->server))
2244 server = ses->server;
2245 else
2246 return -EIO;
2247
2248 if (!num)
2249 return -EINVAL;
2250
2251 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
2252 if (!iov)
2253 return -ENOMEM;
2254
2255 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
2256 if (rc) {
2257 kfree(iov);
2258 return rc;
2259 }
2260
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002261 req->hdr.ProcessId = cpu_to_le32(pid);
2262
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002263 req->InfoType = SMB2_O_INFO_FILE;
2264 req->FileInfoClass = info_class;
2265 req->PersistentFileId = persistent_fid;
2266 req->VolatileFileId = volatile_fid;
2267
2268 /* 4 for RFC1001 length and 1 for Buffer */
2269 req->BufferOffset =
2270 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
2271 req->BufferLength = cpu_to_le32(*size);
2272
2273 inc_rfc1001_len(req, *size - 1 /* Buffer */);
2274
2275 memcpy(req->Buffer, *data, *size);
2276
2277 iov[0].iov_base = (char *)req;
2278 /* 4 for RFC1001 length */
2279 iov[0].iov_len = get_rfc1002_length(req) + 4;
2280
2281 for (i = 1; i < num; i++) {
2282 inc_rfc1001_len(req, size[i]);
2283 le32_add_cpu(&req->BufferLength, size[i]);
2284 iov[i].iov_base = (char *)data[i];
2285 iov[i].iov_len = size[i];
2286 }
2287
2288 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
2289 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
2290
Steve French7d3fb242013-11-18 09:56:28 -06002291 if (rc != 0)
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002292 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
Steve French7d3fb242013-11-18 09:56:28 -06002293
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002294 free_rsp_buf(resp_buftype, rsp);
2295 kfree(iov);
2296 return rc;
2297}
2298
2299int
2300SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
2301 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2302{
2303 struct smb2_file_rename_info info;
2304 void **data;
2305 unsigned int size[2];
2306 int rc;
2307 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2308
2309 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2310 if (!data)
2311 return -ENOMEM;
2312
2313 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
2314 /* 0 = fail if target already exists */
 2315 info.RootDirectory = 0; /* MBZ (must be zero) for network operations */
2316 info.FileNameLength = cpu_to_le32(len);
2317
2318 data[0] = &info;
2319 size[0] = sizeof(struct smb2_file_rename_info);
2320
2321 data[1] = target_file;
2322 size[1] = len + 2 /* null */;
2323
2324 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002325 current->tgid, FILE_RENAME_INFORMATION, 2, data,
2326 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07002327 kfree(data);
2328 return rc;
2329}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002330
2331int
2332SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
2333 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2334{
2335 struct smb2_file_link_info info;
2336 void **data;
2337 unsigned int size[2];
2338 int rc;
2339 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2340
2341 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2342 if (!data)
2343 return -ENOMEM;
2344
2345 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
2346 /* 0 = fail if link already exists */
 2347 info.RootDirectory = 0; /* MBZ (must be zero) for network operations */
2348 info.FileNameLength = cpu_to_le32(len);
2349
2350 data[0] = &info;
2351 size[0] = sizeof(struct smb2_file_link_info);
2352
2353 data[1] = target_file;
2354 size[1] = len + 2 /* null */;
2355
2356 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002357 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002358 kfree(data);
2359 return rc;
2360}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002361
2362int
2363SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
Steve Frenchf29ebb42014-07-19 21:44:58 -05002364 u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002365{
2366 struct smb2_file_eof_info info;
2367 void *data;
2368 unsigned int size;
2369
2370 info.EndOfFile = *eof;
2371
2372 data = &info;
2373 size = sizeof(struct smb2_file_eof_info);
2374
Steve Frenchf29ebb42014-07-19 21:44:58 -05002375 if (is_falloc)
2376 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2377 pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
2378 else
2379 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2380 pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002381}
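
/*
 * Illustrative caller sketch (not part of the original file): truncating (or
 * extending) a file to "new_size" bytes via the end-of-file path rather than
 * the allocation path. "new_size" is a placeholder.
 */
#if 0
	__le64 eof = cpu_to_le64(new_size);

	rc = SMB2_set_eof(xid, tcon, persistent_fid, volatile_fid,
			  current->tgid, &eof, false /* not falloc */);
#endif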
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07002382
2383int
2384SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2385 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
2386{
2387 unsigned int size;
2388 size = sizeof(FILE_BASIC_INFO);
2389 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2390 current->tgid, FILE_BASIC_INFORMATION, 1,
2391 (void **)&buf, &size);
2392}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002393
2394int
2395SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
2396 const u64 persistent_fid, const u64 volatile_fid,
2397 __u8 oplock_level)
2398{
2399 int rc;
2400 struct smb2_oplock_break *req = NULL;
2401
Joe Perchesf96637b2013-05-04 22:12:25 -05002402 cifs_dbg(FYI, "SMB2_oplock_break\n");
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002403 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2404
2405 if (rc)
2406 return rc;
2407
2408 req->VolatileFid = volatile_fid;
2409 req->PersistentFid = persistent_fid;
2410 req->OplockLevel = oplock_level;
2411 req->hdr.CreditRequest = cpu_to_le16(1);
2412
2413 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2414 /* SMB2 buffer freed by function above */
2415
2416 if (rc) {
2417 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002418 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002419 }
2420
2421 return rc;
2422}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002423
2424static void
2425copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2426 struct kstatfs *kst)
2427{
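 /* e.g. 512-byte sectors with 8 sectors per allocation unit give an f_bsize of 4096 */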
2428 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2429 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2430 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2431 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2432 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2433 return;
2434}
2435
2436static int
2437build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2438 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2439{
2440 int rc;
2441 struct smb2_query_info_req *req;
2442
Joe Perchesf96637b2013-05-04 22:12:25 -05002443 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002444
2445 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2446 return -EIO;
2447
2448 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2449 if (rc)
2450 return rc;
2451
2452 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2453 req->FileInfoClass = level;
2454 req->PersistentFileId = persistent_fid;
2455 req->VolatileFileId = volatile_fid;
2456 /* 4 for rfc1002 length field and 1 for pad */
2457 req->InputBufferOffset =
2458 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2459 req->OutputBufferLength = cpu_to_le32(
2460 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2461
2462 iov->iov_base = (char *)req;
2463 /* 4 for rfc1002 length field */
2464 iov->iov_len = get_rfc1002_length(req) + 4;
2465 return 0;
2466}
2467
2468int
2469SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2470 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2471{
2472 struct smb2_query_info_rsp *rsp = NULL;
2473 struct kvec iov;
2474 int rc = 0;
2475 int resp_buftype;
2476 struct cifs_ses *ses = tcon->ses;
2477 struct smb2_fs_full_size_info *info = NULL;
2478
2479 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2480 sizeof(struct smb2_fs_full_size_info),
2481 persistent_fid, volatile_fid);
2482 if (rc)
2483 return rc;
2484
2485 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2486 if (rc) {
2487 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
Steve French34f62642013-10-09 02:07:00 -05002488 goto qfsinf_exit;
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002489 }
2490 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2491
2492 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2493 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2494 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2495 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2496 sizeof(struct smb2_fs_full_size_info));
2497 if (!rc)
2498 copy_fs_info_to_kstatfs(info, fsdata);
2499
Steve French34f62642013-10-09 02:07:00 -05002500qfsinf_exit:
2501 free_rsp_buf(resp_buftype, iov.iov_base);
2502 return rc;
2503}
2504
2505int
2506SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
Steven French21671142013-10-09 13:36:35 -05002507 u64 persistent_fid, u64 volatile_fid, int level)
Steve French34f62642013-10-09 02:07:00 -05002508{
2509 struct smb2_query_info_rsp *rsp = NULL;
2510 struct kvec iov;
2511 int rc = 0;
Steven French21671142013-10-09 13:36:35 -05002512 int resp_buftype, max_len, min_len;
Steve French34f62642013-10-09 02:07:00 -05002513 struct cifs_ses *ses = tcon->ses;
2514 unsigned int rsp_len, offset;
2515
Steven French21671142013-10-09 13:36:35 -05002516 if (level == FS_DEVICE_INFORMATION) {
2517 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2518 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
2519 } else if (level == FS_ATTRIBUTE_INFORMATION) {
2520 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
2521 min_len = MIN_FS_ATTR_INFO_SIZE;
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002522 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
2523 max_len = sizeof(struct smb3_fs_ss_info);
2524 min_len = sizeof(struct smb3_fs_ss_info);
Steven French21671142013-10-09 13:36:35 -05002525 } else {
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002526 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
Steven French21671142013-10-09 13:36:35 -05002527 return -EINVAL;
2528 }
2529
2530 rc = build_qfs_info_req(&iov, tcon, level, max_len,
Steve French34f62642013-10-09 02:07:00 -05002531 persistent_fid, volatile_fid);
2532 if (rc)
2533 return rc;
2534
2535 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2536 if (rc) {
2537 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2538 goto qfsattr_exit;
2539 }
2540 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2541
2542 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
2543 offset = le16_to_cpu(rsp->OutputBufferOffset);
Steven French21671142013-10-09 13:36:35 -05002544 rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
2545 if (rc)
2546 goto qfsattr_exit;
2547
2548 if (level == FS_ATTRIBUTE_INFORMATION)
Steve French34f62642013-10-09 02:07:00 -05002549 memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
2550 + (char *)&rsp->hdr, min_t(unsigned int,
Steven French21671142013-10-09 13:36:35 -05002551 rsp_len, max_len));
2552 else if (level == FS_DEVICE_INFORMATION)
2553 memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
2554 + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
Steven Frenchaf6a12e2013-10-09 20:55:53 -05002555 else if (level == FS_SECTOR_SIZE_INFORMATION) {
2556 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
2557 (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
2558 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
2559 tcon->perf_sector_size =
2560 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
2561 }
Steve French34f62642013-10-09 02:07:00 -05002562
2563qfsattr_exit:
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002564 free_rsp_buf(resp_buftype, iov.iov_base);
2565 return rc;
2566}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002567
2568int
2569smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2570 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2571 const __u32 num_lock, struct smb2_lock_element *buf)
2572{
2573 int rc = 0;
2574 struct smb2_lock_req *req = NULL;
2575 struct kvec iov[2];
2576 int resp_buf_type;
2577 unsigned int count;
2578
Joe Perchesf96637b2013-05-04 22:12:25 -05002579 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002580
2581 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2582 if (rc)
2583 return rc;
2584
2585 req->hdr.ProcessId = cpu_to_le32(pid);
2586 req->LockCount = cpu_to_le16(num_lock);
2587
2588 req->PersistentFileId = persist_fid;
2589 req->VolatileFileId = volatile_fid;
2590
2591 count = num_lock * sizeof(struct smb2_lock_element);
2592 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2593
2594 iov[0].iov_base = (char *)req;
2595 /* 4 for rfc1002 length field and count for all locks */
2596 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2597 iov[1].iov_base = (char *)buf;
2598 iov[1].iov_len = count;
2599
2600 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2601 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2602 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002603 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002604 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2605 }
2606
2607 return rc;
2608}
2609
2610int
2611SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2612 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2613 const __u64 length, const __u64 offset, const __u32 lock_flags,
2614 const bool wait)
2615{
2616 struct smb2_lock_element lock;
2617
2618 lock.Offset = cpu_to_le64(offset);
2619 lock.Length = cpu_to_le64(length);
2620 lock.Flags = cpu_to_le32(lock_flags);
2621 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2622 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2623
2624 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2625}
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002626
2627int
2628SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2629 __u8 *lease_key, const __le32 lease_state)
2630{
2631 int rc;
2632 struct smb2_lease_ack *req = NULL;
2633
Joe Perchesf96637b2013-05-04 22:12:25 -05002634 cifs_dbg(FYI, "SMB2_lease_break\n");
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002635 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2636
2637 if (rc)
2638 return rc;
2639
2640 req->hdr.CreditRequest = cpu_to_le16(1);
2641 req->StructureSize = cpu_to_le16(36);
2642 inc_rfc1001_len(req, 12);
2643
2644 memcpy(req->LeaseKey, lease_key, 16);
2645 req->LeaseState = lease_state;
2646
2647 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2648 /* SMB2 buffer freed by function above */
2649
2650 if (rc) {
2651 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002652 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002653 }
2654
2655 return rc;
2656}