/*
 *  fs/cifs/smb2pdu.c
 *
 *  Copyright (C) International Business Machines Corp., 2009, 2012
 *                Etersoft, 2012
 *  Author(s): Steve French (sfrench@us.ibm.com)
 *             Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *  Contains the routines for constructing the SMB2 PDUs themselves
 *
 *  This library is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published
 *  by the Free Software Foundation; either version 2.1 of the License, or
 *  (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *  the GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be                    */
 /* treated slightly differently for reconnection purposes since we never      */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

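/*
 * Fill in the fixed portion of the SMB2 header for the given command:
 * protocol id, structure sizes, credit request, process id, and (when a
 * tcon is supplied) the tree and session ids plus the DFS and signing flags.
 */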
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;
	/* BB check following DFS flags BB */
	/* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
	/* BB how does SMB2 do case sensitive? */
	/* if (tcon->nocase)
		hdr->Flags |= SMBFLG_CASELESS; */
	if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}

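/*
 * Called before marshalling most SMB2 requests: if the TCP session needs to
 * be re-established, wait for the demultiplex thread to reconnect and then
 * redo the negotiate, session setup and tree connect as needed.  Handle
 * based commands return -EAGAIN so the caller can reopen the file first.
 */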
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
	/*
	 * BB FIXME add code to check if wsize needs update due to negotiated
	 * smb buffer size shrinking.
	 */
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

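/* Release a response buffer obtained from SendReceive2 based on its type. */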
static void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = ses->server;
	unsigned int sec_flags;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;

	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cifs_dbg(FYI, "sec_flags 0x%x\n", sec_flags);

	req->hdr.SessionId = 0;

	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
	inc_rfc1001_len(req, 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

	memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	/* BB we may eventually want to match the negotiated vs. requested
	   dialect, even though we are only requesting one at a time */
	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else {
		cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
			 le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	if (blob_length == 0) {
		cifs_dbg(VFS, "missing security blob on negprot\n");
		rc = -EIO;
		goto neg_exit;
	}

	rc = cifs_enable_signing(server, ses->sign);
#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
	if (rc)
		goto neg_exit;

	rc = decode_neg_token_init(security_blob, blob_length,
				   &server->sec_type);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server = ses->server;
	unsigned int sec_flags;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cifs_dbg(FYI, "Session Setup\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;

	ses->server->secType = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;

	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags? */
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cifs_dbg(FYI, "sec_flags 0x%x\n", sec_flags);

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->VcNumber = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
				 rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
							&security_blob,
							blob_length,
							ntlmssp_blob); */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cifs_dbg(VFS, "illegal ntlmssp phase\n");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
					    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);
	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
			  CIFS_LOG_ERROR | CIFS_NEG_OP);

	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (resp_buftype != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cifs_dbg(VFS, "Unexpected more processing error\n");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cifs_dbg(VFS, "Invalid security buffer offset %d\n",
				 le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;
	return rc;
}

int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "disconnect session %p\n", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	/* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sign)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cifs_dbg(FYI, "TCON\n");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cifs_dbg(FYI, "connection to disk share\n");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cifs_dbg(FYI, "connection to pipe share\n");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cifs_dbg(FYI, "connection to printer\n");
	} else {
		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");

tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
		tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Tree Disconnect\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}

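/*
 * Build an SMB2_CREATE lease ("RqLs") create context that requests the lease
 * state corresponding to the desired oplock level.
 */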
static struct create_lease *
create_lease_buf(u8 *lease_key, u8 oplock)
{
	struct create_lease *buf;

	buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
	buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
					   SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_II)
		buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
		buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
					   SMB2_LEASE_READ_CACHING |
					   SMB2_LEASE_WRITE_CACHING;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_lease, lcontext));
	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
					(struct create_lease, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Name[0] = 'R';
	buf->Name[1] = 'q';
	buf->Name[2] = 'L';
	buf->Name[3] = 's';
	return buf;
}

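/*
 * Walk the create contexts in a CREATE response looking for the lease
 * ("RqLs") context and map the granted lease state back to an oplock level.
 */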
static __u8
parse_lease_state(struct smb2_create_rsp *rsp)
{
	char *data_offset;
	struct create_lease *lc;
	bool found = false;

	data_offset = (char *)rsp;
	data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
	lc = (struct create_lease *)data_offset;
	do {
		char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
		if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			lc = (struct create_lease *)((char *)lc
					+ le32_to_cpu(lc->ccontext.Next));
			continue;
		}
		if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
			return SMB2_OPLOCK_LEVEL_NOCHANGE;
		found = true;
		break;
	} while (le32_to_cpu(lc->ccontext.Next) != 0);

	if (!found)
		return 0;

	return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
}

int
SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
	  u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
	  __u32 create_disposition, __u32 file_attributes, __u32 create_options,
	  __u8 *oplock, struct smb2_file_all_info *buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[3];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	int num_iovecs = 2;

	cifs_dbg(FYI, "create/open\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(create_disposition);
	req->CreateOptions = cpu_to_le32(create_options);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
			- 8 /* pad */ - 4 /* do not count rfc1001 len field */);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	if (uni_path_len >= 4) {
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		/* -1 since last byte is buf[0] which is sent below (path) */
		iov[0].iov_len--;
		if (uni_path_len % 8 != 0) {
			copy_size = uni_path_len / 8 * 8;
			if (copy_size < uni_path_len)
				copy_size += 8;

			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path)
				return -ENOMEM;
			memcpy((char *)copy_path, (const char *)path,
			       uni_path_len);
			uni_path_len = copy_size;
			path = copy_path;
		}

		iov[1].iov_len = uni_path_len;
		iov[1].iov_base = path;
		/*
		 * -1 since last byte is buf[0] which was counted in
		 * smb2_buf_len.
		 */
		inc_rfc1001_len(req, uni_path_len - 1);
	} else {
		iov[0].iov_len += 7;
		req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
				req->hdr.smb2_buf_length) + 8 - 1);
		num_iovecs = 1;
		req->NameLength = 0;
	}

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
		if (iov[num_iovecs].iov_base == NULL) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return -ENOMEM;
		}
		iov[num_iovecs].iov_len = sizeof(struct create_lease);
		req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) - 4 - 8 +
			iov[num_iovecs-1].iov_len);
		req->CreateContextsLength = cpu_to_le32(
			sizeof(struct create_lease));
		inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
		num_iovecs++;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		goto creat_exit;
	}

	*persistent_fid = rsp->PersistentFileId;
	*volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(rsp);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

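/*
 * Sanity check a variable length response area: the buffer must be at least
 * the expected minimum size and must lie entirely within the SMB response.
 */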
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)

{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;


	if (buffer_length < min_buf_size) {
		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
			 buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
			 buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cifs_dbg(VFS, "illegal server response, bad offset to data\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)

{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}

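/*
 * Common helper for the query info calls below: send an SMB2_QUERY_INFO
 * request for the given info class and copy the validated response into
 * the caller supplied data buffer.
 */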
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Query Info\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	DeleteMidQEntry(mid);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

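/* Send an asynchronous SMB2 ECHO request; the reply refreshes server->lstrp. */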
int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "In echo request\n");

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}

int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Flush\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if ((rc != 0) && tcon)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}

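/*
 * Completion handler for an async read: verify the signature if signing is
 * in use, account for the bytes read, queue the read completion work, and
 * return the credits granted by the server.
 */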
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sign) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;
	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc)
		return rc;

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, 0);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}

Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001424int
1425SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1426 unsigned int *nbytes, char **buf, int *buf_type)
1427{
1428 int resp_buftype, rc = -EACCES;
1429 struct smb2_read_rsp *rsp = NULL;
1430 struct kvec iov[1];
1431
1432 *nbytes = 0;
1433 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1434 if (rc)
1435 return rc;
1436
1437 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1438 &resp_buftype, CIFS_LOG_ERROR);
1439
1440 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1441
1442 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1443 free_rsp_buf(resp_buftype, iov[0].iov_base);
1444 return 0;
1445 }
1446
1447 if (rc) {
1448 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001449 cifs_dbg(VFS, "Send error in read = %d\n", rc);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001450 } else {
1451 *nbytes = le32_to_cpu(rsp->DataLength);
1452 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1453 (*nbytes > io_parms->length)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001454 cifs_dbg(FYI, "bad length %d for count %d\n",
1455 *nbytes, io_parms->length);
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001456 rc = -EIO;
1457 *nbytes = 0;
1458 }
1459 }
1460
1461 if (*buf) {
1462 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1463 *nbytes);
1464 free_rsp_buf(resp_buftype, iov[0].iov_base);
1465 } else if (resp_buftype != CIFS_NO_BUFFER) {
1466 *buf = iov[0].iov_base;
1467 if (resp_buftype == CIFS_SMALL_BUFFER)
1468 *buf_type = CIFS_SMALL_BUFFER;
1469 else if (resp_buftype == CIFS_LARGE_BUFFER)
1470 *buf_type = CIFS_LARGE_BUFFER;
1471 }
1472 return rc;
1473}
1474
Pavel Shilovsky33319142012-09-18 16:20:29 -07001475/*
1476 * Check the mid_state and signature on received buffer (if any), and queue the
1477 * workqueue completion task.
1478 */
1479static void
1480smb2_writev_callback(struct mid_q_entry *mid)
1481{
1482 struct cifs_writedata *wdata = mid->callback_data;
1483 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1484 unsigned int written;
1485 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1486 unsigned int credits_received = 1;
1487
1488 switch (mid->mid_state) {
1489 case MID_RESPONSE_RECEIVED:
1490 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1491 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1492 if (wdata->result != 0)
1493 break;
1494
1495 written = le32_to_cpu(rsp->DataLength);
1496 /*
1497 * Mask off high 16 bits when bytes written as returned
1498 * by the server is greater than bytes requested by the
1499 * client. OS/2 servers are known to set incorrect
1500 * CountHigh values.
1501 */
1502 if (written > wdata->bytes)
1503 written &= 0xFFFF;
1504
1505 if (written < wdata->bytes)
1506 wdata->result = -ENOSPC;
1507 else
1508 wdata->bytes = written;
1509 break;
1510 case MID_REQUEST_SUBMITTED:
1511 case MID_RETRY_NEEDED:
1512 wdata->result = -EAGAIN;
1513 break;
1514 default:
1515 wdata->result = -EIO;
1516 break;
1517 }
1518
1519 if (wdata->result)
1520 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1521
1522 queue_work(cifsiod_wq, &wdata->work);
1523 DeleteMidQEntry(mid);
1524 add_credits(tcon->ses->server, credits_received, 0);
1525}
1526
1527/* smb2_async_writev - send an async write, and set up mid to handle result */
1528int
1529smb2_async_writev(struct cifs_writedata *wdata)
1530{
Jeff Laytoneddb0792012-09-18 16:20:35 -07001531 int rc = -EACCES;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001532 struct smb2_write_req *req = NULL;
1533 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001534 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001535 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001536
1537 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
1538 if (rc)
1539 goto async_writev_out;
1540
Pavel Shilovsky33319142012-09-18 16:20:29 -07001541 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1542
1543 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1544 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1545 req->WriteChannelInfoOffset = 0;
1546 req->WriteChannelInfoLength = 0;
1547 req->Channel = 0;
1548 req->Offset = cpu_to_le64(wdata->offset);
1549 /* 4 for rfc1002 length field */
1550 req->DataOffset = cpu_to_le16(
1551 offsetof(struct smb2_write_req, Buffer) - 4);
1552 req->RemainingBytes = 0;
1553
1554 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07001555 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1556 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001557
Jeff Laytoneddb0792012-09-18 16:20:35 -07001558 rqst.rq_iov = &iov;
1559 rqst.rq_nvec = 1;
1560 rqst.rq_pages = wdata->pages;
1561 rqst.rq_npages = wdata->nr_pages;
1562 rqst.rq_pagesz = wdata->pagesz;
1563 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001564
Joe Perchesf96637b2013-05-04 22:12:25 -05001565 cifs_dbg(FYI, "async write at %llu %u bytes\n",
1566 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001567
1568 req->Length = cpu_to_le32(wdata->bytes);
1569
1570 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1571
1572 kref_get(&wdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001573 rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
1574 smb2_writev_callback, wdata, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001575
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001576 if (rc) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07001577 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001578 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1579 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001580
Pavel Shilovsky33319142012-09-18 16:20:29 -07001581async_writev_out:
1582 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001583 return rc;
1584}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001585
1586/*
1587 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
1588 * The length field from io_parms must be at least 1 and indicates a number of
1589 * elements with data to write that begins with position 1 in iov array. All
1590 * data length is specified by count.
1591 */
1592int
1593SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
1594 unsigned int *nbytes, struct kvec *iov, int n_vec)
1595{
1596 int rc = 0;
1597 struct smb2_write_req *req = NULL;
1598 struct smb2_write_rsp *rsp = NULL;
1599 int resp_buftype;
1600 *nbytes = 0;
1601
1602 if (n_vec < 1)
1603 return rc;
1604
 1605 if (io_parms->tcon->ses->server == NULL)
 1606 return -ECONNABORTED;
 1607
 1608 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
 1609 if (rc)
 1610 return rc;
1611
1612 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1613
1614 req->PersistentFileId = io_parms->persistent_fid;
1615 req->VolatileFileId = io_parms->volatile_fid;
1616 req->WriteChannelInfoOffset = 0;
1617 req->WriteChannelInfoLength = 0;
1618 req->Channel = 0;
1619 req->Length = cpu_to_le32(io_parms->length);
1620 req->Offset = cpu_to_le64(io_parms->offset);
1621 /* 4 for rfc1002 length field */
1622 req->DataOffset = cpu_to_le16(
1623 offsetof(struct smb2_write_req, Buffer) - 4);
1624 req->RemainingBytes = 0;
1625
1626 iov[0].iov_base = (char *)req;
1627 /* 4 for rfc1002 length field and 1 for Buffer */
1628 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1629
1630 /* length of entire message including data to be written */
1631 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
1632
1633 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
1634 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001635 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001636
1637 if (rc) {
1638 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001639 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001640 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001641 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001642
1643 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001644 return rc;
1645}
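
/*
 * Editorial sketch, not part of the original file: a minimal synchronous
 * write using SMB2_write() above.  The caller leaves iov[0] for the
 * request header, which SMB2_write() fills in itself, and places the
 * payload in iov[1]; io_parms is assumed to be set up as in the read
 * sketch earlier (fid, tcon, pid and offset).
 */
static int __maybe_unused
smb2_write_sketch(const unsigned int xid, struct cifs_io_parms *io_parms,
                  char *data, unsigned int len)
{
        struct kvec iov[2];     /* iov[0] is reserved for the header */
        unsigned int nbytes = 0;
        int rc;

        iov[1].iov_base = data;
        iov[1].iov_len = len;
        io_parms->length = len;

        rc = SMB2_write(xid, io_parms, &nbytes, iov, 1);
        if (!rc && nbytes < len)
                rc = -EIO;      /* treat a short write as an error here */
        return rc;
}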
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001646
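/*
 * Count the directory entries in a query response by walking the
 * NextEntryOffset chain, stopping at the first entry whose fixed part
 * (size bytes) or variable-length file name would run past end_of_buf.
 * On return, *lastentry points at the last entry that was accepted.
 */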
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001647static unsigned int
1648num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
1649{
1650 int len;
1651 unsigned int entrycount = 0;
1652 unsigned int next_offset = 0;
1653 FILE_DIRECTORY_INFO *entryptr;
1654
1655 if (bufstart == NULL)
1656 return 0;
1657
1658 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
1659
1660 while (1) {
1661 entryptr = (FILE_DIRECTORY_INFO *)
1662 ((char *)entryptr + next_offset);
1663
1664 if ((char *)entryptr + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001665 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001666 break;
1667 }
1668
1669 len = le32_to_cpu(entryptr->FileNameLength);
1670 if ((char *)entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001671 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
1672 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001673 break;
1674 }
1675
1676 *lastentry = (char *)entryptr;
1677 entrycount++;
1678
1679 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
1680 if (!next_offset)
1681 break;
1682 }
1683
1684 return entrycount;
1685}
1686
1687/*
1688 * Readdir/FindFirst
1689 */
1690int
1691SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
1692 u64 persistent_fid, u64 volatile_fid, int index,
1693 struct cifs_search_info *srch_inf)
1694{
1695 struct smb2_query_directory_req *req;
1696 struct smb2_query_directory_rsp *rsp = NULL;
1697 struct kvec iov[2];
1698 int rc = 0;
1699 int len;
1700 int resp_buftype;
1701 unsigned char *bufptr;
1702 struct TCP_Server_Info *server;
1703 struct cifs_ses *ses = tcon->ses;
 1704 __le16 asterisk = cpu_to_le16('*');
1705 char *end_of_smb;
1706 unsigned int output_size = CIFSMaxBufSize;
1707 size_t info_buf_size;
1708
1709 if (ses && (ses->server))
1710 server = ses->server;
1711 else
1712 return -EIO;
1713
1714 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
1715 if (rc)
1716 return rc;
1717
1718 switch (srch_inf->info_level) {
1719 case SMB_FIND_FILE_DIRECTORY_INFO:
1720 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
1721 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
1722 break;
1723 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
1724 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
1725 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
1726 break;
1727 default:
Joe Perchesf96637b2013-05-04 22:12:25 -05001728 cifs_dbg(VFS, "info level %u isn't supported\n",
1729 srch_inf->info_level);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001730 rc = -EINVAL;
1731 goto qdir_exit;
1732 }
1733
1734 req->FileIndex = cpu_to_le32(index);
1735 req->PersistentFileId = persistent_fid;
1736 req->VolatileFileId = volatile_fid;
1737
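        /* the search pattern is a single UTF-16 '*' wildcard, i.e. two bytes */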
1738 len = 0x2;
1739 bufptr = req->Buffer;
1740 memcpy(bufptr, &asteriks, len);
1741
1742 req->FileNameOffset =
1743 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
1744 req->FileNameLength = cpu_to_le16(len);
1745 /*
1746 * BB could be 30 bytes or so longer if we used SMB2 specific
1747 * buffer lengths, but this is safe and close enough.
1748 */
1749 output_size = min_t(unsigned int, output_size, server->maxBuf);
1750 output_size = min_t(unsigned int, output_size, 2 << 15);
1751 req->OutputBufferLength = cpu_to_le32(output_size);
1752
1753 iov[0].iov_base = (char *)req;
1754 /* 4 for RFC1001 length and 1 for Buffer */
1755 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1756
1757 iov[1].iov_base = (char *)(req->Buffer);
1758 iov[1].iov_len = len;
1759
1760 inc_rfc1001_len(req, len - 1 /* Buffer */);
1761
1762 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001763 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
1764
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001765 if (rc) {
1766 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1767 goto qdir_exit;
1768 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001769
1770 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
1771 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
1772 info_buf_size);
1773 if (rc)
1774 goto qdir_exit;
1775
1776 srch_inf->unicode = true;
1777
1778 if (srch_inf->ntwrk_buf_start) {
1779 if (srch_inf->smallBuf)
1780 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
1781 else
1782 cifs_buf_release(srch_inf->ntwrk_buf_start);
1783 }
1784 srch_inf->ntwrk_buf_start = (char *)rsp;
1785 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
1786 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
1787 /* 4 for rfc1002 length field */
1788 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
1789 srch_inf->entries_in_buffer =
1790 num_entries(srch_inf->srch_entries_start, end_of_smb,
1791 &srch_inf->last_entry, info_buf_size);
1792 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
Joe Perchesf96637b2013-05-04 22:12:25 -05001793 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
1794 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
1795 srch_inf->srch_entries_start, srch_inf->last_entry);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001796 if (resp_buftype == CIFS_LARGE_BUFFER)
1797 srch_inf->smallBuf = false;
1798 else if (resp_buftype == CIFS_SMALL_BUFFER)
1799 srch_inf->smallBuf = true;
1800 else
Joe Perchesf96637b2013-05-04 22:12:25 -05001801 cifs_dbg(VFS, "illegal search buffer type\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001802
1803 if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
1804 srch_inf->endOfSearch = 1;
1805 else
1806 srch_inf->endOfSearch = 0;
1807
1808 return rc;
1809
1810qdir_exit:
1811 free_rsp_buf(resp_buftype, rsp);
1812 return rc;
1813}
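
/*
 * Editorial sketch, not part of the original file: a first directory query
 * through SMB2_query_directory() above.  The caller owns srch_inf and must
 * eventually release srch_inf->ntwrk_buf_start (small or large buffer,
 * depending on srch_inf->smallBuf); that cleanup is omitted here and the
 * helper name is illustrative only.
 */
static int __maybe_unused
smb2_readdir_sketch(const unsigned int xid, struct cifs_tcon *tcon,
                    u64 persistent_fid, u64 volatile_fid,
                    struct cifs_search_info *srch_inf)
{
        int rc;

        srch_inf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
        srch_inf->ntwrk_buf_start = NULL;
        srch_inf->index_of_last_entry = 0;

        rc = SMB2_query_directory(xid, tcon, persistent_fid, volatile_fid,
                                  0 /* index */, srch_inf);
        if (!rc)
                cifs_dbg(FYI, "sketch: %d entries, end of search %d\n",
                         srch_inf->entries_in_buffer, srch_inf->endOfSearch);
        return rc;
}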
1814
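/*
 * Build and send an SMB2 SET_INFO request for an open file.  data/size
 * describe num buffers: data[0] must be the fixed information structure
 * for the given info_class and is copied into the request itself, while
 * any additional buffers (such as the target name used by rename and
 * hardlink below) are sent as extra iovecs and added to the advertised
 * BufferLength.
 */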
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001815static int
1816send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001817 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001818 unsigned int num, void **data, unsigned int *size)
1819{
1820 struct smb2_set_info_req *req;
1821 struct smb2_set_info_rsp *rsp = NULL;
1822 struct kvec *iov;
1823 int rc = 0;
1824 int resp_buftype;
1825 unsigned int i;
 1826 struct cifs_ses *ses = tcon->ses;
 1827
 1828 if (!ses || !ses->server)
 1829 return -EIO;
1833
1834 if (!num)
1835 return -EINVAL;
1836
1837 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
1838 if (!iov)
1839 return -ENOMEM;
1840
1841 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
1842 if (rc) {
1843 kfree(iov);
1844 return rc;
1845 }
1846
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001847 req->hdr.ProcessId = cpu_to_le32(pid);
1848
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001849 req->InfoType = SMB2_O_INFO_FILE;
1850 req->FileInfoClass = info_class;
1851 req->PersistentFileId = persistent_fid;
1852 req->VolatileFileId = volatile_fid;
1853
1854 /* 4 for RFC1001 length and 1 for Buffer */
1855 req->BufferOffset =
1856 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
1857 req->BufferLength = cpu_to_le32(*size);
1858
1859 inc_rfc1001_len(req, *size - 1 /* Buffer */);
1860
1861 memcpy(req->Buffer, *data, *size);
1862
1863 iov[0].iov_base = (char *)req;
1864 /* 4 for RFC1001 length */
1865 iov[0].iov_len = get_rfc1002_length(req) + 4;
1866
1867 for (i = 1; i < num; i++) {
1868 inc_rfc1001_len(req, size[i]);
1869 le32_add_cpu(&req->BufferLength, size[i]);
1870 iov[i].iov_base = (char *)data[i];
1871 iov[i].iov_len = size[i];
1872 }
1873
1874 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
1875 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
1876
1877 if (rc != 0) {
1878 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
1879 goto out;
1880 }
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001881out:
1882 free_rsp_buf(resp_buftype, rsp);
1883 kfree(iov);
1884 return rc;
1885}
1886
1887int
1888SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
1889 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
1890{
1891 struct smb2_file_rename_info info;
1892 void **data;
1893 unsigned int size[2];
1894 int rc;
1895 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
1896
1897 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
1898 if (!data)
1899 return -ENOMEM;
1900
1901 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
1902 /* 0 = fail if target already exists */
 1903 info.RootDirectory = 0; /* MBZ (must be zero) for network operations per the spec */
1904 info.FileNameLength = cpu_to_le32(len);
1905
1906 data[0] = &info;
1907 size[0] = sizeof(struct smb2_file_rename_info);
1908
1909 data[1] = target_file;
1910 size[1] = len + 2 /* null */;
1911
1912 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001913 current->tgid, FILE_RENAME_INFORMATION, 2, data,
1914 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001915 kfree(data);
1916 return rc;
1917}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07001918
1919int
1920SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
1921 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
1922{
1923 struct smb2_file_link_info info;
1924 void **data;
1925 unsigned int size[2];
1926 int rc;
1927 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
1928
1929 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
1930 if (!data)
1931 return -ENOMEM;
1932
1933 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
1934 /* 0 = fail if link already exists */
 1935 info.RootDirectory = 0; /* MBZ (must be zero) for network operations per the spec */
1936 info.FileNameLength = cpu_to_le32(len);
1937
1938 data[0] = &info;
1939 size[0] = sizeof(struct smb2_file_link_info);
1940
1941 data[1] = target_file;
1942 size[1] = len + 2 /* null */;
1943
1944 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001945 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07001946 kfree(data);
1947 return rc;
1948}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001949
1950int
1951SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1952 u64 volatile_fid, u32 pid, __le64 *eof)
1953{
1954 struct smb2_file_eof_info info;
1955 void *data;
1956 unsigned int size;
1957
1958 info.EndOfFile = *eof;
1959
1960 data = &info;
1961 size = sizeof(struct smb2_file_eof_info);
1962
1963 return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
1964 FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
1965}
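
/*
 * Editorial sketch, not part of the original file: setting a file's size
 * with SMB2_set_eof() above.  The end-of-file value goes on the wire in
 * little-endian form, hence the cpu_to_le64() conversion; the helper name
 * is illustrative only.
 */
static int __maybe_unused
smb2_set_size_sketch(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u64 new_size)
{
        __le64 eof = cpu_to_le64(new_size);

        return SMB2_set_eof(xid, tcon, persistent_fid, volatile_fid,
                            current->tgid, &eof);
}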
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07001966
1967int
1968SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
1969 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
1970{
1971 unsigned int size;
1972 size = sizeof(FILE_BASIC_INFO);
1973 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
1974 current->tgid, FILE_BASIC_INFORMATION, 1,
1975 (void **)&buf, &size);
1976}
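
/*
 * Editorial sketch, not part of the original file: marking a file read-only
 * by sending new basic information through SMB2_set_info() above.  The
 * FILE_BASIC_INFO layout and the ATTR_READONLY flag are assumed to come
 * from cifspdu.h; timestamp fields left at zero request no change.
 */
static int __maybe_unused
smb2_set_readonly_sketch(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_fid, u64 volatile_fid)
{
        FILE_BASIC_INFO info = { };

        info.Attributes = cpu_to_le32(ATTR_READONLY);
        return SMB2_set_info(xid, tcon, persistent_fid, volatile_fid, &info);
}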
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001977
1978int
1979SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
1980 const u64 persistent_fid, const u64 volatile_fid,
1981 __u8 oplock_level)
1982{
1983 int rc;
1984 struct smb2_oplock_break *req = NULL;
1985
Joe Perchesf96637b2013-05-04 22:12:25 -05001986 cifs_dbg(FYI, "SMB2_oplock_break\n");
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001987 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
1988
1989 if (rc)
1990 return rc;
1991
1992 req->VolatileFid = volatile_fid;
1993 req->PersistentFid = persistent_fid;
1994 req->OplockLevel = oplock_level;
1995 req->hdr.CreditRequest = cpu_to_le16(1);
1996
1997 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
1998 /* SMB2 buffer freed by function above */
1999
2000 if (rc) {
2001 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002002 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002003 }
2004
2005 return rc;
2006}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002007
2008static void
2009copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2010 struct kstatfs *kst)
2011{
2012 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2013 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2014 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2015 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2016 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2017 return;
2018}
2019
2020static int
2021build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2022 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2023{
2024 int rc;
2025 struct smb2_query_info_req *req;
2026
Joe Perchesf96637b2013-05-04 22:12:25 -05002027 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002028
2029 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2030 return -EIO;
2031
2032 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2033 if (rc)
2034 return rc;
2035
2036 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2037 req->FileInfoClass = level;
2038 req->PersistentFileId = persistent_fid;
2039 req->VolatileFileId = volatile_fid;
2040 /* 4 for rfc1002 length field and 1 for pad */
2041 req->InputBufferOffset =
2042 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2043 req->OutputBufferLength = cpu_to_le32(
2044 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2045
2046 iov->iov_base = (char *)req;
2047 /* 4 for rfc1002 length field */
2048 iov->iov_len = get_rfc1002_length(req) + 4;
2049 return 0;
2050}
2051
2052int
2053SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2054 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2055{
2056 struct smb2_query_info_rsp *rsp = NULL;
2057 struct kvec iov;
2058 int rc = 0;
2059 int resp_buftype;
2060 struct cifs_ses *ses = tcon->ses;
2061 struct smb2_fs_full_size_info *info = NULL;
2062
2063 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2064 sizeof(struct smb2_fs_full_size_info),
2065 persistent_fid, volatile_fid);
2066 if (rc)
2067 return rc;
2068
2069 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2070 if (rc) {
2071 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2072 goto qinf_exit;
2073 }
2074 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2075
2076 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2077 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2078 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2079 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2080 sizeof(struct smb2_fs_full_size_info));
2081 if (!rc)
2082 copy_fs_info_to_kstatfs(info, fsdata);
2083
2084qinf_exit:
2085 free_rsp_buf(resp_buftype, iov.iov_base);
2086 return rc;
2087}
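
/*
 * Editorial sketch, not part of the original file: querying filesystem
 * size information through SMB2_QFS_info() above and logging the result;
 * the helper name is illustrative only.
 */
static int __maybe_unused
smb2_statfs_sketch(const unsigned int xid, struct cifs_tcon *tcon,
                   u64 persistent_fid, u64 volatile_fid)
{
        struct kstatfs st = { };
        int rc;

        rc = SMB2_QFS_info(xid, tcon, persistent_fid, volatile_fid, &st);
        if (!rc)
                cifs_dbg(FYI, "sketch: block size %ld, %llu of %llu blocks free\n",
                         st.f_bsize, (unsigned long long)st.f_bfree,
                         (unsigned long long)st.f_blocks);
        return rc;
}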
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002088
2089int
2090smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2091 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2092 const __u32 num_lock, struct smb2_lock_element *buf)
2093{
2094 int rc = 0;
2095 struct smb2_lock_req *req = NULL;
2096 struct kvec iov[2];
2097 int resp_buf_type;
2098 unsigned int count;
2099
Joe Perchesf96637b2013-05-04 22:12:25 -05002100 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002101
2102 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2103 if (rc)
2104 return rc;
2105
2106 req->hdr.ProcessId = cpu_to_le32(pid);
2107 req->LockCount = cpu_to_le16(num_lock);
2108
2109 req->PersistentFileId = persist_fid;
2110 req->VolatileFileId = volatile_fid;
2111
2112 count = num_lock * sizeof(struct smb2_lock_element);
2113 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2114
2115 iov[0].iov_base = (char *)req;
2116 /* 4 for rfc1002 length field and count for all locks */
2117 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2118 iov[1].iov_base = (char *)buf;
2119 iov[1].iov_len = count;
2120
2121 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2122 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2123 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002124 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002125 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2126 }
2127
2128 return rc;
2129}
2130
2131int
2132SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2133 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2134 const __u64 length, const __u64 offset, const __u32 lock_flags,
2135 const bool wait)
2136{
2137 struct smb2_lock_element lock;
2138
2139 lock.Offset = cpu_to_le64(offset);
2140 lock.Length = cpu_to_le64(length);
2141 lock.Flags = cpu_to_le32(lock_flags);
2142 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2143 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2144
2145 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2146}
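
/*
 * Editorial sketch, not part of the original file: taking and then
 * releasing a non-blocking exclusive byte-range lock via SMB2_lock()
 * above.  SMB2_LOCKFLAG_EXCLUSIVE_LOCK is assumed to be defined in
 * smb2pdu.h alongside the unlock and fail-immediately flags used above.
 */
static int __maybe_unused
smb2_lock_range_sketch(const unsigned int xid, struct cifs_tcon *tcon,
                       u64 persist_fid, u64 volatile_fid, u64 offset,
                       u64 length)
{
        int rc;

        rc = SMB2_lock(xid, tcon, persist_fid, volatile_fid, current->tgid,
                       length, offset, SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
        if (rc)
                return rc;

        return SMB2_lock(xid, tcon, persist_fid, volatile_fid, current->tgid,
                         length, offset, SMB2_LOCKFLAG_UNLOCK, false);
}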
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002147
2148int
2149SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2150 __u8 *lease_key, const __le32 lease_state)
2151{
2152 int rc;
2153 struct smb2_lease_ack *req = NULL;
2154
Joe Perchesf96637b2013-05-04 22:12:25 -05002155 cifs_dbg(FYI, "SMB2_lease_break\n");
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002156 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2157
2158 if (rc)
2159 return rc;
2160
2161 req->hdr.CreditRequest = cpu_to_le16(1);
2162 req->StructureSize = cpu_to_le16(36);
2163 inc_rfc1001_len(req, 12);
2164
2165 memcpy(req->LeaseKey, lease_key, 16);
2166 req->LeaseState = lease_state;
2167
2168 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2169 /* SMB2 buffer freed by function above */
2170
2171 if (rc) {
2172 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002173 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002174 }
2175
2176 return rc;
2177}