/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2011
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};


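/*
 * Fill in the fixed SMB2 header for the given command: zero the buffer, set
 * the protocol id, StructureSize (64), command code, credits and pid, and,
 * when a tcon is supplied, the tree id, session id and signing/DFS flags.
 * StructureSize2 is looked up from smb2_req_struct_sizes above.
 */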
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;
	/* BB check following DFS flags BB */
	/* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
	/* BB how does SMB2 do case sensitive? */
	/*	if (tcon->nocase)
			hdr->Flags |= SMBFLG_CASELESS; */
	if (tcon->ses && tcon->ses->server &&
	    (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED))
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}

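/*
 * Re-establish the transport, SMB session and tree connection as needed
 * before sending the given command.  Returns -EAGAIN for handle based
 * commands so the caller can rebuild state (e.g. reopen the file) first.
 */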
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cFYI(1, "can not send cmd %d while umounting",
				smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cFYI(1, "gave up waiting on reconnect in smb_init");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cFYI(1, "reconnect tcon rc = %d", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
	/*
	 * BB FIXME add code to check if wsize needs update due to negotiated
	 * smb buffer size shrinking.
	 */
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

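/* Release a response buffer obtained from SendReceive2 based on its type. */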
static void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

#define SMB2_NUM_PROT 2

#define SMB2_PROT   0
#define SMB21_PROT  1
#define BAD_PROT 0xFFFF

#define SMB2_PROT_ID  0x0202
#define SMB21_PROT_ID 0x0210
#define BAD_PROT_ID   0xFFFF

static struct {
	int index;
	__le16 name;
} smb2protocols[] = {
	{SMB2_PROT, cpu_to_le16(SMB2_PROT_ID)},
	{SMB21_PROT, cpu_to_le16(SMB21_PROT_ID)},
	{BAD_PROT, cpu_to_le16(BAD_PROT_ID)}
};

/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

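/*
 * Send the SMB2 NEGOTIATE request and process the response: record the
 * dialect, buffer sizes, security mode and capabilities reported by the
 * server, and decode the security blob used by the session setup step.
 */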
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	unsigned int sec_flags;
	u16 i;
	u16 temp = 0;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cFYI(1, "Negotiate protocol");

	if (ses->server)
		server = ses->server;
	else {
		rc = -EIO;
		return rc;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;

	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags?*/
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cFYI(1, "sec_flags 0x%x", sec_flags);

	req->hdr.SessionId = 0;

	for (i = 0; i < SMB2_NUM_PROT; i++)
		req->Dialects[i] = smb2protocols[i].name;

	req->DialectCount = cpu_to_le16(i);
	inc_rfc1001_len(req, i * 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
		temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

	req->SecurityMode = cpu_to_le16(temp);

	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);

	memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	if (rsp == NULL) {
		rc = -EIO;
		goto neg_exit;
	}

	cFYI(1, "mode 0x%x", rsp->SecurityMode);

	if (rsp->DialectRevision == smb2protocols[SMB21_PROT].name)
		cFYI(1, "negotiated smb2.1 dialect");
	else if (rsp->DialectRevision == smb2protocols[SMB2_PROT].name)
		cFYI(1, "negotiated smb2 dialect");
	else {
		cERROR(1, "Illegal dialect returned by server %d",
			   le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	if (blob_length == 0) {
		cERROR(1, "missing security blob on negprot");
		rc = -EIO;
		goto neg_exit;
	}

	cFYI(1, "sec_flags 0x%x", sec_flags);
	if (sec_flags & CIFSSEC_MUST_SIGN) {
		cFYI(1, "Signing required");
		if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED |
		      SMB2_NEGOTIATE_SIGNING_ENABLED))) {
			cERROR(1, "signing required but server lacks support");
			rc = -EOPNOTSUPP;
			goto neg_exit;
		}
		server->sec_mode |= SECMODE_SIGN_REQUIRED;
	} else if (sec_flags & CIFSSEC_MAY_SIGN) {
		cFYI(1, "Signing optional");
		if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
			cFYI(1, "Server requires signing");
			server->sec_mode |= SECMODE_SIGN_REQUIRED;
		} else {
			server->sec_mode &=
				~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
		}
	} else {
		cFYI(1, "Signing disabled");
		if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
			cERROR(1, "Server requires packet signing to be enabled"
				  " in /proc/fs/cifs/SecurityFlags.");
			rc = -EOPNOTSUPP;
			goto neg_exit;
		}
		server->sec_mode &=
			~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
	}

#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
	rc = decode_neg_token_init(security_blob, blob_length,
				   &server->sec_type);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

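/*
 * Send SMB2 SESSION_SETUP.  Raw NTLMSSP takes two round trips: a NEGOTIATE
 * blob first, then, after the server replies with
 * STATUS_MORE_PROCESSING_REQUIRED and a challenge, an AUTHENTICATE blob.
 */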
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server;
	unsigned int sec_flags;
	u8 temp = 0;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cFYI(1, "Session Setup");

	if (ses->server)
		server = ses->server;
	else {
		rc = -EIO;
		return rc;
	}

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;

	ses->server->secType = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;

	/* if any of auth flags (ie not sign or seal) are overridden use them */
	if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		sec_flags = ses->overrideSecFlg;  /* BB FIXME fix sign flags?*/
	else /* if override flags set only sign/seal OR them with global auth */
		sec_flags = global_secflags | ses->overrideSecFlg;

	cFYI(1, "sec_flags 0x%x", sec_flags);

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->VcNumber = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED)
		temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
		temp = SMB2_NEGOTIATE_SIGNING_ENABLED;

	req->SecurityMode = temp;
	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cERROR(1, "spnego not supported for SMB2 yet");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			cERROR(1, "failed to malloc ntlmssp blob");
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cFYI(1, "build_ntlmssp_auth_blob failed %d", rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					blob_length,
					ntlmssp_blob); */
			cERROR(1, "spnego not supported for SMB2 yet");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cERROR(1, "illegal ntlmssp phase");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
			    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);
	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, CIFS_LOG_ERROR);

	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cERROR(1, "Unexpected more processing error");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cERROR(1, "Invalid security buffer offset %d",
				  le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	if (rsp == NULL) {
		rc = -EIO;
		goto ssetup_exit;
	}

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;
	return rc;
}

int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cFYI(1, "disconnect session %p", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	/* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sec_mode & SECMODE_SIGN_REQUIRED)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

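/*
 * Send SMB2 TREE_CONNECT for the UNC path in "tree".  On success record the
 * tree id, share flags/capabilities and share type (disk, pipe or printer)
 * in the tcon, or the IPC tree id in the session when no tcon is passed.
 */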
int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cFYI(1, "TCON");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (rsp == NULL) {
		rc = -EIO;
		goto tcon_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cFYI(1, "connection to disk share");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cFYI(1, "connection to pipe share");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cFYI(1, "connection to printer");
	} else {
		cERROR(1, "unknown share type %d", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cERROR(1, "DFS capability contradicts DFS flag");

tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cERROR(1, "BAD_NETWORK_NAME: %s", tree);
		tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cFYI(1, "Tree Disconnect");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}

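/*
 * Build the "RqLs" (lease request) create context for an SMB2 CREATE,
 * mapping the requested oplock level to the corresponding lease state bits.
 * Caller frees the returned buffer.
 */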
static struct create_lease *
create_lease_buf(u8 *lease_key, u8 oplock)
{
	struct create_lease *buf;

	buf = kmalloc(sizeof(struct create_lease), GFP_KERNEL);
	if (!buf)
		return NULL;

	memset(buf, 0, sizeof(struct create_lease));

	buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
	buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
					   SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_II)
		buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
		buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
					   SMB2_LEASE_READ_CACHING |
					   SMB2_LEASE_WRITE_CACHING;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_lease, lcontext));
	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
					(struct create_lease, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Name[0] = 'R';
	buf->Name[1] = 'q';
	buf->Name[2] = 'L';
	buf->Name[3] = 's';
	return buf;
}

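/*
 * Walk the create contexts in a CREATE response looking for the "RqLs"
 * lease context and convert the granted lease state back to an oplock
 * level (or SMB2_OPLOCK_LEVEL_NOCHANGE if a lease break is in progress).
 */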
static __u8
parse_lease_state(struct smb2_create_rsp *rsp)
{
	char *data_offset;
	struct create_lease *lc;
	bool found = false;

	data_offset = (char *)rsp;
	data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
	lc = (struct create_lease *)data_offset;
	do {
		char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
		if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			lc = (struct create_lease *)((char *)lc
					+ le32_to_cpu(lc->ccontext.Next));
			continue;
		}
		if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
			return SMB2_OPLOCK_LEVEL_NOCHANGE;
		found = true;
		break;
	} while (le32_to_cpu(lc->ccontext.Next) != 0);

	if (!found)
		return 0;

	return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
}

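/*
 * Send SMB2 CREATE.  Returns the persistent and volatile file ids, the
 * granted oplock/lease level and, if "buf" is non-NULL, a copy of the file
 * metadata from the response.  The path is padded to an 8 byte boundary and
 * a lease context is attached when the server supports leasing.
 */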
int
SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
	  u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
	  __u32 create_disposition, __u32 file_attributes, __u32 create_options,
	  __u8 *oplock, struct smb2_file_all_info *buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[3];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	int num_iovecs = 2;

	cFYI(1, "create/open");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(create_disposition);
	req->CreateOptions = cpu_to_le32(create_options);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
			- 8 /* pad */ - 4 /* do not count rfc1001 len field */);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	if (uni_path_len >= 4) {
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		/* -1 since last byte is buf[0] which is sent below (path) */
		iov[0].iov_len--;
		if (uni_path_len % 8 != 0) {
			copy_size = uni_path_len / 8 * 8;
			if (copy_size < uni_path_len)
				copy_size += 8;

			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path)
				return -ENOMEM;
			memcpy((char *)copy_path, (const char *)path,
			       uni_path_len);
			uni_path_len = copy_size;
			path = copy_path;
		}

		iov[1].iov_len = uni_path_len;
		iov[1].iov_base = path;
		/*
		 * -1 since last byte is buf[0] which was counted in
		 * smb2_buf_len.
		 */
		inc_rfc1001_len(req, uni_path_len - 1);
	} else {
		iov[0].iov_len += 7;
		req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
				req->hdr.smb2_buf_length) + 8 - 1);
		num_iovecs = 1;
		req->NameLength = 0;
	}

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
		if (iov[num_iovecs].iov_base == NULL) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return -ENOMEM;
		}
		iov[num_iovecs].iov_len = sizeof(struct create_lease);
		req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) - 4 - 8 +
			iov[num_iovecs-1].iov_len);
		req->CreateContextsLength = cpu_to_le32(
			sizeof(struct create_lease));
		inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
		num_iovecs++;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		goto creat_exit;
	}

	if (rsp == NULL) {
		rc = -EIO;
		goto creat_exit;
	}
	*persistent_fid = rsp->PersistentFileId;
	*volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(rsp);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cFYI(1, "Close");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	if (rsp == NULL) {
		rc = -EIO;
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

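/*
 * Sanity check a variable length field in an SMB2 response: verify the
 * minimum size and make sure the offset/length stay within the received SMB.
 */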
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)

{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;


	if (buffer_length < min_buf_size) {
		cERROR(1, "buffer length %d smaller than minimum size %d",
			   buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cERROR(1, "buffer length %d or smb length %d too large",
			   buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cERROR(1, "illegal server response, bad offset to data");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)

{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}

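/*
 * Common helper for SMB2 QUERY_INFO on a file handle: sends the request for
 * the given info class and copies the validated response into "data".
 */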
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cFYI(1, "Query Info");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	DeleteMidQEntry(mid);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cFYI(1, "In echo request");

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cFYI(1, "Echo request failed: %d", rc);

	cifs_small_buf_release(req);
	return rc;
}

int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cFYI(1, "Flush");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if ((rc != 0) && tcon)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}

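/*
 * Completion handler for async reads: verify the signature when signing is
 * active, account the bytes read, set rdata->result and queue the work item.
 */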
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
		mid->mid, mid->mid_state, rdata->result, rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sec_mode &
		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cERROR(1, "SMB signature verification returned "
				       "error = %d", rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };

	cFYI(1, "%s: offset=%llu bytes=%u", __func__,
		rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;
	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc)
		return rc;

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, 0);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}

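/*
 * Synchronous SMB2 READ.  On success *nbytes is set to the length returned
 * by the server and the data is either copied into *buf or the response
 * buffer is handed back to the caller via *buf and *buf_type.
 */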
Pavel Shilovskyd8e05032012-09-18 16:20:30 -07001497int
1498SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1499 unsigned int *nbytes, char **buf, int *buf_type)
1500{
1501 int resp_buftype, rc = -EACCES;
1502 struct smb2_read_rsp *rsp = NULL;
1503 struct kvec iov[1];
1504
1505 *nbytes = 0;
1506 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1507 if (rc)
1508 return rc;
1509
1510 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1511 &resp_buftype, CIFS_LOG_ERROR);
1512
1513 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1514
1515 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1516 free_rsp_buf(resp_buftype, iov[0].iov_base);
1517 return 0;
1518 }
1519
1520 if (rc) {
1521 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
1522 cERROR(1, "Send error in read = %d", rc);
1523 } else {
1524 *nbytes = le32_to_cpu(rsp->DataLength);
1525 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1526 (*nbytes > io_parms->length)) {
1527 cFYI(1, "bad length %d for count %d", *nbytes,
1528 io_parms->length);
1529 rc = -EIO;
1530 *nbytes = 0;
1531 }
1532 }
1533
1534 if (*buf) {
1535 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1536 *nbytes);
1537 free_rsp_buf(resp_buftype, iov[0].iov_base);
1538 } else if (resp_buftype != CIFS_NO_BUFFER) {
1539 *buf = iov[0].iov_base;
1540 if (resp_buftype == CIFS_SMALL_BUFFER)
1541 *buf_type = CIFS_SMALL_BUFFER;
1542 else if (resp_buftype == CIFS_LARGE_BUFFER)
1543 *buf_type = CIFS_LARGE_BUFFER;
1544 }
1545 return rc;
1546}
1547
Pavel Shilovsky33319142012-09-18 16:20:29 -07001548/*
1549 * Check the mid_state and signature on received buffer (if any), and queue the
1550 * workqueue completion task.
1551 */
1552static void
1553smb2_writev_callback(struct mid_q_entry *mid)
1554{
1555 struct cifs_writedata *wdata = mid->callback_data;
1556 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1557 unsigned int written;
1558 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1559 unsigned int credits_received = 1;
1560
1561 switch (mid->mid_state) {
1562 case MID_RESPONSE_RECEIVED:
1563 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1564 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1565 if (wdata->result != 0)
1566 break;
1567
1568 written = le32_to_cpu(rsp->DataLength);
1569 /*
1570 * Mask off high 16 bits when bytes written as returned
1571 * by the server is greater than bytes requested by the
1572 * client. OS/2 servers are known to set incorrect
1573 * CountHigh values.
1574 */
1575 if (written > wdata->bytes)
1576 written &= 0xFFFF;
1577
1578 if (written < wdata->bytes)
1579 wdata->result = -ENOSPC;
1580 else
1581 wdata->bytes = written;
1582 break;
1583 case MID_REQUEST_SUBMITTED:
1584 case MID_RETRY_NEEDED:
1585 wdata->result = -EAGAIN;
1586 break;
1587 default:
1588 wdata->result = -EIO;
1589 break;
1590 }
1591
1592 if (wdata->result)
1593 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1594
1595 queue_work(cifsiod_wq, &wdata->work);
1596 DeleteMidQEntry(mid);
1597 add_credits(tcon->ses->server, credits_received, 0);
1598}
1599
1600/* smb2_async_writev - send an async write, and set up mid to handle result */
1601int
1602smb2_async_writev(struct cifs_writedata *wdata)
1603{
Jeff Laytoneddb0792012-09-18 16:20:35 -07001604 int rc = -EACCES;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001605 struct smb2_write_req *req = NULL;
1606 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001607 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001608 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001609
1610 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
1611 if (rc)
1612 goto async_writev_out;
1613
Pavel Shilovsky33319142012-09-18 16:20:29 -07001614 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1615
1616 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1617 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1618 req->WriteChannelInfoOffset = 0;
1619 req->WriteChannelInfoLength = 0;
1620 req->Channel = 0;
1621 req->Offset = cpu_to_le64(wdata->offset);
1622 /* 4 for rfc1002 length field */
1623 req->DataOffset = cpu_to_le16(
1624 offsetof(struct smb2_write_req, Buffer) - 4);
1625 req->RemainingBytes = 0;
1626
1627 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07001628 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1629 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001630
Jeff Laytoneddb0792012-09-18 16:20:35 -07001631 rqst.rq_iov = &iov;
1632 rqst.rq_nvec = 1;
1633 rqst.rq_pages = wdata->pages;
1634 rqst.rq_npages = wdata->nr_pages;
1635 rqst.rq_pagesz = wdata->pagesz;
1636 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001637
1638 cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
1639
1640 req->Length = cpu_to_le32(wdata->bytes);
1641
1642 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1643
1644 kref_get(&wdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001645 rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
1646 smb2_writev_callback, wdata, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001647
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001648 if (rc) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07001649 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001650 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1651 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001652
Pavel Shilovsky33319142012-09-18 16:20:29 -07001653async_writev_out:
1654 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001655 return rc;
1656}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001657
1658/*
1659 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
1660 * The length field from io_parms must be at least 1 and indicates a number of
1661 * elements with data to write that begins with position 1 in iov array. All
1662 * data length is specified by count.
1663 */
1664int
1665SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
1666 unsigned int *nbytes, struct kvec *iov, int n_vec)
1667{
1668 int rc = 0;
1669 struct smb2_write_req *req = NULL;
1670 struct smb2_write_rsp *rsp = NULL;
1671 int resp_buftype;
1672 *nbytes = 0;
1673
1674 if (n_vec < 1)
1675 return rc;
1676
1677 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
1678 if (rc)
1679 return rc;
1680
1681 if (io_parms->tcon->ses->server == NULL)
1682 return -ECONNABORTED;
1683
1684 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1685
1686 req->PersistentFileId = io_parms->persistent_fid;
1687 req->VolatileFileId = io_parms->volatile_fid;
1688 req->WriteChannelInfoOffset = 0;
1689 req->WriteChannelInfoLength = 0;
1690 req->Channel = 0;
1691 req->Length = cpu_to_le32(io_parms->length);
1692 req->Offset = cpu_to_le64(io_parms->offset);
1693 /* 4 for rfc1002 length field */
1694 req->DataOffset = cpu_to_le16(
1695 offsetof(struct smb2_write_req, Buffer) - 4);
1696 req->RemainingBytes = 0;
1697
1698 iov[0].iov_base = (char *)req;
1699 /* 4 for rfc1002 length field and 1 for Buffer */
1700 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1701
1702 /* length of entire message including data to be written */
1703 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
1704
1705 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
1706 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001707 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001708
1709 if (rc) {
1710 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
1711 cERROR(1, "Send error in write = %d", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001712 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001713 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001714
1715 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001716 return rc;
1717}
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001718
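/*
 * Count the directory entries that lie between bufstart and end_of_buf by
 * walking the NextEntryOffset chain (a zero offset terminates the chain),
 * and record a pointer to the last complete entry in *lastentry. "size" is
 * the fixed portion of one entry for the info level in use.
 */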
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001719static unsigned int
1720num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
1721{
1722 int len;
1723 unsigned int entrycount = 0;
1724 unsigned int next_offset = 0;
1725 FILE_DIRECTORY_INFO *entryptr;
1726
1727 if (bufstart == NULL)
1728 return 0;
1729
1730 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
1731
1732 while (1) {
1733 entryptr = (FILE_DIRECTORY_INFO *)
1734 ((char *)entryptr + next_offset);
1735
1736 if ((char *)entryptr + size > end_of_buf) {
1737 cERROR(1, "malformed search entry would overflow");
1738 break;
1739 }
1740
1741 len = le32_to_cpu(entryptr->FileNameLength);
1742 if ((char *)entryptr + len + size > end_of_buf) {
1743			cERROR(1, "directory entry name would overflow the "
1744				"frame; end of buf %p", end_of_buf);
1745 break;
1746 }
1747
1748 *lastentry = (char *)entryptr;
1749 entrycount++;
1750
1751 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
1752 if (!next_offset)
1753 break;
1754 }
1755
1756 return entrycount;
1757}
1758
1759/*
1760 * Readdir/FindFirst
1761 */
1762int
1763SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
1764 u64 persistent_fid, u64 volatile_fid, int index,
1765 struct cifs_search_info *srch_inf)
1766{
1767 struct smb2_query_directory_req *req;
1768 struct smb2_query_directory_rsp *rsp = NULL;
1769 struct kvec iov[2];
1770 int rc = 0;
1771 int len;
1772 int resp_buftype;
1773 unsigned char *bufptr;
1774 struct TCP_Server_Info *server;
1775 struct cifs_ses *ses = tcon->ses;
1776	__le16 asterisk = cpu_to_le16('*');
1777 char *end_of_smb;
1778 unsigned int output_size = CIFSMaxBufSize;
1779 size_t info_buf_size;
1780
1781 if (ses && (ses->server))
1782 server = ses->server;
1783 else
1784 return -EIO;
1785
1786 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
1787 if (rc)
1788 return rc;
1789
1790 switch (srch_inf->info_level) {
1791 case SMB_FIND_FILE_DIRECTORY_INFO:
1792 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
1793 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
1794 break;
1795 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
1796 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
1797 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
1798 break;
1799 default:
1800 cERROR(1, "info level %u isn't supported",
1801 srch_inf->info_level);
1802 rc = -EINVAL;
1803 goto qdir_exit;
1804 }
1805
1806 req->FileIndex = cpu_to_le32(index);
1807 req->PersistentFileId = persistent_fid;
1808 req->VolatileFileId = volatile_fid;
1809
1810 len = 0x2;
1811 bufptr = req->Buffer;
1812	memcpy(bufptr, &asterisk, len);
1813
1814 req->FileNameOffset =
1815 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
1816 req->FileNameLength = cpu_to_le16(len);
1817 /*
1818 * BB could be 30 bytes or so longer if we used SMB2 specific
1819 * buffer lengths, but this is safe and close enough.
1820 */
1821 output_size = min_t(unsigned int, output_size, server->maxBuf);
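	/* 2 << 15 is 64KiB; keep the directory response under this cap */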
1822 output_size = min_t(unsigned int, output_size, 2 << 15);
1823 req->OutputBufferLength = cpu_to_le32(output_size);
1824
1825 iov[0].iov_base = (char *)req;
1826 /* 4 for RFC1001 length and 1 for Buffer */
1827 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1828
1829 iov[1].iov_base = (char *)(req->Buffer);
1830 iov[1].iov_len = len;
1831
1832 inc_rfc1001_len(req, len - 1 /* Buffer */);
1833
1834 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001835 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
1836
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001837 if (rc) {
1838 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1839 goto qdir_exit;
1840 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001841
1842 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
1843 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
1844 info_buf_size);
1845 if (rc)
1846 goto qdir_exit;
1847
1848 srch_inf->unicode = true;
1849
1850 if (srch_inf->ntwrk_buf_start) {
1851 if (srch_inf->smallBuf)
1852 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
1853 else
1854 cifs_buf_release(srch_inf->ntwrk_buf_start);
1855 }
1856 srch_inf->ntwrk_buf_start = (char *)rsp;
1857 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
1858 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
1859 /* 4 for rfc1002 length field */
1860 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
1861 srch_inf->entries_in_buffer =
1862 num_entries(srch_inf->srch_entries_start, end_of_smb,
1863 &srch_inf->last_entry, info_buf_size);
1864 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
1865 cFYI(1, "num entries %d last_index %lld srch start %p srch end %p",
1866 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
1867 srch_inf->srch_entries_start, srch_inf->last_entry);
1868 if (resp_buftype == CIFS_LARGE_BUFFER)
1869 srch_inf->smallBuf = false;
1870 else if (resp_buftype == CIFS_SMALL_BUFFER)
1871 srch_inf->smallBuf = true;
1872 else
1873 cERROR(1, "illegal search buffer type");
1874
1875 if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
1876 srch_inf->endOfSearch = 1;
1877 else
1878 srch_inf->endOfSearch = 0;
1879
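	/*
	 * On success the response buffer is handed off via ntwrk_buf_start
	 * above rather than freed here; it is released when the search is
	 * torn down or replaced on the next call, as in the block above.
	 */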
1880 return rc;
1881
1882qdir_exit:
1883 free_rsp_buf(resp_buftype, rsp);
1884 return rc;
1885}
1886
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001887static int
1888send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001889 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001890 unsigned int num, void **data, unsigned int *size)
1891{
1892 struct smb2_set_info_req *req;
1893 struct smb2_set_info_rsp *rsp = NULL;
1894 struct kvec *iov;
1895 int rc = 0;
1896 int resp_buftype;
1897 unsigned int i;
1898 struct TCP_Server_Info *server;
1899 struct cifs_ses *ses = tcon->ses;
1900
1901 if (ses && (ses->server))
1902 server = ses->server;
1903 else
1904 return -EIO;
1905
1906 if (!num)
1907 return -EINVAL;
1908
1909 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
1910 if (!iov)
1911 return -ENOMEM;
1912
1913 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
1914 if (rc) {
1915 kfree(iov);
1916 return rc;
1917 }
1918
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001919 req->hdr.ProcessId = cpu_to_le32(pid);
1920
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001921 req->InfoType = SMB2_O_INFO_FILE;
1922 req->FileInfoClass = info_class;
1923 req->PersistentFileId = persistent_fid;
1924 req->VolatileFileId = volatile_fid;
1925
1926 /* 4 for RFC1001 length and 1 for Buffer */
1927 req->BufferOffset =
1928 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
1929 req->BufferLength = cpu_to_le32(*size);
1930
1931 inc_rfc1001_len(req, *size - 1 /* Buffer */);
1932
1933 memcpy(req->Buffer, *data, *size);
1934
1935 iov[0].iov_base = (char *)req;
1936 /* 4 for RFC1001 length */
1937 iov[0].iov_len = get_rfc1002_length(req) + 4;
1938
1939 for (i = 1; i < num; i++) {
1940 inc_rfc1001_len(req, size[i]);
1941 le32_add_cpu(&req->BufferLength, size[i]);
1942 iov[i].iov_base = (char *)data[i];
1943 iov[i].iov_len = size[i];
1944 }
1945
1946 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
1947 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
1948
1949 if (rc != 0) {
1950 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
1951 goto out;
1952 }
1953
1954 if (rsp == NULL) {
1955 rc = -EIO;
1956 goto out;
1957 }
1958
1959out:
1960 free_rsp_buf(resp_buftype, rsp);
1961 kfree(iov);
1962 return rc;
1963}
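/*
 * send_set_info() callers pass parallel data[]/size[] arrays: data[0] is the
 * fixed info structure and is copied into the request buffer itself, while
 * any further chunks ride in their own iovs and only grow BufferLength and
 * the rfc1002 length. SMB2_rename() below is a two-chunk example (fixed
 * rename info plus the variable-length target name).
 */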
1964
1965int
1966SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
1967 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
1968{
1969 struct smb2_file_rename_info info;
1970 void **data;
1971 unsigned int size[2];
1972 int rc;
1973 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
1974
1975 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
1976 if (!data)
1977 return -ENOMEM;
1978
1979 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
1980 /* 0 = fail if target already exists */
1981	info.RootDirectory = 0; /* must be zero (MBZ) for network operations */
1982 info.FileNameLength = cpu_to_le32(len);
1983
1984 data[0] = &info;
1985 size[0] = sizeof(struct smb2_file_rename_info);
1986
1987 data[1] = target_file;
1988 size[1] = len + 2 /* null */;
1989
1990 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001991 current->tgid, FILE_RENAME_INFORMATION, 2, data,
1992 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001993 kfree(data);
1994 return rc;
1995}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07001996
1997int
1998SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
1999 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
2000{
2001 struct smb2_file_link_info info;
2002 void **data;
2003 unsigned int size[2];
2004 int rc;
2005 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
2006
2007 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
2008 if (!data)
2009 return -ENOMEM;
2010
2011 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
2012 /* 0 = fail if link already exists */
2013	info.RootDirectory = 0; /* must be zero (MBZ) for network operations */
2014 info.FileNameLength = cpu_to_le32(len);
2015
2016 data[0] = &info;
2017 size[0] = sizeof(struct smb2_file_link_info);
2018
2019 data[1] = target_file;
2020 size[1] = len + 2 /* null */;
2021
2022 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002023 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07002024 kfree(data);
2025 return rc;
2026}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07002027
2028int
2029SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2030 u64 volatile_fid, u32 pid, __le64 *eof)
2031{
2032 struct smb2_file_eof_info info;
2033 void *data;
2034 unsigned int size;
2035
2036 info.EndOfFile = *eof;
2037
2038 data = &info;
2039 size = sizeof(struct smb2_file_eof_info);
2040
2041 return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
2042 FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
2043}
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07002044
2045int
2046SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
2047 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
2048{
2049 unsigned int size;
2050 size = sizeof(FILE_BASIC_INFO);
2051 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
2052 current->tgid, FILE_BASIC_INFORMATION, 1,
2053 (void **)&buf, &size);
2054}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07002055
2056int
2057SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
2058 const u64 persistent_fid, const u64 volatile_fid,
2059 __u8 oplock_level)
2060{
2061 int rc;
2062 struct smb2_oplock_break *req = NULL;
2063
2064 cFYI(1, "SMB2_oplock_break");
2065 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2066
2067 if (rc)
2068 return rc;
2069
2070 req->VolatileFid = volatile_fid;
2071 req->PersistentFid = persistent_fid;
2072 req->OplockLevel = oplock_level;
2073 req->hdr.CreditRequest = cpu_to_le16(1);
2074
2075 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2076 /* SMB2 buffer freed by function above */
2077
2078 if (rc) {
2079 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
2080 cFYI(1, "Send error in Oplock Break = %d", rc);
2081 }
2082
2083 return rc;
2084}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002085
2086static void
2087copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
2088 struct kstatfs *kst)
2089{
2090 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
2091 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
2092 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
2093 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
2094 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2095 return;
2096}
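/*
 * Worked example for the conversion above (illustrative numbers): with
 * BytesPerSector = 512 and SectorsPerAllocationUnit = 8, f_bsize becomes
 * 4096, and the three allocation-unit counts are reported in 4096-byte
 * blocks.
 */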
2097
2098static int
2099build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2100 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2101{
2102 int rc;
2103 struct smb2_query_info_req *req;
2104
2105 cFYI(1, "Query FSInfo level %d", level);
2106
2107 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2108 return -EIO;
2109
2110 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2111 if (rc)
2112 return rc;
2113
2114 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2115 req->FileInfoClass = level;
2116 req->PersistentFileId = persistent_fid;
2117 req->VolatileFileId = volatile_fid;
2118 /* 4 for rfc1002 length field and 1 for pad */
2119 req->InputBufferOffset =
2120 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2121 req->OutputBufferLength = cpu_to_le32(
2122 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2123
2124 iov->iov_base = (char *)req;
2125 /* 4 for rfc1002 length field */
2126 iov->iov_len = get_rfc1002_length(req) + 4;
2127 return 0;
2128}
2129
2130int
2131SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2132 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2133{
2134 struct smb2_query_info_rsp *rsp = NULL;
2135 struct kvec iov;
2136 int rc = 0;
2137 int resp_buftype;
2138 struct cifs_ses *ses = tcon->ses;
2139 struct smb2_fs_full_size_info *info = NULL;
2140
2141 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2142 sizeof(struct smb2_fs_full_size_info),
2143 persistent_fid, volatile_fid);
2144 if (rc)
2145 return rc;
2146
2147 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2148 if (rc) {
2149 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2150 goto qinf_exit;
2151 }
2152 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2153
2154 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2155 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2156 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2157 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2158 sizeof(struct smb2_fs_full_size_info));
2159 if (!rc)
2160 copy_fs_info_to_kstatfs(info, fsdata);
2161
2162qinf_exit:
2163 free_rsp_buf(resp_buftype, iov.iov_base);
2164 return rc;
2165}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002166
2167int
2168smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2169 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2170 const __u32 num_lock, struct smb2_lock_element *buf)
2171{
2172 int rc = 0;
2173 struct smb2_lock_req *req = NULL;
2174 struct kvec iov[2];
2175 int resp_buf_type;
2176 unsigned int count;
2177
2178 cFYI(1, "smb2_lockv num lock %d", num_lock);
2179
2180 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2181 if (rc)
2182 return rc;
2183
2184 req->hdr.ProcessId = cpu_to_le32(pid);
2185 req->LockCount = cpu_to_le16(num_lock);
2186
2187 req->PersistentFileId = persist_fid;
2188 req->VolatileFileId = volatile_fid;
2189
2190 count = num_lock * sizeof(struct smb2_lock_element);
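	/*
	 * The request struct already reserves space for one lock element, so
	 * grow the rfc1002 length by the extra elements only; all elements
	 * are then sent from buf via iov[1], which is why the full count is
	 * subtracted from iov[0] below.
	 */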
2191 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2192
2193 iov[0].iov_base = (char *)req;
2194 /* 4 for rfc1002 length field and count for all locks */
2195 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2196 iov[1].iov_base = (char *)buf;
2197 iov[1].iov_len = count;
2198
2199 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2200 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2201 if (rc) {
2202 cFYI(1, "Send error in smb2_lockv = %d", rc);
2203 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2204 }
2205
2206 return rc;
2207}
2208
2209int
2210SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2211 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2212 const __u64 length, const __u64 offset, const __u32 lock_flags,
2213 const bool wait)
2214{
2215 struct smb2_lock_element lock;
2216
2217 lock.Offset = cpu_to_le64(offset);
2218 lock.Length = cpu_to_le64(length);
2219 lock.Flags = cpu_to_le32(lock_flags);
2220 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2221 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2222
2223 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2224}
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002225
2226int
2227SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2228 __u8 *lease_key, const __le32 lease_state)
2229{
2230 int rc;
2231 struct smb2_lease_ack *req = NULL;
2232
2233 cFYI(1, "SMB2_lease_break");
2234 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2235
2236 if (rc)
2237 return rc;
2238
2239 req->hdr.CreditRequest = cpu_to_le16(1);
2240 req->StructureSize = cpu_to_le16(36);
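	/*
	 * A lease ack body is 36 bytes versus the 24 allocated for a plain
	 * oplock break, so grow the PDU by the 12-byte difference.
	 */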
2241 inc_rfc1001_len(req, 12);
2242
2243 memcpy(req->LeaseKey, lease_key, 16);
2244 req->LeaseState = lease_state;
2245
2246 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2247 /* SMB2 buffer freed by function above */
2248
2249 if (rc) {
2250 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
2251 cFYI(1, "Send error in Lease Break = %d", rc);
2252 }
2253
2254 return rc;
2255}