1/*
2 * fs/cifs/smb2pdu.c
3 *
4 * Copyright (C) International Business Machines Corp., 2009, 2012
5 * Etersoft, 2012
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
8 *
9 * Contains the routines for constructing the SMB2 PDUs themselves
10 *
11 * This library is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU Lesser General Public License as published
13 * by the Free Software Foundation; either version 2.1 of the License, or
14 * (at your option) any later version.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
19 * the GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this library; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
27 /* Note that there are handle based routines which must be */
28 /* treated slightly differently for reconnection purposes since we never */
29 /* want to reuse a stale file handle and only the caller knows the file info */
30
31#include <linux/fs.h>
32#include <linux/kernel.h>
33#include <linux/vfs.h>
34#include <linux/task_io_accounting_ops.h>
35#include <linux/uaccess.h>
36#include <linux/pagemap.h>
37#include <linux/xattr.h>
38#include "smb2pdu.h"
39#include "cifsglob.h"
40#include "cifsacl.h"
41#include "cifsproto.h"
42#include "smb2proto.h"
43#include "cifs_unicode.h"
44#include "cifs_debug.h"
45#include "ntlmssp.h"
46#include "smb2status.h"
47#include "smb2glob.h"
48#include "cifspdu.h"
49
50/*
51 * The following table defines the expected "StructureSize" of SMB2 requests
52 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
53 *
54 * Note that commands are defined in smb2pdu.h in le16 but the array below is
55 * indexed by command in host byte order.
56 */
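/*
 * These match the StructureSize values the SMB2 protocol defines for each
 * request; where a request ends in a variable length Buffer, one byte of
 * that buffer is included in the count, hence the odd sizes (eg 57 for
 * SMB2_CREATE, 49 for READ/WRITE).
 */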
57static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
58 /* SMB2_NEGOTIATE */ 36,
59 /* SMB2_SESSION_SETUP */ 25,
60 /* SMB2_LOGOFF */ 4,
61 /* SMB2_TREE_CONNECT */ 9,
62 /* SMB2_TREE_DISCONNECT */ 4,
63 /* SMB2_CREATE */ 57,
64 /* SMB2_CLOSE */ 24,
65 /* SMB2_FLUSH */ 24,
66 /* SMB2_READ */ 49,
67 /* SMB2_WRITE */ 49,
68 /* SMB2_LOCK */ 48,
69 /* SMB2_IOCTL */ 57,
70 /* SMB2_CANCEL */ 4,
71 /* SMB2_ECHO */ 4,
72 /* SMB2_QUERY_DIRECTORY */ 33,
73 /* SMB2_CHANGE_NOTIFY */ 32,
74 /* SMB2_QUERY_INFO */ 41,
75 /* SMB2_SET_INFO */ 33,
76 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
77};
78
79
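/*
 * Fill in the fixed portion of the SMB2 header for a request: protocol id,
 * per-command StructureSize2, credit request, process id and, when a tcon
 * is available, the tree/session ids plus the DFS and signing flags.
 */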
80static void
81smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
82 const struct cifs_tcon *tcon)
83{
84 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
85 char *temp = (char *)hdr;
86 /* lookup word count ie StructureSize from table */
87 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
88
89 /*
90 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
91 * largest operations (Create)
92 */
93 memset(temp, 0, 256);
94
95 /* Note this is only network field converted to big endian */
96 hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
97 - 4 /* RFC 1001 length field itself not counted */);
98
99 hdr->ProtocolId[0] = 0xFE;
100 hdr->ProtocolId[1] = 'S';
101 hdr->ProtocolId[2] = 'M';
102 hdr->ProtocolId[3] = 'B';
103 hdr->StructureSize = cpu_to_le16(64);
104 hdr->Command = smb2_cmd;
105 hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
106 hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
107
108 if (!tcon)
109 goto out;
110
111 hdr->TreeId = tcon->tid;
112 /* Uid is not converted */
113 if (tcon->ses)
114 hdr->SessionId = tcon->ses->Suid;
115 /* BB check following DFS flags BB */
116 /* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
117 if (tcon->share_flags & SHI1005_FLAGS_DFS)
118 hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
119 /* BB how does SMB2 do case sensitive? */
120 /* if (tcon->nocase)
121 hdr->Flags |= SMBFLG_CASELESS; */
122 if (tcon->ses && tcon->ses->server &&
123 (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED))
124 hdr->Flags |= SMB2_FLAGS_SIGNED;
125out:
126 pdu->StructureSize2 = cpu_to_le16(parmsize);
127 return;
128}
129
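/*
 * If the transport needs to be reconnected, wait for the demultiplex
 * thread to re-establish the socket, then redo negotiate, session setup
 * and tree connect as needed.  Handle-based commands get -EAGAIN so the
 * caller can reopen the file handle before retrying.
 */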
130static int
131smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
132{
133 int rc = 0;
134 struct nls_table *nls_codepage;
135 struct cifs_ses *ses;
136 struct TCP_Server_Info *server;
137
138 /*
139 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
140 * check for tcp and smb session status done differently
141 * for those three - in the calling routine.
142 */
143 if (tcon == NULL)
144 return rc;
145
146 if (smb2_command == SMB2_TREE_CONNECT)
147 return rc;
148
149 if (tcon->tidStatus == CifsExiting) {
150 /*
151 * only tree disconnect, open, and write,
152 * (and ulogoff which does not have tcon)
153 * are allowed as we start force umount.
154 */
155 if ((smb2_command != SMB2_WRITE) &&
156 (smb2_command != SMB2_CREATE) &&
157 (smb2_command != SMB2_TREE_DISCONNECT)) {
158 cFYI(1, "can not send cmd %d while umounting",
159 smb2_command);
160 return -ENODEV;
161 }
162 }
163 if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
164 (!tcon->ses->server))
165 return -EIO;
166
167 ses = tcon->ses;
168 server = ses->server;
169
170 /*
171 * Give demultiplex thread up to 10 seconds to reconnect, should be
172 * greater than cifs socket timeout which is 7 seconds
173 */
174 while (server->tcpStatus == CifsNeedReconnect) {
175 /*
176 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
177 * here since they are implicitly done when session drops.
178 */
179 switch (smb2_command) {
180 /*
181 * BB Should we keep oplock break and add flush to exceptions?
182 */
183 case SMB2_TREE_DISCONNECT:
184 case SMB2_CANCEL:
185 case SMB2_CLOSE:
186 case SMB2_OPLOCK_BREAK:
187 return -EAGAIN;
188 }
189
190 wait_event_interruptible_timeout(server->response_q,
191 (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
192
193 /* are we still trying to reconnect? */
194 if (server->tcpStatus != CifsNeedReconnect)
195 break;
196
197 /*
198 * on "soft" mounts we wait once. Hard mounts keep
199 * retrying until process is killed or server comes
200 * back on-line
201 */
202 if (!tcon->retry) {
203 cFYI(1, "gave up waiting on reconnect in smb_init");
204 return -EHOSTDOWN;
205 }
206 }
207
208 if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
209 return rc;
210
211 nls_codepage = load_nls_default();
212
213 /*
214 * need to prevent multiple threads trying to simultaneously reconnect
215 * the same SMB session
216 */
217 mutex_lock(&tcon->ses->session_mutex);
218 rc = cifs_negotiate_protocol(0, tcon->ses);
219 if (!rc && tcon->ses->need_reconnect)
220 rc = cifs_setup_session(0, tcon->ses, nls_codepage);
221
222 if (rc || !tcon->need_reconnect) {
223 mutex_unlock(&tcon->ses->session_mutex);
224 goto out;
225 }
226
227 cifs_mark_open_files_invalid(tcon);
228 rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
229 mutex_unlock(&tcon->ses->session_mutex);
230 cFYI(1, "reconnect tcon rc = %d", rc);
231 if (rc)
232 goto out;
233 atomic_inc(&tconInfoReconnectCount);
234 /*
235 * BB FIXME add code to check if wsize needs update due to negotiated
236 * smb buffer size shrinking.
237 */
238out:
239 /*
240 * Check if handle based operation so we know whether we can continue
241 * or not without returning to caller to reset file handle.
242 */
243 /*
244 * BB Is flush done by server on drop of tcp session? Should we special
245 * case it and skip above?
246 */
247 switch (smb2_command) {
248 case SMB2_FLUSH:
249 case SMB2_READ:
250 case SMB2_WRITE:
251 case SMB2_LOCK:
252 case SMB2_IOCTL:
253 case SMB2_QUERY_DIRECTORY:
254 case SMB2_CHANGE_NOTIFY:
255 case SMB2_QUERY_INFO:
256 case SMB2_SET_INFO:
257 return -EAGAIN;
258 }
259 unload_nls(nls_codepage);
260 return rc;
261}
262
263/*
264 * Allocate and return pointer to an SMB request hdr, and set basic
265 * SMB information in the SMB header. If the return code is zero, this
266 * function must have filled in request_buf pointer.
267 */
268static int
269small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
270 void **request_buf)
271{
272 int rc = 0;
273
274 rc = smb2_reconnect(smb2_command, tcon);
275 if (rc)
276 return rc;
277
278 /* BB eventually switch this to SMB2 specific small buf size */
279 *request_buf = cifs_small_buf_get();
280 if (*request_buf == NULL) {
281 /* BB should we add a retry in here if not a writepage? */
282 return -ENOMEM;
283 }
284
285 smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
286
287 if (tcon != NULL) {
288#ifdef CONFIG_CIFS_STATS2
289 uint16_t com_code = le16_to_cpu(smb2_command);
290 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
291#endif
292 cifs_stats_inc(&tcon->num_smbs_sent);
293 }
294
295 return rc;
296}
297
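/* Release a response buffer to the correct (small or large) buffer pool. */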
298static void
299free_rsp_buf(int resp_buftype, void *rsp)
300{
301 if (resp_buftype == CIFS_SMALL_BUFFER)
302 cifs_small_buf_release(rsp);
303 else if (resp_buftype == CIFS_LARGE_BUFFER)
304 cifs_buf_release(rsp);
305}
306
307
308/*
309 *
310 * SMB2 Worker functions follow:
311 *
312 * The general structure of the worker functions is:
313 * 1) Call smb2_init (assembles SMB2 header)
314 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
315 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
316 * 4) Decode SMB2 command specific fields in the fixed length area
317 * 5) Decode variable length data area (if any for this SMB2 command type)
318 * 6) Call free smb buffer
319 * 7) return
320 *
321 */
322
323int
324SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
325{
326 struct smb2_negotiate_req *req;
327 struct smb2_negotiate_rsp *rsp;
328 struct kvec iov[1];
329 int rc = 0;
330 int resp_buftype;
331 struct TCP_Server_Info *server;
332 unsigned int sec_flags;
333 u16 temp = 0;
334 int blob_offset, blob_length;
335 char *security_blob;
336 int flags = CIFS_NEG_OP;
337
338 cFYI(1, "Negotiate protocol");
339
340 if (ses->server)
341 server = ses->server;
342 else {
343 rc = -EIO;
344 return rc;
345 }
346
347 rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
348 if (rc)
349 return rc;
350
351 /* if any of auth flags (ie not sign or seal) are overridden use them */
352 if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
353 sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/
354 else /* if override flags set only sign/seal OR them with global auth */
355 sec_flags = global_secflags | ses->overrideSecFlg;
356
357 cFYI(1, "sec_flags 0x%x", sec_flags);
358
359 req->hdr.SessionId = 0;
360
361 req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
362
363 req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
364 inc_rfc1001_len(req, 2);
365
366 /* only one of SMB2 signing flags may be set in SMB2 request */
367 if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
368 temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
369 else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
370 temp = SMB2_NEGOTIATE_SIGNING_ENABLED;
371
372 req->SecurityMode = cpu_to_le16(temp);
373
374 req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
375
376 memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);
377
378 iov[0].iov_base = (char *)req;
379 /* 4 for rfc1002 length field */
380 iov[0].iov_len = get_rfc1002_length(req) + 4;
381
382 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
383
384 rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
385 /*
386 * No tcon so can't do
387 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
388 */
389 if (rc != 0)
390 goto neg_exit;
391
392 cFYI(1, "mode 0x%x", rsp->SecurityMode);
393
394 /* BB we may eventually want to match the negotiated vs. requested
395 dialect, even though we are only requesting one at a time */
396 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
397 cFYI(1, "negotiated smb2.0 dialect");
398 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
399 cFYI(1, "negotiated smb2.1 dialect");
400 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
401 cFYI(1, "negotiated smb3.0 dialect");
402 else {
403 cERROR(1, "Illegal dialect returned by server %d",
404 le16_to_cpu(rsp->DialectRevision));
405 rc = -EIO;
406 goto neg_exit;
407 }
408 server->dialect = le16_to_cpu(rsp->DialectRevision);
409
410 server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
411 server->max_read = le32_to_cpu(rsp->MaxReadSize);
412 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
413 /* BB Do we need to validate the SecurityMode? */
414 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
415 server->capabilities = le32_to_cpu(rsp->Capabilities);
416 /* Internal types */
417 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
418
419 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
420 &rsp->hdr);
421 if (blob_length == 0) {
422 cERROR(1, "missing security blob on negprot");
423 rc = -EIO;
424 goto neg_exit;
425 }
426
427 cFYI(1, "sec_flags 0x%x", sec_flags);
428 if (sec_flags & CIFSSEC_MUST_SIGN) {
429 cFYI(1, "Signing required");
430 if (!(server->sec_mode & (SMB2_NEGOTIATE_SIGNING_REQUIRED |
431 SMB2_NEGOTIATE_SIGNING_ENABLED))) {
432 cERROR(1, "signing required but server lacks support");
433 rc = -EOPNOTSUPP;
434 goto neg_exit;
435 }
436 server->sec_mode |= SECMODE_SIGN_REQUIRED;
437 } else if (sec_flags & CIFSSEC_MAY_SIGN) {
438 cFYI(1, "Signing optional");
439 if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
440 cFYI(1, "Server requires signing");
441 server->sec_mode |= SECMODE_SIGN_REQUIRED;
442 } else {
443 server->sec_mode &=
444 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
445 }
446 } else {
447 cFYI(1, "Signing disabled");
448 if (server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED) {
449 cERROR(1, "Server requires packet signing to be enabled"
450 " in /proc/fs/cifs/SecurityFlags.");
451 rc = -EOPNOTSUPP;
452 goto neg_exit;
453 }
454 server->sec_mode &=
455 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
456 }
457
458#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
459 rc = decode_neg_token_init(security_blob, blob_length,
460 &server->sec_type);
461 if (rc == 1)
462 rc = 0;
463 else if (rc == 0) {
464 rc = -EIO;
465 goto neg_exit;
466 }
467#endif
468
469neg_exit:
470 free_rsp_buf(resp_buftype, rsp);
471 return rc;
472}
473
474int
475SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
476 const struct nls_table *nls_cp)
477{
478 struct smb2_sess_setup_req *req;
479 struct smb2_sess_setup_rsp *rsp = NULL;
480 struct kvec iov[2];
481 int rc = 0;
482 int resp_buftype;
483 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
484 struct TCP_Server_Info *server;
485 unsigned int sec_flags;
486 u8 temp = 0;
487 u16 blob_length = 0;
488 char *security_blob;
489 char *ntlmssp_blob = NULL;
490 bool use_spnego = false; /* else use raw ntlmssp */
491
492 cFYI(1, "Session Setup");
493
494 if (ses->server)
495 server = ses->server;
496 else {
497 rc = -EIO;
498 return rc;
499 }
500
501 /*
502 * If memory allocation is successful, caller of this function
503 * frees it.
504 */
505 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
506 if (!ses->ntlmssp)
507 return -ENOMEM;
508
509 ses->server->secType = RawNTLMSSP;
510
511ssetup_ntlmssp_authenticate:
512 if (phase == NtLmChallenge)
513 phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
514
515 rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
516 if (rc)
517 return rc;
518
519 /* if any of auth flags (ie not sign or seal) are overridden use them */
520 if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
521 sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/
522 else /* if override flags set only sign/seal OR them with global auth */
523 sec_flags = global_secflags | ses->overrideSecFlg;
524
525 cFYI(1, "sec_flags 0x%x", sec_flags);
526
527 req->hdr.SessionId = 0; /* First session, not a reauthenticate */
528 req->VcNumber = 0; /* MBZ */
529 /* to enable echos and oplocks */
530 req->hdr.CreditRequest = cpu_to_le16(3);
531
532 /* only one of SMB2 signing flags may be set in SMB2 request */
533 if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
534 temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
535 else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED)
536 temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
537 else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
538 temp = SMB2_NEGOTIATE_SIGNING_ENABLED;
539
540 req->SecurityMode = temp;
541 req->Capabilities = 0;
542 req->Channel = 0; /* MBZ */
543
544 iov[0].iov_base = (char *)req;
545 /* 4 for rfc1002 length field and 1 for pad */
546 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
547 if (phase == NtLmNegotiate) {
548 ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
549 GFP_KERNEL);
550 if (ntlmssp_blob == NULL) {
551 rc = -ENOMEM;
552 goto ssetup_exit;
553 }
554 build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
555 if (use_spnego) {
556 /* blob_length = build_spnego_ntlmssp_blob(
557 &security_blob,
558 sizeof(struct _NEGOTIATE_MESSAGE),
559 ntlmssp_blob); */
560 /* BB eventually need to add this */
561 cERROR(1, "spnego not supported for SMB2 yet");
562 rc = -EOPNOTSUPP;
563 kfree(ntlmssp_blob);
564 goto ssetup_exit;
565 } else {
566 blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
567 /* with raw NTLMSSP we don't encapsulate in SPNEGO */
568 security_blob = ntlmssp_blob;
569 }
570 } else if (phase == NtLmAuthenticate) {
571 req->hdr.SessionId = ses->Suid;
572 ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
573 GFP_KERNEL);
574 if (ntlmssp_blob == NULL) {
575 cERROR(1, "failed to malloc ntlmssp blob");
576 rc = -ENOMEM;
577 goto ssetup_exit;
578 }
579 rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
580 nls_cp);
581 if (rc) {
582 cFYI(1, "build_ntlmssp_auth_blob failed %d", rc);
583 goto ssetup_exit; /* BB double check error handling */
584 }
585 if (use_spnego) {
586 /* blob_length = build_spnego_ntlmssp_blob(
587 &security_blob,
588 blob_length,
589 ntlmssp_blob); */
590 cERROR(1, "spnego not supported for SMB2 yet");
591 rc = -EOPNOTSUPP;
592 kfree(ntlmssp_blob);
593 goto ssetup_exit;
594 } else {
595 security_blob = ntlmssp_blob;
596 }
597 } else {
598 cERROR(1, "illegal ntlmssp phase");
599 rc = -EIO;
600 goto ssetup_exit;
601 }
602
603 /* Testing shows that buffer offset must be at location of Buffer[0] */
604 req->SecurityBufferOffset =
605 cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
606 1 /* pad */ - 4 /* rfc1001 len */);
607 req->SecurityBufferLength = cpu_to_le16(blob_length);
608 iov[1].iov_base = security_blob;
609 iov[1].iov_len = blob_length;
610
611 inc_rfc1001_len(req, blob_length - 1 /* pad */);
612
613 /* BB add code to build os and lm fields */
614
615 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, CIFS_LOG_ERROR);
616
617 kfree(security_blob);
618 rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
619 if (resp_buftype != CIFS_NO_BUFFER &&
620 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
621 if (phase != NtLmNegotiate) {
622 cERROR(1, "Unexpected more processing error");
623 goto ssetup_exit;
624 }
625 if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
626 le16_to_cpu(rsp->SecurityBufferOffset)) {
627 cERROR(1, "Invalid security buffer offset %d",
628 le16_to_cpu(rsp->SecurityBufferOffset));
629 rc = -EIO;
630 goto ssetup_exit;
631 }
632
633 /* NTLMSSP Negotiate sent now processing challenge (response) */
634 phase = NtLmChallenge; /* process ntlmssp challenge */
635 rc = 0; /* MORE_PROCESSING is not an error here but expected */
636 ses->Suid = rsp->hdr.SessionId;
637 rc = decode_ntlmssp_challenge(rsp->Buffer,
638 le16_to_cpu(rsp->SecurityBufferLength), ses);
639 }
640
641 /*
642 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
643 * but at least the raw NTLMSSP case works.
644 */
645 /*
646 * No tcon so can't do
647 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
648 */
649 if (rc != 0)
650 goto ssetup_exit;
651
652 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
653ssetup_exit:
654 free_rsp_buf(resp_buftype, rsp);
655
656 /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
657 if ((phase == NtLmChallenge) && (rc == 0))
658 goto ssetup_ntlmssp_authenticate;
659 return rc;
660}
661
662int
663SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
664{
665 struct smb2_logoff_req *req; /* response is also trivial struct */
666 int rc = 0;
667 struct TCP_Server_Info *server;
668
669 cFYI(1, "disconnect session %p", ses);
670
671 if (ses && (ses->server))
672 server = ses->server;
673 else
674 return -EIO;
675
676 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
677 if (rc)
678 return rc;
679
680 /* since no tcon, smb2_init can not do this, so do here */
681 req->hdr.SessionId = ses->Suid;
682 if (server->sec_mode & SECMODE_SIGN_REQUIRED)
683 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
684
685 rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
686 /*
687 * No tcon so can't do
688 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
689 */
690 return rc;
691}
692
693static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
694{
695 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
696}
697
698#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
699
700int
701SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
702 struct cifs_tcon *tcon, const struct nls_table *cp)
703{
704 struct smb2_tree_connect_req *req;
705 struct smb2_tree_connect_rsp *rsp = NULL;
706 struct kvec iov[2];
707 int rc = 0;
708 int resp_buftype;
709 int unc_path_len;
710 struct TCP_Server_Info *server;
711 __le16 *unc_path = NULL;
712
713 cFYI(1, "TCON");
714
715 if ((ses->server) && tree)
716 server = ses->server;
717 else
718 return -EIO;
719
720 if (tcon && tcon->bad_network_name)
721 return -ENOENT;
722
723 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
724 if (unc_path == NULL)
725 return -ENOMEM;
726
727 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
728 unc_path_len *= 2;
729 if (unc_path_len < 2) {
730 kfree(unc_path);
731 return -EINVAL;
732 }
733
734 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
735 if (rc) {
736 kfree(unc_path);
737 return rc;
738 }
739
740 if (tcon == NULL) {
741 /* since no tcon, smb2_init can not do this, so do here */
742 req->hdr.SessionId = ses->Suid;
743 /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
744 req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
745 }
746
747 iov[0].iov_base = (char *)req;
748 /* 4 for rfc1002 length field and 1 for pad */
749 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
750
751 /* Testing shows that buffer offset must be at location of Buffer[0] */
752 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
753 - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
754 req->PathLength = cpu_to_le16(unc_path_len - 2);
755 iov[1].iov_base = unc_path;
756 iov[1].iov_len = unc_path_len;
757
758 inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
759
760 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
761 rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
762
763 if (rc != 0) {
764 if (tcon) {
765 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
766 tcon->need_reconnect = true;
767 }
768 goto tcon_error_exit;
769 }
770
771 if (tcon == NULL) {
772 ses->ipc_tid = rsp->hdr.TreeId;
773 goto tcon_exit;
774 }
775
776 if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
777 cFYI(1, "connection to disk share");
778 else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
779 tcon->ipc = true;
780 cFYI(1, "connection to pipe share");
781 } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
782 tcon->print = true;
783 cFYI(1, "connection to printer");
784 } else {
785 cERROR(1, "unknown share type %d", rsp->ShareType);
786 rc = -EOPNOTSUPP;
787 goto tcon_error_exit;
788 }
789
790 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
791 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
792 tcon->tidStatus = CifsGood;
793 tcon->need_reconnect = false;
794 tcon->tid = rsp->hdr.TreeId;
795 strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
796
797 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
798 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
799 cERROR(1, "DFS capability contradicts DFS flag");
800
801tcon_exit:
802 free_rsp_buf(resp_buftype, rsp);
803 kfree(unc_path);
804 return rc;
805
806tcon_error_exit:
807 if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
808 cERROR(1, "BAD_NETWORK_NAME: %s", tree);
809 tcon->bad_network_name = true;
810 }
811 goto tcon_exit;
812}
813
814int
815SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
816{
817 struct smb2_tree_disconnect_req *req; /* response is trivial */
818 int rc = 0;
819 struct TCP_Server_Info *server;
820 struct cifs_ses *ses = tcon->ses;
821
822 cFYI(1, "Tree Disconnect");
823
824 if (ses && (ses->server))
825 server = ses->server;
826 else
827 return -EIO;
828
829 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
830 return 0;
831
832 rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
833 if (rc)
834 return rc;
835
836 rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
837 if (rc)
838 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
839
840 return rc;
841}
842
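/*
 * Build the "RqLs" lease create context for an SMB2_CREATE request,
 * asking for the caching state that corresponds to the requested oplock
 * level.
 */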
843static struct create_lease *
844create_lease_buf(u8 *lease_key, u8 oplock)
845{
846 struct create_lease *buf;
847
848 buf = kmalloc(sizeof(struct create_lease), GFP_KERNEL);
849 if (!buf)
850 return NULL;
851
852 memset(buf, 0, sizeof(struct create_lease));
853
854 buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
855 buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
856 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
857 buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
858 SMB2_LEASE_READ_CACHING;
859 else if (oplock == SMB2_OPLOCK_LEVEL_II)
860 buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
861 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
862 buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
863 SMB2_LEASE_READ_CACHING |
864 SMB2_LEASE_WRITE_CACHING;
865
866 buf->ccontext.DataOffset = cpu_to_le16(offsetof
867 (struct create_lease, lcontext));
868 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
869 buf->ccontext.NameOffset = cpu_to_le16(offsetof
870 (struct create_lease, Name));
871 buf->ccontext.NameLength = cpu_to_le16(4);
872 buf->Name[0] = 'R';
873 buf->Name[1] = 'q';
874 buf->Name[2] = 'L';
875 buf->Name[3] = 's';
876 return buf;
877}
878
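/*
 * Walk the create contexts returned in an SMB2_CREATE response looking
 * for the "RqLs" lease reply and map its LeaseState to an oplock level.
 */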
879static __u8
880parse_lease_state(struct smb2_create_rsp *rsp)
881{
882 char *data_offset;
883 struct create_lease *lc;
884 bool found = false;
885
886 data_offset = (char *)rsp;
887 data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
888 lc = (struct create_lease *)data_offset;
889 do {
890 char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
891 if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
892 strncmp(name, "RqLs", 4)) {
893 lc = (struct create_lease *)((char *)lc
894 + le32_to_cpu(lc->ccontext.Next));
895 continue;
896 }
897 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
898 return SMB2_OPLOCK_LEVEL_NOCHANGE;
899 found = true;
900 break;
901 } while (le32_to_cpu(lc->ccontext.Next) != 0);
902
903 if (!found)
904 return 0;
905
906 return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
907}
908
909int
910SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
911 u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
912 __u32 create_disposition, __u32 file_attributes, __u32 create_options,
913 __u8 *oplock, struct smb2_file_all_info *buf)
914{
915 struct smb2_create_req *req;
916 struct smb2_create_rsp *rsp;
917 struct TCP_Server_Info *server;
918 struct cifs_ses *ses = tcon->ses;
919 struct kvec iov[3];
920 int resp_buftype;
921 int uni_path_len;
922 __le16 *copy_path = NULL;
923 int copy_size;
924 int rc = 0;
925 int num_iovecs = 2;
926
927 cFYI(1, "create/open");
928
929 if (ses && (ses->server))
930 server = ses->server;
931 else
932 return -EIO;
933
934 rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
935 if (rc)
936 return rc;
937
938 req->ImpersonationLevel = IL_IMPERSONATION;
939 req->DesiredAccess = cpu_to_le32(desired_access);
940 /* File attributes ignored on open (used in create though) */
941 req->FileAttributes = cpu_to_le32(file_attributes);
942 req->ShareAccess = FILE_SHARE_ALL_LE;
943 req->CreateDisposition = cpu_to_le32(create_disposition);
944 req->CreateOptions = cpu_to_le32(create_options);
945 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
946 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
947 - 8 /* pad */ - 4 /* do not count rfc1001 len field */);
948
949 iov[0].iov_base = (char *)req;
950 /* 4 for rfc1002 length field */
951 iov[0].iov_len = get_rfc1002_length(req) + 4;
952
953 /* MUST set path len (NameLength) to 0 opening root of share */
954 if (uni_path_len >= 4) {
955 req->NameLength = cpu_to_le16(uni_path_len - 2);
956 /* -1 since last byte is buf[0] which is sent below (path) */
957 iov[0].iov_len--;
958 if (uni_path_len % 8 != 0) {
959 copy_size = uni_path_len / 8 * 8;
960 if (copy_size < uni_path_len)
961 copy_size += 8;
962
963 copy_path = kzalloc(copy_size, GFP_KERNEL);
964 if (!copy_path)
965 return -ENOMEM;
966 memcpy((char *)copy_path, (const char *)path,
967 uni_path_len);
968 uni_path_len = copy_size;
969 path = copy_path;
970 }
971
972 iov[1].iov_len = uni_path_len;
973 iov[1].iov_base = path;
974 /*
975 * -1 since last byte is buf[0] which was counted in
976 * smb2_buf_len.
977 */
978 inc_rfc1001_len(req, uni_path_len - 1);
979 } else {
980 iov[0].iov_len += 7;
981 req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
982 req->hdr.smb2_buf_length) + 8 - 1);
983 num_iovecs = 1;
984 req->NameLength = 0;
985 }
986
987 if (!server->oplocks)
988 *oplock = SMB2_OPLOCK_LEVEL_NONE;
989
990 if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
991 *oplock == SMB2_OPLOCK_LEVEL_NONE)
992 req->RequestedOplockLevel = *oplock;
993 else {
994 iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
995 if (iov[num_iovecs].iov_base == NULL) {
996 cifs_small_buf_release(req);
997 kfree(copy_path);
998 return -ENOMEM;
999 }
1000 iov[num_iovecs].iov_len = sizeof(struct create_lease);
1001 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
1002 req->CreateContextsOffset = cpu_to_le32(
1003 sizeof(struct smb2_create_req) - 4 - 8 +
1004 iov[num_iovecs-1].iov_len);
1005 req->CreateContextsLength = cpu_to_le32(
1006 sizeof(struct create_lease));
1007 inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
1008 num_iovecs++;
1009 }
1010
1011 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
1012 rsp = (struct smb2_create_rsp *)iov[0].iov_base;
1013
1014 if (rc != 0) {
1015 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
1016 goto creat_exit;
1017 }
1018
1019 *persistent_fid = rsp->PersistentFileId;
1020 *volatile_fid = rsp->VolatileFileId;
1021
1022 if (buf) {
1023 memcpy(buf, &rsp->CreationTime, 32);
1024 buf->AllocationSize = rsp->AllocationSize;
1025 buf->EndOfFile = rsp->EndofFile;
1026 buf->Attributes = rsp->FileAttributes;
1027 buf->NumberOfLinks = cpu_to_le32(1);
1028 buf->DeletePending = 0;
1029 }
1030
1031 if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
1032 *oplock = parse_lease_state(rsp);
1033 else
1034 *oplock = rsp->OplockLevel;
1035creat_exit:
1036 kfree(copy_path);
1037 free_rsp_buf(resp_buftype, rsp);
1038 return rc;
1039}
1040
1041int
1042SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1043 u64 persistent_fid, u64 volatile_fid)
1044{
1045 struct smb2_close_req *req;
1046 struct smb2_close_rsp *rsp;
1047 struct TCP_Server_Info *server;
1048 struct cifs_ses *ses = tcon->ses;
1049 struct kvec iov[1];
1050 int resp_buftype;
1051 int rc = 0;
1052
1053 cFYI(1, "Close");
1054
1055 if (ses && (ses->server))
1056 server = ses->server;
1057 else
1058 return -EIO;
1059
1060 rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
1061 if (rc)
1062 return rc;
1063
1064 req->PersistentFileId = persistent_fid;
1065 req->VolatileFileId = volatile_fid;
1066
1067 iov[0].iov_base = (char *)req;
1068 /* 4 for rfc1002 length field */
1069 iov[0].iov_len = get_rfc1002_length(req) + 4;
1070
1071 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1072 rsp = (struct smb2_close_rsp *)iov[0].iov_base;
1073
1074 if (rc != 0) {
1075 if (tcon)
1076 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
1077 goto close_exit;
1078 }
1079
1080 /* BB FIXME - decode close response, update inode for caching */
1081
1082close_exit:
1083 free_rsp_buf(resp_buftype, rsp);
1084 return rc;
1085}
1086
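/*
 * Sanity check a variable length area in a response: the offset and
 * length must land inside the received SMB and meet the minimum size
 * expected for the returned structure.
 */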
1087static int
1088validate_buf(unsigned int offset, unsigned int buffer_length,
1089 struct smb2_hdr *hdr, unsigned int min_buf_size)
1090
1091{
1092 unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
1093 char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
1094 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1095 char *end_of_buf = begin_of_buf + buffer_length;
1096
1097
1098 if (buffer_length < min_buf_size) {
1099 cERROR(1, "buffer length %d smaller than minimum size %d",
1100 buffer_length, min_buf_size);
1101 return -EINVAL;
1102 }
1103
1104 /* check if beyond RFC1001 maximum length */
1105 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
1106 cERROR(1, "buffer length %d or smb length %d too large",
1107 buffer_length, smb_len);
1108 return -EINVAL;
1109 }
1110
1111 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
1112 cERROR(1, "illegal server response, bad offset to data");
1113 return -EINVAL;
1114 }
1115
1116 return 0;
1117}
1118
1119/*
1120 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
1121 * Caller must free buffer.
1122 */
1123static int
1124validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
1125 struct smb2_hdr *hdr, unsigned int minbufsize,
1126 char *data)
1127
1128{
1129 char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
1130 int rc;
1131
1132 if (!data)
1133 return -EINVAL;
1134
1135 rc = validate_buf(offset, buffer_length, hdr, minbufsize);
1136 if (rc)
1137 return rc;
1138
1139 memcpy(data, begin_of_buf, buffer_length);
1140
1141 return 0;
1142}
1143
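/*
 * Common helper for the query calls below (SMB2_query_info,
 * SMB2_get_srv_num): send SMB2_QUERY_INFO for the given info class and
 * copy the validated response into the caller's buffer.
 */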
1144static int
1145query_info(const unsigned int xid, struct cifs_tcon *tcon,
1146 u64 persistent_fid, u64 volatile_fid, u8 info_class,
1147 size_t output_len, size_t min_len, void *data)
1148{
1149 struct smb2_query_info_req *req;
1150 struct smb2_query_info_rsp *rsp = NULL;
1151 struct kvec iov[2];
1152 int rc = 0;
1153 int resp_buftype;
1154 struct TCP_Server_Info *server;
1155 struct cifs_ses *ses = tcon->ses;
1156
1157 cFYI(1, "Query Info");
1158
1159 if (ses && (ses->server))
1160 server = ses->server;
1161 else
1162 return -EIO;
1163
1164 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
1165 if (rc)
1166 return rc;
1167
1168 req->InfoType = SMB2_O_INFO_FILE;
1169 req->FileInfoClass = info_class;
1170 req->PersistentFileId = persistent_fid;
1171 req->VolatileFileId = volatile_fid;
1172 /* 4 for rfc1002 length field and 1 for Buffer */
1173 req->InputBufferOffset =
1174 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
1175 req->OutputBufferLength = cpu_to_le32(output_len);
1176
1177 iov[0].iov_base = (char *)req;
1178 /* 4 for rfc1002 length field */
1179 iov[0].iov_len = get_rfc1002_length(req) + 4;
1180
1181 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1182 rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
1183
1184 if (rc) {
1185 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
1186 goto qinf_exit;
1187 }
1188
1189 rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
1190 le32_to_cpu(rsp->OutputBufferLength),
1191 &rsp->hdr, min_len, data);
1192
1193qinf_exit:
1194 free_rsp_buf(resp_buftype, rsp);
1195 return rc;
1196}
1197
1198int
1199SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
1200 u64 persistent_fid, u64 volatile_fid,
1201 struct smb2_file_all_info *data)
1202{
1203 return query_info(xid, tcon, persistent_fid, volatile_fid,
1204 FILE_ALL_INFORMATION,
1205 sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
1206 sizeof(struct smb2_file_all_info), data);
1207}
1208
1209int
1210SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
1211 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
1212{
1213 return query_info(xid, tcon, persistent_fid, volatile_fid,
1214 FILE_INTERNAL_INFORMATION,
1215 sizeof(struct smb2_file_internal_info),
1216 sizeof(struct smb2_file_internal_info), uniqueid);
1217}
1218
1219/*
1220 * This is a no-op for now. We're not really interested in the reply, but
1221 * rather in the fact that the server sent one and that server->lstrp
1222 * gets updated.
1223 *
1224 * FIXME: maybe we should consider checking that the reply matches request?
1225 */
1226static void
1227smb2_echo_callback(struct mid_q_entry *mid)
1228{
1229 struct TCP_Server_Info *server = mid->callback_data;
1230 struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
1231 unsigned int credits_received = 1;
1232
1233 if (mid->mid_state == MID_RESPONSE_RECEIVED)
1234 credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
1235
1236 DeleteMidQEntry(mid);
1237 add_credits(server, credits_received, CIFS_ECHO_OP);
1238}
1239
1240int
1241SMB2_echo(struct TCP_Server_Info *server)
1242{
1243 struct smb2_echo_req *req;
1244 int rc = 0;
1245 struct kvec iov;
1246 struct smb_rqst rqst = { .rq_iov = &iov,
1247 .rq_nvec = 1 };
1248
1249 cFYI(1, "In echo request");
1250
1251 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1252 if (rc)
1253 return rc;
1254
1255 req->hdr.CreditRequest = cpu_to_le16(1);
1256
1257 iov.iov_base = (char *)req;
1258 /* 4 for rfc1002 length field */
1259 iov.iov_len = get_rfc1002_length(req) + 4;
1260
1261 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
1262 CIFS_ECHO_OP);
1263 if (rc)
1264 cFYI(1, "Echo request failed: %d", rc);
1265
1266 cifs_small_buf_release(req);
1267 return rc;
1268}
1269
1270int
1271SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1272 u64 volatile_fid)
1273{
1274 struct smb2_flush_req *req;
1275 struct TCP_Server_Info *server;
1276 struct cifs_ses *ses = tcon->ses;
1277 struct kvec iov[1];
1278 int resp_buftype;
1279 int rc = 0;
1280
1281 cFYI(1, "Flush");
1282
1283 if (ses && (ses->server))
1284 server = ses->server;
1285 else
1286 return -EIO;
1287
1288 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
1289 if (rc)
1290 return rc;
1291
1292 req->PersistentFileId = persistent_fid;
1293 req->VolatileFileId = volatile_fid;
1294
1295 iov[0].iov_base = (char *)req;
1296 /* 4 for rfc1002 length field */
1297 iov[0].iov_len = get_rfc1002_length(req) + 4;
1298
1299 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1300
1301 if ((rc != 0) && tcon)
1302 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1303
1304 free_rsp_buf(resp_buftype, iov[0].iov_base);
1305 return rc;
1306}
1307
1308/*
1309 * To form a chain of read requests, any read requests after the first should
1310 * have the end_of_chain boolean set to true.
1311 */
1312static int
1313smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
1314 unsigned int remaining_bytes, int request_type)
1315{
1316 int rc = -EACCES;
1317 struct smb2_read_req *req = NULL;
1318
1319 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
1320 if (rc)
1321 return rc;
1322 if (io_parms->tcon->ses->server == NULL)
1323 return -ECONNABORTED;
1324
1325 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1326
1327 req->PersistentFileId = io_parms->persistent_fid;
1328 req->VolatileFileId = io_parms->volatile_fid;
1329 req->ReadChannelInfoOffset = 0; /* reserved */
1330 req->ReadChannelInfoLength = 0; /* reserved */
1331 req->Channel = 0; /* reserved */
1332 req->MinimumCount = 0;
1333 req->Length = cpu_to_le32(io_parms->length);
1334 req->Offset = cpu_to_le64(io_parms->offset);
1335
1336 if (request_type & CHAINED_REQUEST) {
1337 if (!(request_type & END_OF_CHAIN)) {
1338 /* 4 for rfc1002 length field */
1339 req->hdr.NextCommand =
1340 cpu_to_le32(get_rfc1002_length(req) + 4);
1341 } else /* END_OF_CHAIN */
1342 req->hdr.NextCommand = 0;
1343 if (request_type & RELATED_REQUEST) {
1344 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
1345 /*
1346 * Related requests use info from previous read request
1347 * in chain.
1348 */
1349 req->hdr.SessionId = 0xFFFFFFFF;
1350 req->hdr.TreeId = 0xFFFFFFFF;
1351 req->PersistentFileId = 0xFFFFFFFF;
1352 req->VolatileFileId = 0xFFFFFFFF;
1353 }
1354 }
1355 if (remaining_bytes > io_parms->length)
1356 req->RemainingBytes = cpu_to_le32(remaining_bytes);
1357 else
1358 req->RemainingBytes = 0;
1359
1360 iov[0].iov_base = (char *)req;
1361 /* 4 for rfc1002 length field */
1362 iov[0].iov_len = get_rfc1002_length(req) + 4;
1363 return rc;
1364}
1365
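/*
 * Completion handler for async reads: verify the signature when signing
 * is in use, account the bytes read, translate the mid state into
 * rdata->result and queue the read completion work.
 */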
1366static void
1367smb2_readv_callback(struct mid_q_entry *mid)
1368{
1369 struct cifs_readdata *rdata = mid->callback_data;
1370 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1371 struct TCP_Server_Info *server = tcon->ses->server;
1372 struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
1373 unsigned int credits_received = 1;
1374 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
1375 .rq_nvec = 1,
1376 .rq_pages = rdata->pages,
1377 .rq_npages = rdata->nr_pages,
1378 .rq_pagesz = rdata->pagesz,
1379 .rq_tailsz = rdata->tailsz };
1380
1381 cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
1382 mid->mid, mid->mid_state, rdata->result, rdata->bytes);
1383
1384 switch (mid->mid_state) {
1385 case MID_RESPONSE_RECEIVED:
1386 credits_received = le16_to_cpu(buf->CreditRequest);
1387 /* result already set, check signature */
1388 if (server->sec_mode &
1389 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
1390 int rc;
1391
1392 rc = smb2_verify_signature(&rqst, server);
1393 if (rc)
1394 cERROR(1, "SMB signature verification returned "
1395 "error = %d", rc);
1396 }
1397 /* FIXME: should this be counted toward the initiating task? */
1398 task_io_account_read(rdata->bytes);
1399 cifs_stats_bytes_read(tcon, rdata->bytes);
1400 break;
1401 case MID_REQUEST_SUBMITTED:
1402 case MID_RETRY_NEEDED:
1403 rdata->result = -EAGAIN;
1404 break;
1405 default:
1406 if (rdata->result != -ENODATA)
1407 rdata->result = -EIO;
1408 }
1409
1410 if (rdata->result)
1411 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
1412
1413 queue_work(cifsiod_wq, &rdata->work);
1414 DeleteMidQEntry(mid);
1415 add_credits(server, credits_received, 0);
1416}
1417
1418/* smb2_async_readv - send an async read, and set up mid to handle result */
1419int
1420smb2_async_readv(struct cifs_readdata *rdata)
1421{
1422 int rc;
1423 struct smb2_hdr *buf;
1424 struct cifs_io_parms io_parms;
1425 struct smb_rqst rqst = { .rq_iov = &rdata->iov,
1426 .rq_nvec = 1 };
1427
1428 cFYI(1, "%s: offset=%llu bytes=%u", __func__,
1429 rdata->offset, rdata->bytes);
1430
1431 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
1432 io_parms.offset = rdata->offset;
1433 io_parms.length = rdata->bytes;
1434 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
1435 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
1436 io_parms.pid = rdata->pid;
1437 rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
1438 if (rc)
1439 return rc;
1440
1441 buf = (struct smb2_hdr *)rdata->iov.iov_base;
1442 /* 4 for rfc1002 length field */
1443 rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
1444
1445 kref_get(&rdata->refcount);
1446 rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
1447 cifs_readv_receive, smb2_readv_callback,
1448 rdata, 0);
1449 if (rc) {
1450 kref_put(&rdata->refcount, cifs_readdata_release);
1451 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
1452 }
1453
1454 cifs_small_buf_release(buf);
1455 return rc;
1456}
1457
1458int
1459SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
1460 unsigned int *nbytes, char **buf, int *buf_type)
1461{
1462 int resp_buftype, rc = -EACCES;
1463 struct smb2_read_rsp *rsp = NULL;
1464 struct kvec iov[1];
1465
1466 *nbytes = 0;
1467 rc = smb2_new_read_req(iov, io_parms, 0, 0);
1468 if (rc)
1469 return rc;
1470
1471 rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
1472 &resp_buftype, CIFS_LOG_ERROR);
1473
1474 rsp = (struct smb2_read_rsp *)iov[0].iov_base;
1475
1476 if (rsp->hdr.Status == STATUS_END_OF_FILE) {
1477 free_rsp_buf(resp_buftype, iov[0].iov_base);
1478 return 0;
1479 }
1480
1481 if (rc) {
1482 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
1483 cERROR(1, "Send error in read = %d", rc);
1484 } else {
1485 *nbytes = le32_to_cpu(rsp->DataLength);
1486 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
1487 (*nbytes > io_parms->length)) {
1488 cFYI(1, "bad length %d for count %d", *nbytes,
1489 io_parms->length);
1490 rc = -EIO;
1491 *nbytes = 0;
1492 }
1493 }
1494
1495 if (*buf) {
1496 memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
1497 *nbytes);
1498 free_rsp_buf(resp_buftype, iov[0].iov_base);
1499 } else if (resp_buftype != CIFS_NO_BUFFER) {
1500 *buf = iov[0].iov_base;
1501 if (resp_buftype == CIFS_SMALL_BUFFER)
1502 *buf_type = CIFS_SMALL_BUFFER;
1503 else if (resp_buftype == CIFS_LARGE_BUFFER)
1504 *buf_type = CIFS_LARGE_BUFFER;
1505 }
1506 return rc;
1507}
1508
1509/*
1510 * Check the mid_state and signature on received buffer (if any), and queue the
1511 * workqueue completion task.
1512 */
1513static void
1514smb2_writev_callback(struct mid_q_entry *mid)
1515{
1516 struct cifs_writedata *wdata = mid->callback_data;
1517 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1518 unsigned int written;
1519 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
1520 unsigned int credits_received = 1;
1521
1522 switch (mid->mid_state) {
1523 case MID_RESPONSE_RECEIVED:
1524 credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
1525 wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
1526 if (wdata->result != 0)
1527 break;
1528
1529 written = le32_to_cpu(rsp->DataLength);
1530 /*
1531 * Mask off high 16 bits when bytes written as returned
1532 * by the server is greater than bytes requested by the
1533 * client. OS/2 servers are known to set incorrect
1534 * CountHigh values.
1535 */
1536 if (written > wdata->bytes)
1537 written &= 0xFFFF;
1538
1539 if (written < wdata->bytes)
1540 wdata->result = -ENOSPC;
1541 else
1542 wdata->bytes = written;
1543 break;
1544 case MID_REQUEST_SUBMITTED:
1545 case MID_RETRY_NEEDED:
1546 wdata->result = -EAGAIN;
1547 break;
1548 default:
1549 wdata->result = -EIO;
1550 break;
1551 }
1552
1553 if (wdata->result)
1554 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1555
1556 queue_work(cifsiod_wq, &wdata->work);
1557 DeleteMidQEntry(mid);
1558 add_credits(tcon->ses->server, credits_received, 0);
1559}
1560
1561/* smb2_async_writev - send an async write, and set up mid to handle result */
1562int
1563smb2_async_writev(struct cifs_writedata *wdata)
1564{
1565 int rc = -EACCES;
1566 struct smb2_write_req *req = NULL;
1567 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
1568 struct kvec iov;
1569 struct smb_rqst rqst;
1570
1571 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
1572 if (rc)
1573 goto async_writev_out;
1574
1575 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1576
1577 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1578 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1579 req->WriteChannelInfoOffset = 0;
1580 req->WriteChannelInfoLength = 0;
1581 req->Channel = 0;
1582 req->Offset = cpu_to_le64(wdata->offset);
1583 /* 4 for rfc1002 length field */
1584 req->DataOffset = cpu_to_le16(
1585 offsetof(struct smb2_write_req, Buffer) - 4);
1586 req->RemainingBytes = 0;
1587
1588 /* 4 for rfc1002 length field and 1 for Buffer */
1589 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1590 iov.iov_base = req;
1591
1592 rqst.rq_iov = &iov;
1593 rqst.rq_nvec = 1;
1594 rqst.rq_pages = wdata->pages;
1595 rqst.rq_npages = wdata->nr_pages;
1596 rqst.rq_pagesz = wdata->pagesz;
1597 rqst.rq_tailsz = wdata->tailsz;
1598
1599 cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
1600
1601 req->Length = cpu_to_le32(wdata->bytes);
1602
1603 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1604
1605 kref_get(&wdata->refcount);
1606 rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
1607 smb2_writev_callback, wdata, 0);
1608
1609 if (rc) {
1610 kref_put(&wdata->refcount, cifs_writedata_release);
1611 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1612 }
1613
1614async_writev_out:
1615 cifs_small_buf_release(req);
1616 return rc;
1617}
1618
1619/*
1620 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
1621 * The length field from io_parms must be at least 1 and indicates a number of
1622 * elements with data to write that begins with position 1 in iov array. All
1623 * data length is specified by count.
1624 */
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
	   unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	int rc = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_write_rsp *rsp = NULL;
	int resp_buftype;
	*nbytes = 0;

	if (n_vec < 1)
		return rc;

	/* make sure the connection is still there before building the request */
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* length of entire message including data to be written */
	inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
			  &resp_buftype, 0);
	rsp = (struct smb2_write_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cERROR(1, "Send error in write = %d", rc);
	} else
		*nbytes = le32_to_cpu(rsp->DataLength);

	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	FILE_DIRECTORY_INFO *entryptr;

	if (bufstart == NULL)
		return 0;

	entryptr = (FILE_DIRECTORY_INFO *)bufstart;

	while (1) {
		entryptr = (FILE_DIRECTORY_INFO *)
					((char *)entryptr + next_offset);

		if ((char *)entryptr + size > end_of_buf) {
			cERROR(1, "malformed search entry would overflow");
			break;
		}

		len = le32_to_cpu(entryptr->FileNameLength);
		if ((char *)entryptr + len + size > end_of_buf) {
			cERROR(1, "directory entry name would overflow frame "
				  "end of buf %p", end_of_buf);
			break;
		}

		*lastentry = (char *)entryptr;
		entrycount++;

		next_offset = le32_to_cpu(entryptr->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}
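
/*
 * Illustrative layout (hypothetical offsets): a response buffer holding three
 * entries chained through NextEntryOffset, with the final entry carrying an
 * offset of zero.
 *
 *	bufstart -> [ entry 0 | NextEntryOffset = 104 ]
 *	            [ entry 1 | NextEntryOffset =  96 ]
 *	            [ entry 2 | NextEntryOffset =   0 ]  <- *lastentry
 *
 * num_entries() would return 3 for such a buffer, stopping early (and thus
 * under-counting) only if an entry or its file name would run past
 * end_of_buf.
 */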

/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_req *req;
	struct smb2_query_directory_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int len;
	int resp_buftype;
	unsigned char *bufptr;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	__le16 asteriks = cpu_to_le16('*');
	char *end_of_smb;
	unsigned int output_size = CIFSMaxBufSize;
	size_t info_buf_size;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
	if (rc)
		return rc;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	default:
		cERROR(1, "info level %u isn't supported",
		       srch_inf->info_level);
		rc = -EINVAL;
		goto qdir_exit;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asteriks, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	inc_rfc1001_len(req, len - 1 /* Buffer */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		goto qdir_exit;
	}

	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  info_buf_size);
	if (rc)
		goto qdir_exit;

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
		(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
	/* 4 for rfc1002 length field */
	end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
	srch_inf->entries_in_buffer =
			num_entries(srch_inf->srch_entries_start, end_of_smb,
				    &srch_inf->last_entry, info_buf_size);
	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cFYI(1, "num entries %d last_index %lld srch start %p srch end %p",
	     srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
	     srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cERROR(1, "illegal search buffer type");

	if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
		srch_inf->endOfSearch = 1;
	else
		srch_inf->endOfSearch = 0;

	return rc;

qdir_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
	      unsigned int num, void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	int rc = 0;
	int resp_buftype;
	unsigned int i;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if (!num)
		return -EINVAL;

	iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
	if (rc) {
		kfree(iov);
		return rc;
	}

	req->hdr.ProcessId = cpu_to_le32(pid);

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	/* 4 for RFC1001 length and 1 for Buffer */
	req->BufferOffset =
		cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
	req->BufferLength = cpu_to_le32(*size);

	inc_rfc1001_len(req, *size - 1 /* Buffer */);

	memcpy(req->Buffer, *data, *size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	for (i = 1; i < num; i++) {
		inc_rfc1001_len(req, size[i]);
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
	rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		goto out;
	}
out:
	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_rename_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
				  /* 0 = fail if target already exists */
	info.RootDirectory = 0;	  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_rename_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_RENAME_INFORMATION, 2, data,
			   size);
	kfree(data);
	return rc;
}

int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_link_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
				  /* 0 = fail if link already exists */
	info.RootDirectory = 0;	  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_link_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_LINK_INFORMATION, 2, data, size);
	kfree(data);
	return rc;
}

int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
			     FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}
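
/*
 * Illustrative caller sketch (hypothetical, not taken from this file):
 * truncating an open file to zero length.
 *
 *	__le64 eof = cpu_to_le64(0);
 *
 *	rc = SMB2_set_eof(xid, tcon, persistent_fid, volatile_fid,
 *			  current->tgid, &eof);
 */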

int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
	unsigned int size;
	size = sizeof(FILE_BASIC_INFO);
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			     current->tgid, FILE_BASIC_INFORMATION, 1,
			     (void **)&buf, &size);
}

int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	int rc;
	struct smb2_oplock_break *req = NULL;

	cFYI(1, "SMB2_oplock_break");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);

	if (rc)
		return rc;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cFYI(1, "Send error in Oplock Break = %d", rc);
	}

	return rc;
}

static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}
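
/*
 * Worked example (hypothetical values): a server reporting 512-byte sectors
 * and 8 sectors per allocation unit gives f_bsize = 512 * 8 = 4096, so the
 * Total/ActualAvailable/CallerAvailable allocation-unit counts map directly
 * onto 4 KiB statfs blocks.
 */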

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
		   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;

	cFYI(1, "Query FSInfo level %d", level);

	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for pad */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);

	iov->iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov->iov_len = get_rfc1002_length(req) + 4;
	return 0;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct smb2_fs_full_size_info *info = NULL;

	rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		copy_fs_info_to_kstatfs(info, fsdata);

qinf_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}

int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	int resp_buf_type;
	unsigned int count;

	cFYI(1, "smb2_lockv num lock %d", num_lock);

	rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);
	inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and count for all locks */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
	if (rc) {
		cFYI(1, "Send error in smb2_lockv = %d", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
	}

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
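
/*
 * Illustrative caller sketch (hypothetical, assuming the
 * SMB2_LOCKFLAG_EXCLUSIVE_LOCK flag from smb2pdu.h): take a non-blocking
 * exclusive lock over the first 4096 bytes of the file, then drop it.
 *
 *	rc = SMB2_lock(xid, tcon, persist_fid, volatile_fid, current->tgid,
 *		       4096, 0, SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
 *	...
 *	rc = SMB2_lock(xid, tcon, persist_fid, volatile_fid, current->tgid,
 *		       4096, 0, SMB2_LOCKFLAG_UNLOCK, false);
 */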

int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	int rc;
	struct smb2_lease_ack *req = NULL;

	cFYI(1, "SMB2_lease_break");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);

	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	inc_rfc1001_len(req, 12);

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cFYI(1, "Send error in Lease Break = %d", rc);
	}

	return rc;
}