/*
 *   fs/cifs/smb2pdu.c
 *
 *   Copyright (C) International Business Machines Corp., 2009, 2012
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   Contains the routines for constructing the SMB2 PDUs themselves
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
 /* Note that there are handle based routines which must be               */
 /* treated slightly differently for reconnection purposes since we never */
 /* want to reuse a stale file handle and only the caller knows the file info */

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"

/*
 * The following table defines the expected "StructureSize" of SMB2 requests
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order.
 */
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
	/* SMB2_NEGOTIATE */ 36,
	/* SMB2_SESSION_SETUP */ 25,
	/* SMB2_LOGOFF */ 4,
	/* SMB2_TREE_CONNECT */ 9,
	/* SMB2_TREE_DISCONNECT */ 4,
	/* SMB2_CREATE */ 57,
	/* SMB2_CLOSE */ 24,
	/* SMB2_FLUSH */ 24,
	/* SMB2_READ */ 49,
	/* SMB2_WRITE */ 49,
	/* SMB2_LOCK */ 48,
	/* SMB2_IOCTL */ 57,
	/* SMB2_CANCEL */ 4,
	/* SMB2_ECHO */ 4,
	/* SMB2_QUERY_DIRECTORY */ 33,
	/* SMB2_CHANGE_NOTIFY */ 32,
	/* SMB2_QUERY_INFO */ 41,
	/* SMB2_SET_INFO */ 33,
	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};

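/*
 * Fill in the fixed SMB2 header for smb2_cmd on behalf of tcon: protocol id,
 * StructureSize, credit request, process id, tree id, session id and the
 * signing/DFS flags, plus StructureSize2 looked up from the table above.
 */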
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
		  const struct cifs_tcon *tcon)
{
	struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
	char *temp = (char *)hdr;
	/* lookup word count ie StructureSize from table */
	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];

	/*
	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
	 * largest operations (Create)
	 */
	memset(temp, 0, 256);

	/* Note this is only network field converted to big endian */
	hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
			- 4 /* RFC 1001 length field itself not counted */);

	hdr->ProtocolId[0] = 0xFE;
	hdr->ProtocolId[1] = 'S';
	hdr->ProtocolId[2] = 'M';
	hdr->ProtocolId[3] = 'B';
	hdr->StructureSize = cpu_to_le16(64);
	hdr->Command = smb2_cmd;
	hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
	hdr->ProcessId = cpu_to_le32((__u16)current->tgid);

	if (!tcon)
		goto out;

	hdr->TreeId = tcon->tid;
	/* Uid is not converted */
	if (tcon->ses)
		hdr->SessionId = tcon->ses->Suid;
	/* BB check following DFS flags BB */
	/* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
	if (tcon->share_flags & SHI1005_FLAGS_DFS)
		hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
	/* BB how does SMB2 do case sensitive? */
	/* if (tcon->nocase)
		hdr->Flags |= SMBFLG_CASELESS; */
	if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
		hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
	pdu->StructureSize2 = cpu_to_le16(parmsize);
	return;
}

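/*
 * Before sending smb2_command on tcon, make sure the TCP connection and SMB
 * session behind it are usable: wait for the demultiplex thread to reconnect
 * the socket if needed, then redo negotiate/session setup/tree connect when
 * they have been marked for reconnect.
 */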
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
	int rc = 0;
	struct nls_table *nls_codepage;
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;

	/*
	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
	 * check for tcp and smb session status done differently
	 * for those three - in the calling routine.
	 */
	if (tcon == NULL)
		return rc;

	if (smb2_command == SMB2_TREE_CONNECT)
		return rc;

	if (tcon->tidStatus == CifsExiting) {
		/*
		 * only tree disconnect, open, and write,
		 * (and ulogoff which does not have tcon)
		 * are allowed as we start force umount.
		 */
		if ((smb2_command != SMB2_WRITE) &&
		    (smb2_command != SMB2_CREATE) &&
		    (smb2_command != SMB2_TREE_DISCONNECT)) {
			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
				 smb2_command);
			return -ENODEV;
		}
	}
	if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
	    (!tcon->ses->server))
		return -EIO;

	ses = tcon->ses;
	server = ses->server;

	/*
	 * Give demultiplex thread up to 10 seconds to reconnect, should be
	 * greater than cifs socket timeout which is 7 seconds
	 */
	while (server->tcpStatus == CifsNeedReconnect) {
		/*
		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
		 * here since they are implicitly done when session drops.
		 */
		switch (smb2_command) {
		/*
		 * BB Should we keep oplock break and add flush to exceptions?
		 */
		case SMB2_TREE_DISCONNECT:
		case SMB2_CANCEL:
		case SMB2_CLOSE:
		case SMB2_OPLOCK_BREAK:
			return -EAGAIN;
		}

		wait_event_interruptible_timeout(server->response_q,
			(server->tcpStatus != CifsNeedReconnect), 10 * HZ);

		/* are we still trying to reconnect? */
		if (server->tcpStatus != CifsNeedReconnect)
			break;

		/*
		 * on "soft" mounts we wait once. Hard mounts keep
		 * retrying until process is killed or server comes
		 * back on-line
		 */
		if (!tcon->retry) {
			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
			return -EHOSTDOWN;
		}
	}

	if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
		return rc;

	nls_codepage = load_nls_default();

	/*
	 * need to prevent multiple threads trying to simultaneously reconnect
	 * the same SMB session
	 */
	mutex_lock(&tcon->ses->session_mutex);
	rc = cifs_negotiate_protocol(0, tcon->ses);
	if (!rc && tcon->ses->need_reconnect)
		rc = cifs_setup_session(0, tcon->ses, nls_codepage);

	if (rc || !tcon->need_reconnect) {
		mutex_unlock(&tcon->ses->session_mutex);
		goto out;
	}

	cifs_mark_open_files_invalid(tcon);
	rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
	mutex_unlock(&tcon->ses->session_mutex);
	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
	if (rc)
		goto out;
	atomic_inc(&tconInfoReconnectCount);
	/*
	 * BB FIXME add code to check if wsize needs update due to negotiated
	 * smb buffer size shrinking.
	 */
out:
	/*
	 * Check if handle based operation so we know whether we can continue
	 * or not without returning to caller to reset file handle.
	 */
	/*
	 * BB Is flush done by server on drop of tcp session? Should we special
	 * case it and skip above?
	 */
	switch (smb2_command) {
	case SMB2_FLUSH:
	case SMB2_READ:
	case SMB2_WRITE:
	case SMB2_LOCK:
	case SMB2_IOCTL:
	case SMB2_QUERY_DIRECTORY:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
		return -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
}

/*
 * Allocate and return pointer to an SMB request hdr, and set basic
 * SMB information in the SMB header. If the return code is zero, this
 * function must have filled in request_buf pointer.
 */
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
		void **request_buf)
{
	int rc = 0;

	rc = smb2_reconnect(smb2_command, tcon);
	if (rc)
		return rc;

	/* BB eventually switch this to SMB2 specific small buf size */
	*request_buf = cifs_small_buf_get();
	if (*request_buf == NULL) {
		/* BB should we add a retry in here if not a writepage? */
		return -ENOMEM;
	}

	smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);

	if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
		uint16_t com_code = le16_to_cpu(smb2_command);
		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
		cifs_stats_inc(&tcon->num_smbs_sent);
	}

	return rc;
}

static void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}


/*
 *
 *	SMB2 Worker functions follow:
 *
 *	The general structure of the worker functions is:
 *	1) Call smb2_init (assembles SMB2 header)
 *	2) Initialize SMB2 command specific fields in fixed length area of SMB
 *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
 *	4) Decode SMB2 command specific fields in the fixed length area
 *	5) Decode variable length data area (if any for this SMB2 command type)
 *	6) Call free smb buffer
 *	7) return
 *
 */

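/*
 * Send an SMB2 NEGOTIATE request for this session and decode the response:
 * dialect, security mode, capabilities, maximum transact/read/write sizes
 * and (when ASN.1 decoding is built in) the security blob used later by
 * session setup.
 */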
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_negotiate_req *req;
	struct smb2_negotiate_rsp *rsp;
	struct kvec iov[1];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server = ses->server;
	int blob_offset, blob_length;
	char *security_blob;
	int flags = CIFS_NEG_OP;

	cifs_dbg(FYI, "Negotiate protocol\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0;

	req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);

	req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
	inc_rfc1001_len(req, 2);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (ses->sign)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
	else if (global_secflags & CIFSSEC_MAY_SIGN)
		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
	else
		req->SecurityMode = 0;

	req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);

	memcpy(req->ClientGUID, cifs_client_guid, SMB2_CLIENT_GUID_SIZE);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);

	rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto neg_exit;

	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);

	/* BB we may eventually want to match the negotiated vs. requested
	   dialect, even though we are only requesting one at a time */
	if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
	else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
	else {
		cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
			 le16_to_cpu(rsp->DialectRevision));
		rc = -EIO;
		goto neg_exit;
	}
	server->dialect = le16_to_cpu(rsp->DialectRevision);

	/* SMB2 only has an extended negflavor */
	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
	server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
	server->max_read = le32_to_cpu(rsp->MaxReadSize);
	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
	/* BB Do we need to validate the SecurityMode? */
	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
	server->capabilities = le32_to_cpu(rsp->Capabilities);
	/* Internal types */
	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;

	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
					       &rsp->hdr);
	if (blob_length == 0) {
		cifs_dbg(VFS, "missing security blob on negprot\n");
		rc = -EIO;
		goto neg_exit;
	}

	rc = cifs_enable_signing(server, ses->sign);
#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
	if (rc)
		goto neg_exit;

	rc = decode_neg_token_init(security_blob, blob_length,
				   &server->sec_type);
	if (rc == 1)
		rc = 0;
	else if (rc == 0) {
		rc = -EIO;
		goto neg_exit;
	}
#endif

neg_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

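/*
 * Authenticate the session with one or more SMB2 SESSION_SETUP requests.
 * Only raw NTLMSSP is implemented here, so the exchange is two-phase: send
 * the NEGOTIATE blob, decode the server's challenge, then send the
 * AUTHENTICATE blob.
 */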
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
		const struct nls_table *nls_cp)
{
	struct smb2_sess_setup_req *req;
	struct smb2_sess_setup_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
	struct TCP_Server_Info *server = ses->server;
	u16 blob_length = 0;
	char *security_blob;
	char *ntlmssp_blob = NULL;
	bool use_spnego = false; /* else use raw ntlmssp */

	cifs_dbg(FYI, "Session Setup\n");

	if (!server) {
		WARN(1, "%s: server is NULL!\n", __func__);
		return -EIO;
	}

	/*
	 * If memory allocation is successful, caller of this function
	 * frees it.
	 */
	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
	if (!ses->ntlmssp)
		return -ENOMEM;

	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
	ses->sectype = RawNTLMSSP;

ssetup_ntlmssp_authenticate:
	if (phase == NtLmChallenge)
		phase = NtLmAuthenticate; /* if ntlmssp, now final phase */

	rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
	if (rc)
		return rc;

	req->hdr.SessionId = 0; /* First session, not a reauthenticate */
	req->VcNumber = 0; /* MBZ */
	/* to enable echos and oplocks */
	req->hdr.CreditRequest = cpu_to_le16(3);

	/* only one of SMB2 signing flags may be set in SMB2 request */
	if (server->sign)
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
	else
		req->SecurityMode = 0;

	req->Capabilities = 0;
	req->Channel = 0; /* MBZ */

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
	if (phase == NtLmNegotiate) {
		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					sizeof(struct _NEGOTIATE_MESSAGE),
					ntlmssp_blob); */
			/* BB eventually need to add this */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
			security_blob = ntlmssp_blob;
		}
	} else if (phase == NtLmAuthenticate) {
		req->hdr.SessionId = ses->Suid;
		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
				       GFP_KERNEL);
		if (ntlmssp_blob == NULL) {
			rc = -ENOMEM;
			goto ssetup_exit;
		}
		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
					     nls_cp);
		if (rc) {
			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
				 rc);
			goto ssetup_exit; /* BB double check error handling */
		}
		if (use_spnego) {
			/* blob_length = build_spnego_ntlmssp_blob(
					&security_blob,
					blob_length,
					ntlmssp_blob); */
			cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
			rc = -EOPNOTSUPP;
			kfree(ntlmssp_blob);
			goto ssetup_exit;
		} else {
			security_blob = ntlmssp_blob;
		}
	} else {
		cifs_dbg(VFS, "illegal ntlmssp phase\n");
		rc = -EIO;
		goto ssetup_exit;
	}

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->SecurityBufferOffset =
		cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
			    1 /* pad */ - 4 /* rfc1001 len */);
	req->SecurityBufferLength = cpu_to_le16(blob_length);
	iov[1].iov_base = security_blob;
	iov[1].iov_len = blob_length;

	inc_rfc1001_len(req, blob_length - 1 /* pad */);

	/* BB add code to build os and lm fields */

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
			  CIFS_LOG_ERROR | CIFS_NEG_OP);

	kfree(security_blob);
	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
	if (resp_buftype != CIFS_NO_BUFFER &&
	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
		if (phase != NtLmNegotiate) {
			cifs_dbg(VFS, "Unexpected more processing error\n");
			goto ssetup_exit;
		}
		if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
				le16_to_cpu(rsp->SecurityBufferOffset)) {
			cifs_dbg(VFS, "Invalid security buffer offset %d\n",
				 le16_to_cpu(rsp->SecurityBufferOffset));
			rc = -EIO;
			goto ssetup_exit;
		}

		/* NTLMSSP Negotiate sent now processing challenge (response) */
		phase = NtLmChallenge; /* process ntlmssp challenge */
		rc = 0; /* MORE_PROCESSING is not an error here but expected */
		ses->Suid = rsp->hdr.SessionId;
		rc = decode_ntlmssp_challenge(rsp->Buffer,
				le16_to_cpu(rsp->SecurityBufferLength), ses);
	}

	/*
	 * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
	 * but at least the raw NTLMSSP case works.
	 */
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	if (rc != 0)
		goto ssetup_exit;

	ses->session_flags = le16_to_cpu(rsp->SessionFlags);
ssetup_exit:
	free_rsp_buf(resp_buftype, rsp);

	/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
	if ((phase == NtLmChallenge) && (rc == 0))
		goto ssetup_ntlmssp_authenticate;
	return rc;
}

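/*
 * Tear down the SMB session with an SMB2 LOGOFF request; the response is a
 * trivial structure and is not examined.
 */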
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
	struct smb2_logoff_req *req; /* response is also trivial struct */
	int rc = 0;
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "disconnect session %p\n", ses);

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
	if (rc)
		return rc;

	/* since no tcon, smb2_init can not do this, so do here */
	req->hdr.SessionId = ses->Suid;
	if (server->sign)
		req->hdr.Flags |= SMB2_FLAGS_SIGNED;

	rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
	/*
	 * No tcon so can't do
	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
	 */
	return rc;
}

static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

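/*
 * Connect to the share named by the UNC path "tree" with an SMB2 TREE_CONNECT
 * request, converting the path to UTF-16 and recording share type, flags,
 * maximal access and tree id in the tcon (or in ses->ipc_tid when tcon is
 * NULL, i.e. the IPC$ connection).
 */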
int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
	  struct cifs_tcon *tcon, const struct nls_table *cp)
{
	struct smb2_tree_connect_req *req;
	struct smb2_tree_connect_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	int unc_path_len;
	struct TCP_Server_Info *server;
	__le16 *unc_path = NULL;

	cifs_dbg(FYI, "TCON\n");

	if ((ses->server) && tree)
		server = ses->server;
	else
		return -EIO;

	if (tcon && tcon->bad_network_name)
		return -ENOENT;

	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
	if (unc_path == NULL)
		return -ENOMEM;

	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
	unc_path_len *= 2;
	if (unc_path_len < 2) {
		kfree(unc_path);
		return -EINVAL;
	}

	rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
	if (rc) {
		kfree(unc_path);
		return rc;
	}

	if (tcon == NULL) {
		/* since no tcon, smb2_init can not do this, so do here */
		req->hdr.SessionId = ses->Suid;
		/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
			req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
	}

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for pad */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* Testing shows that buffer offset must be at location of Buffer[0] */
	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
			- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
	req->PathLength = cpu_to_le16(unc_path_len - 2);
	iov[1].iov_base = unc_path;
	iov[1].iov_len = unc_path_len;

	inc_rfc1001_len(req, unc_path_len - 1 /* pad */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon) {
			cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
			tcon->need_reconnect = true;
		}
		goto tcon_error_exit;
	}

	if (tcon == NULL) {
		ses->ipc_tid = rsp->hdr.TreeId;
		goto tcon_exit;
	}

	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
		cifs_dbg(FYI, "connection to disk share\n");
	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
		tcon->ipc = true;
		cifs_dbg(FYI, "connection to pipe share\n");
	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
		tcon->print = true;
		cifs_dbg(FYI, "connection to printer\n");
	} else {
		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
		rc = -EOPNOTSUPP;
		goto tcon_error_exit;
	}

	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
	tcon->tidStatus = CifsGood;
	tcon->need_reconnect = false;
	tcon->tid = rsp->hdr.TreeId;
	strncpy(tcon->treeName, tree, MAX_TREE_SIZE);

	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");

tcon_exit:
	free_rsp_buf(resp_buftype, rsp);
	kfree(unc_path);
	return rc;

tcon_error_exit:
	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
		tcon->bad_network_name = true;
	}
	goto tcon_exit;
}

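/*
 * Send an SMB2 TREE_DISCONNECT for the tcon; skipped when the tree or the
 * session is already marked for reconnect.
 */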
int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb2_tree_disconnect_req *req; /* response is trivial */
	int rc = 0;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Tree Disconnect\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
		return 0;

	rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
	if (rc)
		return rc;

	rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
	if (rc)
		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);

	return rc;
}

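/*
 * Build the "RqLs" (lease request) create context for an SMB2 CREATE,
 * mapping the requested oplock level to the corresponding lease state bits.
 */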
static struct create_lease *
create_lease_buf(u8 *lease_key, u8 oplock)
{
	struct create_lease *buf;

	buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
	buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		buf->lcontext.LeaseState = SMB2_LEASE_WRITE_CACHING |
					   SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_II)
		buf->lcontext.LeaseState = SMB2_LEASE_READ_CACHING;
	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
		buf->lcontext.LeaseState = SMB2_LEASE_HANDLE_CACHING |
					   SMB2_LEASE_READ_CACHING |
					   SMB2_LEASE_WRITE_CACHING;

	buf->ccontext.DataOffset = cpu_to_le16(offsetof
					(struct create_lease, lcontext));
	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
	buf->ccontext.NameOffset = cpu_to_le16(offsetof
					(struct create_lease, Name));
	buf->ccontext.NameLength = cpu_to_le16(4);
	buf->Name[0] = 'R';
	buf->Name[1] = 'q';
	buf->Name[2] = 'L';
	buf->Name[3] = 's';
	return buf;
}

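/*
 * Walk the create contexts in an SMB2 CREATE response looking for the "RqLs"
 * lease context and translate the granted lease state back into an oplock
 * level (or SMB2_OPLOCK_LEVEL_NOCHANGE if a lease break is in progress).
 */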
static __u8
parse_lease_state(struct smb2_create_rsp *rsp)
{
	char *data_offset;
	struct create_lease *lc;
	bool found = false;

	data_offset = (char *)rsp;
	data_offset += 4 + le32_to_cpu(rsp->CreateContextsOffset);
	lc = (struct create_lease *)data_offset;
	do {
		char *name = le16_to_cpu(lc->ccontext.NameOffset) + (char *)lc;
		if (le16_to_cpu(lc->ccontext.NameLength) != 4 ||
		    strncmp(name, "RqLs", 4)) {
			lc = (struct create_lease *)((char *)lc
					+ le32_to_cpu(lc->ccontext.Next));
			continue;
		}
		if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
			return SMB2_OPLOCK_LEVEL_NOCHANGE;
		found = true;
		break;
	} while (le32_to_cpu(lc->ccontext.Next) != 0);

	if (!found)
		return 0;

	return smb2_map_lease_to_oplock(lc->lcontext.LeaseState);
}

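/*
 * Open or create the UTF-16 path with an SMB2 CREATE request, optionally
 * attaching a lease create context when oplocks/leases are enabled.  On
 * success the persistent/volatile file ids, the granted oplock level and,
 * if buf is supplied, the basic file attributes are returned to the caller.
 */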
int
SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
	  u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
	  __u32 create_disposition, __u32 file_attributes, __u32 create_options,
	  __u8 *oplock, struct smb2_file_all_info *buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[3];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	int num_iovecs = 2;

	cifs_dbg(FYI, "create/open\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(create_disposition);
	req->CreateOptions = cpu_to_le32(create_options);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
			- 8 /* pad */ - 4 /* do not count rfc1001 len field */);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	if (uni_path_len >= 4) {
		req->NameLength = cpu_to_le16(uni_path_len - 2);
		/* -1 since last byte is buf[0] which is sent below (path) */
		iov[0].iov_len--;
		if (uni_path_len % 8 != 0) {
			copy_size = uni_path_len / 8 * 8;
			if (copy_size < uni_path_len)
				copy_size += 8;

			copy_path = kzalloc(copy_size, GFP_KERNEL);
			if (!copy_path)
				return -ENOMEM;
			memcpy((char *)copy_path, (const char *)path,
			       uni_path_len);
			uni_path_len = copy_size;
			path = copy_path;
		}

		iov[1].iov_len = uni_path_len;
		iov[1].iov_base = path;
		/*
		 * -1 since last byte is buf[0] which was counted in
		 * smb2_buf_len.
		 */
		inc_rfc1001_len(req, uni_path_len - 1);
	} else {
		iov[0].iov_len += 7;
		req->hdr.smb2_buf_length = cpu_to_be32(be32_to_cpu(
				req->hdr.smb2_buf_length) + 8 - 1);
		num_iovecs = 1;
		req->NameLength = 0;
	}

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		iov[num_iovecs].iov_base = create_lease_buf(oplock+1, *oplock);
		if (iov[num_iovecs].iov_base == NULL) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return -ENOMEM;
		}
		iov[num_iovecs].iov_len = sizeof(struct create_lease);
		req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
		req->CreateContextsOffset = cpu_to_le32(
			sizeof(struct smb2_create_req) - 4 - 8 +
			iov[num_iovecs-1].iov_len);
		req->CreateContextsLength = cpu_to_le32(
			sizeof(struct create_lease));
		inc_rfc1001_len(&req->hdr, sizeof(struct create_lease));
		num_iovecs++;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		goto creat_exit;
	}

	*persistent_fid = rsp->PersistentFileId;
	*volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(rsp);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

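/*
 * Close the file identified by the persistent/volatile id pair with an SMB2
 * CLOSE request.
 */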
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		if (tcon)
			cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

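/*
 * Sanity check a variable length response buffer: it must be at least
 * min_buf_size bytes, must not exceed the RFC1001 maximum, and must lie
 * entirely within the received SMB.
 */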
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)

{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;


	if (buffer_length < min_buf_size) {
		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
			 buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
			 buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cifs_dbg(VFS, "illegal server response, bad offset to data\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)

{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}

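/*
 * Common helper for SMB2 QUERY_INFO requests on an open handle: send the
 * request for the given info_class and copy the validated response into
 * the caller's data buffer.
 */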
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Query Info\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	DeleteMidQEntry(mid);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

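/*
 * Send an asynchronous SMB2 ECHO to keep the connection to the server alive;
 * completion is handled by smb2_echo_callback() above.
 */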
int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "In echo request\n");

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}

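/*
 * Ask the server to flush cached data for the given open handle with an
 * SMB2 FLUSH request.
 */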
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Flush\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if ((rc != 0) && tcon)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}

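/*
 * Completion handler for async reads: verify the signature when signing is
 * active, account the bytes read, propagate the result to the readdata work
 * item and return the credits granted by the server.
 */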
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sign) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, credits_received, 0);
}

1367/* smb2_async_readv - send an async write, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;
	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc)
		return rc;

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, 0);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}

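/*
 * Synchronous SMB2 READ. On success *nbytes holds the length returned by the
 * server and the data is either copied into *buf or handed back as the
 * response buffer itself, with *buf_type describing which pool it came from.
 */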
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
	  unsigned int *nbytes, char **buf, int *buf_type)
{
	int resp_buftype, rc = -EACCES;
	struct smb2_read_rsp *rsp = NULL;
	struct kvec iov[1];

	*nbytes = 0;
	rc = smb2_new_read_req(iov, io_parms, 0, 0);
	if (rc)
		return rc;

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
			  &resp_buftype, CIFS_LOG_ERROR);

	rsp = (struct smb2_read_rsp *)iov[0].iov_base;

	if (rsp->hdr.Status == STATUS_END_OF_FILE) {
		free_rsp_buf(resp_buftype, iov[0].iov_base);
		return 0;
	}

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
		cifs_dbg(VFS, "Send error in read = %d\n", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
		if ((*nbytes > CIFS_MAX_MSGSIZE) ||
		    (*nbytes > io_parms->length)) {
			cifs_dbg(FYI, "bad length %d for count %d\n",
				 *nbytes, io_parms->length);
			rc = -EIO;
			*nbytes = 0;
		}
	}

	if (*buf) {
		memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
		       *nbytes);
		free_rsp_buf(resp_buftype, iov[0].iov_base);
	} else if (resp_buftype != CIFS_NO_BUFFER) {
		*buf = iov[0].iov_base;
		if (resp_buftype == CIFS_SMALL_BUFFER)
			*buf_type = CIFS_SMALL_BUFFER;
		else if (resp_buftype == CIFS_LARGE_BUFFER)
			*buf_type = CIFS_LARGE_BUFFER;
	}
	return rc;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	unsigned int written;
	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
		wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	if (wdata->result)
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

	queue_work(cifsiod_wq, &wdata->work);
	DeleteMidQEntry(mid);
	add_credits(tcon->ses->server, credits_received, 0);
}

1510/* smb2_async_writev - send an async write, and set up mid to handle result */
1511int
1512smb2_async_writev(struct cifs_writedata *wdata)
1513{
Jeff Laytoneddb0792012-09-18 16:20:35 -07001514 int rc = -EACCES;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001515 struct smb2_write_req *req = NULL;
1516 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001517 struct kvec iov;
Jeff Laytonfec344e2012-09-18 16:20:35 -07001518 struct smb_rqst rqst;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001519
1520 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
1521 if (rc)
1522 goto async_writev_out;
1523
Pavel Shilovsky33319142012-09-18 16:20:29 -07001524 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
1525
1526 req->PersistentFileId = wdata->cfile->fid.persistent_fid;
1527 req->VolatileFileId = wdata->cfile->fid.volatile_fid;
1528 req->WriteChannelInfoOffset = 0;
1529 req->WriteChannelInfoLength = 0;
1530 req->Channel = 0;
1531 req->Offset = cpu_to_le64(wdata->offset);
1532 /* 4 for rfc1002 length field */
1533 req->DataOffset = cpu_to_le16(
1534 offsetof(struct smb2_write_req, Buffer) - 4);
1535 req->RemainingBytes = 0;
1536
1537 /* 4 for rfc1002 length field and 1 for Buffer */
Jeff Laytoneddb0792012-09-18 16:20:35 -07001538 iov.iov_len = get_rfc1002_length(req) + 4 - 1;
1539 iov.iov_base = req;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001540
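	/*
	 * The request header travels in the single kvec; the write payload
	 * is sent directly from the page array attached to wdata.
	 */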
Jeff Laytoneddb0792012-09-18 16:20:35 -07001541 rqst.rq_iov = &iov;
1542 rqst.rq_nvec = 1;
1543 rqst.rq_pages = wdata->pages;
1544 rqst.rq_npages = wdata->nr_pages;
1545 rqst.rq_pagesz = wdata->pagesz;
1546 rqst.rq_tailsz = wdata->tailsz;
Pavel Shilovsky33319142012-09-18 16:20:29 -07001547
Joe Perchesf96637b2013-05-04 22:12:25 -05001548 cifs_dbg(FYI, "async write at %llu %u bytes\n",
1549 wdata->offset, wdata->bytes);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001550
1551 req->Length = cpu_to_le32(wdata->bytes);
1552
1553 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
1554
1555 kref_get(&wdata->refcount);
Jeff Laytonfec344e2012-09-18 16:20:35 -07001556 rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
1557 smb2_writev_callback, wdata, 0);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001558
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001559 if (rc) {
Pavel Shilovsky33319142012-09-18 16:20:29 -07001560 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001561 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
1562 }
Pavel Shilovsky33319142012-09-18 16:20:29 -07001563
Pavel Shilovsky33319142012-09-18 16:20:29 -07001564async_writev_out:
1565 cifs_small_buf_release(req);
Pavel Shilovsky33319142012-09-18 16:20:29 -07001566 return rc;
1567}
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001568
1569/*
1570 * SMB2_write is passed an iov pointer to a kvec array and n_vec, the number
1571 * of elements holding data to write.  n_vec must be at least 1, and the data
1572 * elements begin at position 1 in the iov array (iov[0] is reserved for the
1573 * request header).  The total data length is given by io_parms->length.
1574 */
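/*
 * For example, a hypothetical caller with a single data buffer might set up
 * the array as below and let SMB2_write() fill in iov[0] (names such as
 * "data" and "io_parms" are only illustrative):
 *
 *	struct kvec iov[2];
 *	unsigned int nbytes;
 *	int rc;
 *
 *	iov[1].iov_base = data;
 *	iov[1].iov_len = io_parms.length;
 *	rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
 */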
1575int
1576SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
1577 unsigned int *nbytes, struct kvec *iov, int n_vec)
1578{
1579 int rc = 0;
1580 struct smb2_write_req *req = NULL;
1581 struct smb2_write_rsp *rsp = NULL;
1582 int resp_buftype;
1583 *nbytes = 0;
1584
1585 if (n_vec < 1)
1586 return rc;
1587
1588 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
1589 if (rc)
1590 return rc;
1591
	if (io_parms->tcon->ses->server == NULL) {
		cifs_small_buf_release(req);
		return -ECONNABORTED;
	}
1594
1595 req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
1596
1597 req->PersistentFileId = io_parms->persistent_fid;
1598 req->VolatileFileId = io_parms->volatile_fid;
1599 req->WriteChannelInfoOffset = 0;
1600 req->WriteChannelInfoLength = 0;
1601 req->Channel = 0;
1602 req->Length = cpu_to_le32(io_parms->length);
1603 req->Offset = cpu_to_le64(io_parms->offset);
1604 /* 4 for rfc1002 length field */
1605 req->DataOffset = cpu_to_le16(
1606 offsetof(struct smb2_write_req, Buffer) - 4);
1607 req->RemainingBytes = 0;
1608
1609 iov[0].iov_base = (char *)req;
1610 /* 4 for rfc1002 length field and 1 for Buffer */
1611 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1612
1613 /* length of entire message including data to be written */
1614 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
1615
1616 rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
1617 &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001618 rsp = (struct smb2_write_rsp *)iov[0].iov_base;
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001619
1620 if (rc) {
1621 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001622 cifs_dbg(VFS, "Send error in write = %d\n", rc);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001623 } else
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001624 *nbytes = le32_to_cpu(rsp->DataLength);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001625
1626 free_rsp_buf(resp_buftype, rsp);
Pavel Shilovsky009d3442012-09-18 16:20:30 -07001627 return rc;
1628}
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001629
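/*
 * Count the directory entries in a search response buffer, following the
 * NextEntryOffset chain and stopping at the first entry that would extend
 * past end_of_buf.  On return *lastentry points to the last entry counted.
 */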
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001630static unsigned int
1631num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
1632{
1633 int len;
1634 unsigned int entrycount = 0;
1635 unsigned int next_offset = 0;
1636 FILE_DIRECTORY_INFO *entryptr;
1637
1638 if (bufstart == NULL)
1639 return 0;
1640
1641 entryptr = (FILE_DIRECTORY_INFO *)bufstart;
1642
1643 while (1) {
1644 entryptr = (FILE_DIRECTORY_INFO *)
1645 ((char *)entryptr + next_offset);
1646
1647 if ((char *)entryptr + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001648 cifs_dbg(VFS, "malformed search entry would overflow\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001649 break;
1650 }
1651
1652 len = le32_to_cpu(entryptr->FileNameLength);
1653 if ((char *)entryptr + len + size > end_of_buf) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001654 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
1655 end_of_buf);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001656 break;
1657 }
1658
1659 *lastentry = (char *)entryptr;
1660 entrycount++;
1661
1662 next_offset = le32_to_cpu(entryptr->NextEntryOffset);
1663 if (!next_offset)
1664 break;
1665 }
1666
1667 return entrycount;
1668}
1669
1670/*
1671 * Readdir/FindFirst
1672 */
1673int
1674SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
1675 u64 persistent_fid, u64 volatile_fid, int index,
1676 struct cifs_search_info *srch_inf)
1677{
1678 struct smb2_query_directory_req *req;
1679 struct smb2_query_directory_rsp *rsp = NULL;
1680 struct kvec iov[2];
1681 int rc = 0;
1682 int len;
1683 int resp_buftype;
1684 unsigned char *bufptr;
1685 struct TCP_Server_Info *server;
1686 struct cifs_ses *ses = tcon->ses;
1687	__le16 asterisk = cpu_to_le16('*');
1688 char *end_of_smb;
1689 unsigned int output_size = CIFSMaxBufSize;
1690 size_t info_buf_size;
1691
1692 if (ses && (ses->server))
1693 server = ses->server;
1694 else
1695 return -EIO;
1696
1697 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
1698 if (rc)
1699 return rc;
1700
1701 switch (srch_inf->info_level) {
1702 case SMB_FIND_FILE_DIRECTORY_INFO:
1703 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
1704 info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
1705 break;
1706 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
1707 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
1708 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
1709 break;
1710 default:
Joe Perchesf96637b2013-05-04 22:12:25 -05001711 cifs_dbg(VFS, "info level %u isn't supported\n",
1712 srch_inf->info_level);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001713 rc = -EINVAL;
1714 goto qdir_exit;
1715 }
1716
1717 req->FileIndex = cpu_to_le32(index);
1718 req->PersistentFileId = persistent_fid;
1719 req->VolatileFileId = volatile_fid;
1720
1721 len = 0x2;
1722 bufptr = req->Buffer;
1723	memcpy(bufptr, &asterisk, len);
1724
1725 req->FileNameOffset =
1726 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
1727 req->FileNameLength = cpu_to_le16(len);
1728 /*
1729 * BB could be 30 bytes or so longer if we used SMB2 specific
1730 * buffer lengths, but this is safe and close enough.
1731 */
1732 output_size = min_t(unsigned int, output_size, server->maxBuf);
1733 output_size = min_t(unsigned int, output_size, 2 << 15);
1734 req->OutputBufferLength = cpu_to_le32(output_size);
1735
1736 iov[0].iov_base = (char *)req;
1737 /* 4 for RFC1001 length and 1 for Buffer */
1738 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
1739
1740 iov[1].iov_base = (char *)(req->Buffer);
1741 iov[1].iov_len = len;
1742
1743 inc_rfc1001_len(req, len - 1 /* Buffer */);
1744
1745 rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
Pavel Shilovskye5d04882012-09-19 16:03:26 +04001746 rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
1747
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001748 if (rc) {
1749 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
1750 goto qdir_exit;
1751 }
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001752
1753 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
1754 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
1755 info_buf_size);
1756 if (rc)
1757 goto qdir_exit;
1758
1759 srch_inf->unicode = true;
1760
1761 if (srch_inf->ntwrk_buf_start) {
1762 if (srch_inf->smallBuf)
1763 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
1764 else
1765 cifs_buf_release(srch_inf->ntwrk_buf_start);
1766 }
1767 srch_inf->ntwrk_buf_start = (char *)rsp;
1768 srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
1769 (char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
1770 /* 4 for rfc1002 length field */
1771 end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
1772 srch_inf->entries_in_buffer =
1773 num_entries(srch_inf->srch_entries_start, end_of_smb,
1774 &srch_inf->last_entry, info_buf_size);
1775 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
Joe Perchesf96637b2013-05-04 22:12:25 -05001776 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
1777 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
1778 srch_inf->srch_entries_start, srch_inf->last_entry);
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001779 if (resp_buftype == CIFS_LARGE_BUFFER)
1780 srch_inf->smallBuf = false;
1781 else if (resp_buftype == CIFS_SMALL_BUFFER)
1782 srch_inf->smallBuf = true;
1783 else
Joe Perchesf96637b2013-05-04 22:12:25 -05001784 cifs_dbg(VFS, "illegal search buffer type\n");
Pavel Shilovskyd324f08d2012-09-18 16:20:33 -07001785
1786 if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
1787 srch_inf->endOfSearch = 1;
1788 else
1789 srch_inf->endOfSearch = 0;
1790
1791 return rc;
1792
1793qdir_exit:
1794 free_rsp_buf(resp_buftype, rsp);
1795 return rc;
1796}
1797
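/*
 * Build and send an SMB2 SET_INFO request for an open file.  data[0] and
 * size[0] describe the fixed info structure, which is copied into the
 * request buffer; any further data/size pairs are appended to the request
 * as additional iovecs.
 */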
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001798static int
1799send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001800 u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001801 unsigned int num, void **data, unsigned int *size)
1802{
1803 struct smb2_set_info_req *req;
1804 struct smb2_set_info_rsp *rsp = NULL;
1805 struct kvec *iov;
1806 int rc = 0;
1807 int resp_buftype;
1808 unsigned int i;
1809 struct TCP_Server_Info *server;
1810 struct cifs_ses *ses = tcon->ses;
1811
1812 if (ses && (ses->server))
1813 server = ses->server;
1814 else
1815 return -EIO;
1816
1817 if (!num)
1818 return -EINVAL;
1819
1820 iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
1821 if (!iov)
1822 return -ENOMEM;
1823
1824 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
1825 if (rc) {
1826 kfree(iov);
1827 return rc;
1828 }
1829
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001830 req->hdr.ProcessId = cpu_to_le32(pid);
1831
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001832 req->InfoType = SMB2_O_INFO_FILE;
1833 req->FileInfoClass = info_class;
1834 req->PersistentFileId = persistent_fid;
1835 req->VolatileFileId = volatile_fid;
1836
1837 /* 4 for RFC1001 length and 1 for Buffer */
1838 req->BufferOffset =
1839 cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
1840 req->BufferLength = cpu_to_le32(*size);
1841
1842 inc_rfc1001_len(req, *size - 1 /* Buffer */);
1843
1844 memcpy(req->Buffer, *data, *size);
1845
1846 iov[0].iov_base = (char *)req;
1847 /* 4 for RFC1001 length */
1848 iov[0].iov_len = get_rfc1002_length(req) + 4;
1849
1850 for (i = 1; i < num; i++) {
1851 inc_rfc1001_len(req, size[i]);
1852 le32_add_cpu(&req->BufferLength, size[i]);
1853 iov[i].iov_base = (char *)data[i];
1854 iov[i].iov_len = size[i];
1855 }
1856
1857 rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
1858 rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
1859
1860 if (rc != 0) {
1861 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
1862 goto out;
1863 }
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001864out:
1865 free_rsp_buf(resp_buftype, rsp);
1866 kfree(iov);
1867 return rc;
1868}
1869
1870int
1871SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
1872 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
1873{
1874 struct smb2_file_rename_info info;
1875 void **data;
1876 unsigned int size[2];
1877 int rc;
1878 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
1879
1880 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
1881 if (!data)
1882 return -ENOMEM;
1883
1884 info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
1885 /* 0 = fail if target already exists */
1886	info.RootDirectory = 0; /* MBZ for network ops (unclear why the spec mentions it) */
1887 info.FileNameLength = cpu_to_le32(len);
1888
1889 data[0] = &info;
1890 size[0] = sizeof(struct smb2_file_rename_info);
1891
1892 data[1] = target_file;
1893 size[1] = len + 2 /* null */;
1894
1895 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001896 current->tgid, FILE_RENAME_INFORMATION, 2, data,
1897 size);
Pavel Shilovsky35143eb2012-09-18 16:20:31 -07001898 kfree(data);
1899 return rc;
1900}
Pavel Shilovsky568798c2012-09-18 16:20:31 -07001901
1902int
1903SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
1904 u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
1905{
1906 struct smb2_file_link_info info;
1907 void **data;
1908 unsigned int size[2];
1909 int rc;
1910 int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
1911
1912 data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
1913 if (!data)
1914 return -ENOMEM;
1915
1916 info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
1917 /* 0 = fail if link already exists */
1918	info.RootDirectory = 0; /* MBZ for network ops (unclear why the spec mentions it) */
1919 info.FileNameLength = cpu_to_le32(len);
1920
1921 data[0] = &info;
1922 size[0] = sizeof(struct smb2_file_link_info);
1923
1924 data[1] = target_file;
1925 size[1] = len + 2 /* null */;
1926
1927 rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001928 current->tgid, FILE_LINK_INFORMATION, 2, data, size);
Pavel Shilovsky568798c2012-09-18 16:20:31 -07001929 kfree(data);
1930 return rc;
1931}
Pavel Shilovskyc839ff22012-09-18 16:20:32 -07001932
1933int
1934SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1935 u64 volatile_fid, u32 pid, __le64 *eof)
1936{
1937 struct smb2_file_eof_info info;
1938 void *data;
1939 unsigned int size;
1940
1941 info.EndOfFile = *eof;
1942
1943 data = &info;
1944 size = sizeof(struct smb2_file_eof_info);
1945
1946 return send_set_info(xid, tcon, persistent_fid, volatile_fid, pid,
1947 FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
1948}
Pavel Shilovsky1feeaac2012-09-18 16:20:32 -07001949
1950int
1951SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
1952 u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
1953{
1954 unsigned int size;
1955 size = sizeof(FILE_BASIC_INFO);
1956 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
1957 current->tgid, FILE_BASIC_INFORMATION, 1,
1958 (void **)&buf, &size);
1959}
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001960
1961int
1962SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
1963 const u64 persistent_fid, const u64 volatile_fid,
1964 __u8 oplock_level)
1965{
1966 int rc;
1967 struct smb2_oplock_break *req = NULL;
1968
Joe Perchesf96637b2013-05-04 22:12:25 -05001969 cifs_dbg(FYI, "SMB2_oplock_break\n");
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001970 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
1971
1972 if (rc)
1973 return rc;
1974
1975 req->VolatileFid = volatile_fid;
1976 req->PersistentFid = persistent_fid;
1977 req->OplockLevel = oplock_level;
1978 req->hdr.CreditRequest = cpu_to_le16(1);
1979
1980 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
1981 /* SMB2 buffer freed by function above */
1982
1983 if (rc) {
1984 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05001985 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
Pavel Shilovsky983c88a2012-09-18 16:20:33 -07001986 }
1987
1988 return rc;
1989}
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07001990
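/*
 * Fill in struct kstatfs from an SMB2 FS_FULL_SIZE_INFORMATION reply.
 * f_bsize is the allocation unit size in bytes, e.g. 512-byte sectors with
 * 8 sectors per allocation unit give an f_bsize of 4096; the block counts
 * are already expressed in allocation units.
 */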
1991static void
1992copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
1993 struct kstatfs *kst)
1994{
1995 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
1996 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
1997 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
1998 kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
1999 kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
2000 return;
2001}
2002
2003static int
2004build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
2005 int outbuf_len, u64 persistent_fid, u64 volatile_fid)
2006{
2007 int rc;
2008 struct smb2_query_info_req *req;
2009
Joe Perchesf96637b2013-05-04 22:12:25 -05002010 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
Pavel Shilovsky6fc05c22012-09-18 16:20:34 -07002011
2012 if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
2013 return -EIO;
2014
2015 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
2016 if (rc)
2017 return rc;
2018
2019 req->InfoType = SMB2_O_INFO_FILESYSTEM;
2020 req->FileInfoClass = level;
2021 req->PersistentFileId = persistent_fid;
2022 req->VolatileFileId = volatile_fid;
2023 /* 4 for rfc1002 length field and 1 for pad */
2024 req->InputBufferOffset =
2025 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
2026 req->OutputBufferLength = cpu_to_le32(
2027 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
2028
2029 iov->iov_base = (char *)req;
2030 /* 4 for rfc1002 length field */
2031 iov->iov_len = get_rfc1002_length(req) + 4;
2032 return 0;
2033}
2034
2035int
2036SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
2037 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
2038{
2039 struct smb2_query_info_rsp *rsp = NULL;
2040 struct kvec iov;
2041 int rc = 0;
2042 int resp_buftype;
2043 struct cifs_ses *ses = tcon->ses;
2044 struct smb2_fs_full_size_info *info = NULL;
2045
2046 rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
2047 sizeof(struct smb2_fs_full_size_info),
2048 persistent_fid, volatile_fid);
2049 if (rc)
2050 return rc;
2051
2052 rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
2053 if (rc) {
2054 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
2055 goto qinf_exit;
2056 }
2057 rsp = (struct smb2_query_info_rsp *)iov.iov_base;
2058
2059 info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
2060 le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
2061 rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
2062 le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
2063 sizeof(struct smb2_fs_full_size_info));
2064 if (!rc)
2065 copy_fs_info_to_kstatfs(info, fsdata);
2066
2067qinf_exit:
2068 free_rsp_buf(resp_buftype, iov.iov_base);
2069 return rc;
2070}
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002071
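/*
 * Send a single SMB2_LOCK request carrying num_lock lock/unlock elements
 * (buf) for the open file identified by persist_fid/volatile_fid, on behalf
 * of the process identified by pid.
 */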
2072int
2073smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
2074 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2075 const __u32 num_lock, struct smb2_lock_element *buf)
2076{
2077 int rc = 0;
2078 struct smb2_lock_req *req = NULL;
2079 struct kvec iov[2];
2080 int resp_buf_type;
2081 unsigned int count;
2082
Joe Perchesf96637b2013-05-04 22:12:25 -05002083 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002084
2085 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
2086 if (rc)
2087 return rc;
2088
2089 req->hdr.ProcessId = cpu_to_le32(pid);
2090 req->LockCount = cpu_to_le16(num_lock);
2091
2092 req->PersistentFileId = persist_fid;
2093 req->VolatileFileId = volatile_fid;
2094
2095 count = num_lock * sizeof(struct smb2_lock_element);
2096 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
2097
2098 iov[0].iov_base = (char *)req;
2099 /* 4 for rfc1002 length field and count for all locks */
2100 iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
2101 iov[1].iov_base = (char *)buf;
2102 iov[1].iov_len = count;
2103
2104 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
2105 rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
2106 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002107 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07002108 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
2109 }
2110
2111 return rc;
2112}
2113
2114int
2115SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
2116 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
2117 const __u64 length, const __u64 offset, const __u32 lock_flags,
2118 const bool wait)
2119{
2120 struct smb2_lock_element lock;
2121
2122 lock.Offset = cpu_to_le64(offset);
2123 lock.Length = cpu_to_le64(length);
2124 lock.Flags = cpu_to_le32(lock_flags);
2125 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
2126 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
2127
2128 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
2129}
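/*
 * For example, a hypothetical caller taking a non-blocking exclusive
 * byte-range lock on the first 4096 bytes of an open file (cfile here is
 * only illustrative) might use:
 *
 *	rc = SMB2_lock(xid, tcon, cfile->fid.persistent_fid,
 *		       cfile->fid.volatile_fid, current->tgid,
 *		       4096, 0, SMB2_LOCKFLAG_EXCLUSIVE_LOCK, false);
 */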
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002130
2131int
2132SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
2133 __u8 *lease_key, const __le32 lease_state)
2134{
2135 int rc;
2136 struct smb2_lease_ack *req = NULL;
2137
Joe Perchesf96637b2013-05-04 22:12:25 -05002138 cifs_dbg(FYI, "SMB2_lease_break\n");
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002139 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
2140
2141 if (rc)
2142 return rc;
2143
2144 req->hdr.CreditRequest = cpu_to_le16(1);
2145 req->StructureSize = cpu_to_le16(36);
2146 inc_rfc1001_len(req, 12);
2147
2148 memcpy(req->LeaseKey, lease_key, 16);
2149 req->LeaseState = lease_state;
2150
2151 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
2152 /* SMB2 buffer freed by function above */
2153
2154 if (rc) {
2155 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
Joe Perchesf96637b2013-05-04 22:12:25 -05002156 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
Pavel Shilovsky0822f512012-09-19 06:22:45 -07002157 }
2158
2159 return rc;
2160}