/*
 * SMB1 (CIFS) version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published
 * by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pagemap.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifspdu.h"

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, void *buf,
	       struct mid_q_entry *mid)
{
	int rc = 0;
	struct smb_hdr *in_buf = (struct smb_hdr *)buf;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
	     in_buf->Mid, rc);

	return rc;
}

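/* Two SMB1 open files refer to the same handle when their netfids match. */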
static bool
cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
{
	return ob1->fid.netfid == ob2->fid.netfid;
}

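/* Pull the payload offset and length out of an SMB1 READ response buffer. */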
static unsigned int
cifs_read_data_offset(char *buf)
{
	READ_RSP *rsp = (READ_RSP *)buf;
	return le16_to_cpu(rsp->DataOffset);
}

static unsigned int
cifs_read_data_length(char *buf)
{
	READ_RSP *rsp = (READ_RSP *)buf;
	return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
	       le16_to_cpu(rsp->DataLength);
}

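/*
 * Walk the pending mid queue and return the entry whose mid and command
 * match this response, or NULL if no submitted request is waiting for it.
 */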
static struct mid_q_entry *
cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct mid_q_entry *mid;

	spin_lock(&GlobalMid_Lock);
	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
		if (mid->mid == buf->Mid &&
		    mid->mid_state == MID_REQUEST_SUBMITTED &&
		    le16_to_cpu(mid->command) == buf->Command) {
			spin_unlock(&GlobalMid_Lock);
			return mid;
		}
	}
	spin_unlock(&GlobalMid_Lock);
	return NULL;
}

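/* Return request slots to the server's pool and wake up any waiters. */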
static void
cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add,
		 const int optype)
{
	spin_lock(&server->req_lock);
	server->credits += add;
	server->in_flight--;
	spin_unlock(&server->req_lock);
	wake_up(&server->request_q);
}

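/* Set the credit count directly; oplocks stay enabled only with more than one credit. */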
static void
cifs_set_credits(struct TCP_Server_Info *server, const int val)
{
	spin_lock(&server->req_lock);
	server->credits = val;
	server->oplocks = val > 1 ? enable_oplocks : false;
	spin_unlock(&server->req_lock);
}

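/* SMB1 keeps a single credit counter regardless of the operation type. */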
static int *
cifs_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
	return &server->credits;
}

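/* Every SMB1 request consumes exactly one credit. */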
static unsigned int
cifs_get_credits(struct mid_q_entry *mid)
{
	return 1;
}

/*
 * Find a free multiplex id (SMB mid). Otherwise there could be
 * mid collisions which might cause problems, demultiplexing the
 * wrong response to this request. Multiplex ids could collide if
 * one of a series of requests takes much longer than the others, or
 * if a very large number of long lived requests (byte range
 * locks or FindNotify requests) are pending. No more than
 * 64K-1 requests can be outstanding at one time. If no
 * mids are available, return zero. A future optimization
 * could make the combination of mids and uid the key we use
 * to demultiplex on (rather than mid alone).
 * In addition to the above check, the cifs demultiplex
 * code already uses the command code as a secondary
 * check of the frame and, if signing is negotiated, the
 * response would be discarded if the mid were the same
 * but the signature was wrong. Since the mid is not put in the
 * pending queue until later (when it is about to be dispatched)
 * we do have to limit the number of outstanding requests
 * to somewhat less than 64K-1 although it is hard to imagine
 * so many threads being in the vfs at one time.
 */
static __u64
cifs_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 mid = 0;
	__u16 last_mid, cur_mid;
	bool collision;

	spin_lock(&GlobalMid_Lock);

	/* mid is 16 bit only for CIFS/SMB */
	cur_mid = (__u16)((server->CurrentMid) & 0xffff);
	/* we do not want to loop forever */
	last_mid = cur_mid;
	cur_mid++;

	/*
	 * This nested loop looks more expensive than it is.
	 * In practice the list of pending requests is short,
	 * fewer than 50, and the mids are likely to be unique
	 * on the first pass through the loop unless some request
	 * takes longer than the 64 thousand requests before it
	 * (and it would also have to have been a request that
	 * did not time out).
	 */
	while (cur_mid != last_mid) {
		struct mid_q_entry *mid_entry;
		unsigned int num_mids;

		collision = false;
		if (cur_mid == 0)
			cur_mid++;

		num_mids = 0;
		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
			++num_mids;
			if (mid_entry->mid == cur_mid &&
			    mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
				/* This mid is in use, try a different one */
				collision = true;
				break;
			}
		}

		/*
		 * if we have more than 32k mids in the list, then something
		 * is very wrong. Possibly a local user is trying to DoS the
		 * box by issuing long-running calls and SIGKILL'ing them. If
		 * we get to 2^16 mids then we're in big trouble as this
		 * function could loop forever.
		 *
		 * Go ahead and assign out the mid in this situation, but force
		 * an eventual reconnect to clean out the pending_mid_q.
		 */
		if (num_mids > 32768)
			server->tcpStatus = CifsNeedReconnect;

		if (!collision) {
			mid = (__u64)cur_mid;
			server->CurrentMid = mid;
			break;
		}
		cur_mid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}

/*
 * return codes:
 *	0	not a transact2, or all data present
 *	>0	transact2 with that much data missing
 *	-EINVAL	invalid transact2
 */
static int
check2ndT2(char *buf)
{
	struct smb_hdr *pSMB = (struct smb_hdr *)buf;
	struct smb_t2_rsp *pSMBt;
	int remaining;
	__u16 total_data_size, data_in_this_rsp;

	if (pSMB->Command != SMB_COM_TRANSACTION2)
		return 0;

	/* check for plausible wct, bcc and t2 data and parm sizes */
	/* check for parm and data offset going beyond end of smb */
	if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
		cFYI(1, "invalid transact2 word count");
		return -EINVAL;
	}

	pSMBt = (struct smb_t2_rsp *)pSMB;

	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

	if (total_data_size == data_in_this_rsp)
		return 0;
	else if (total_data_size < data_in_this_rsp) {
		cFYI(1, "total data %d smaller than data in frame %d",
		     total_data_size, data_in_this_rsp);
		return -EINVAL;
	}

	remaining = total_data_size - data_in_this_rsp;

	cFYI(1, "missing %d bytes from transact2, check next response",
	     remaining);
	if (total_data_size > CIFSMaxBufSize) {
		cERROR(1, "TotalDataSize %d is over maximum buffer %d",
		       total_data_size, CIFSMaxBufSize);
		return -EINVAL;
	}
	return remaining;
}

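/*
 * Merge the data area of a secondary transact2 response into the first
 * (target) response buffer, fixing up its DataCount, byte count and RFC1001
 * length. Returns 1 if more secondary responses are still expected, 0 when
 * the transaction is complete, and a negative errno on a malformed frame.
 */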
static int
coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
{
	struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
	struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
	char *data_area_of_tgt;
	char *data_area_of_src;
	int remaining;
	unsigned int byte_count, total_in_tgt;
	__u16 tgt_total_cnt, src_total_cnt, total_in_src;

	src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
	tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);

	if (tgt_total_cnt != src_total_cnt)
		cFYI(1, "total data count of primary and secondary t2 differ "
			"source=%hu target=%hu", src_total_cnt, tgt_total_cnt);

	total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

	remaining = tgt_total_cnt - total_in_tgt;

	if (remaining < 0) {
		cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
			"total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
		return -EPROTO;
	}

	if (remaining == 0) {
		/* nothing to do, ignore */
		cFYI(1, "no more data remains");
		return 0;
	}

	total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
	if (remaining < total_in_src)
		cFYI(1, "transact2 2nd response contains too much data");

	/* find end of first SMB data area */
	data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
			   get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);

	/* validate target area */
	data_area_of_src = (char *)&pSMBs->hdr.Protocol +
			   get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);

	data_area_of_tgt += total_in_tgt;

	total_in_tgt += total_in_src;
	/* is the result too big for the field? */
	if (total_in_tgt > USHRT_MAX) {
		cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
		return -EPROTO;
	}
	put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);

	/* fix up the BCC */
	byte_count = get_bcc(target_hdr);
	byte_count += total_in_src;
	/* is the result too big for the field? */
	if (byte_count > USHRT_MAX) {
		cFYI(1, "coalesced BCC too large (%u)", byte_count);
		return -EPROTO;
	}
	put_bcc(byte_count, target_hdr);

	byte_count = be32_to_cpu(target_hdr->smb_buf_length);
	byte_count += total_in_src;
	/* don't allow buffer to overflow */
	if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
		return -ENOBUFS;
	}
	target_hdr->smb_buf_length = cpu_to_be32(byte_count);

	/* copy second buffer into end of first buffer */
	memcpy(data_area_of_tgt, data_area_of_src, total_in_src);

	if (remaining != total_in_src) {
		/* more responses to go */
		cFYI(1, "waiting for more secondary responses");
		return 1;
	}

	/* we are done */
	cFYI(1, "found the last secondary response");
	return 0;
}

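/*
 * Returns true when the response is part of a multi-part transact2 that was
 * handled here (stashed as the first buffer, coalesced, or dequeued), false
 * when normal dispatch should continue.
 */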
static bool
cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		  char *buf, int malformed)
{
	if (malformed)
		return false;
	if (check2ndT2(buf) <= 0)
		return false;
	mid->multiRsp = true;
	if (mid->resp_buf) {
		/* merge response - fix up 1st*/
		malformed = coalesce_t2(buf, mid->resp_buf);
		if (malformed > 0)
			return true;
		/* All parts received or packet is malformed. */
		mid->multiEnd = true;
		dequeue_mid(mid, malformed);
		return true;
	}
	if (!server->large_buf) {
		/*FIXME: switch to already allocated largebuf?*/
		cERROR(1, "1st trans2 resp needs bigbuf");
	} else {
		/* Have first buffer */
		mid->resp_buf = buf;
		mid->large_buf = true;
		server->bigbuf = NULL;
	}
	return true;
}

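/* Negotiate is needed whenever no buffer size has been negotiated yet. */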
static bool
cifs_need_neg(struct TCP_Server_Info *server)
{
	return server->maxBuf == 0;
}

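/*
 * Send the SMB negotiate request; on -EAGAIN during a first-time connection,
 * reset the credit count to one and retry once before returning -EHOSTDOWN.
 */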
static int
cifs_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
	int rc;
	rc = CIFSSMBNegotiate(xid, ses);
	if (rc == -EAGAIN) {
		/* retry only once on 1st time connection */
		set_credits(ses->server, 1);
		rc = CIFSSMBNegotiate(xid, ses);
		if (rc == -EAGAIN)
			rc = -EHOSTDOWN;
	}
	return rc;
}

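/*
 * Pick a write size: start from the mount option (or a default based on
 * whether POSIX large writes are available), then clamp to what the server's
 * capabilities, signing mode and the kmap limit allow.
 */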
static unsigned int
cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int wsize;

	/* start with specified wsize, or default */
	if (volume_info->wsize)
		wsize = volume_info->wsize;
	else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
		wsize = CIFS_DEFAULT_IOSIZE;
	else
		wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;

	/* can server support 24-bit write sizes? (via UNIX extensions) */
	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
		wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE);

	/*
	 * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set?
	 * Limit it to max buffer offered by the server, minus the size of the
	 * WRITEX header, not including the 4 byte RFC1001 length.
	 */
	if (!(server->capabilities & CAP_LARGE_WRITE_X) ||
	    (!(server->capabilities & CAP_UNIX) &&
	     (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED))))
		wsize = min_t(unsigned int, wsize,
			      server->maxBuf - sizeof(WRITE_REQ) + 4);

	/* limit to the amount that we can kmap at once */
	wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);

	/* hard limit of CIFS_MAX_WSIZE */
	wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);

	return wsize;
}

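/*
 * Pick a read size the same way: mount option or a capability-based default,
 * clamped to the client buffer size, the kmap limit and CIFS_MAX_RSIZE.
 */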
static unsigned int
cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize, defsize;

	/*
	 * Set default value...
	 *
	 * HACK alert! Ancient servers have very small buffers. Even though
	 * MS-CIFS indicates that servers are only limited by the client's
	 * bufsize for reads, testing against win98se shows that it throws
	 * INVALID_PARAMETER errors if you try to request too large a read.
	 * OS/2 just sends back short reads.
	 *
	 * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
	 * it can't handle a read request larger than its MaxBufferSize either.
	 */
	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
		defsize = CIFS_DEFAULT_IOSIZE;
	else if (server->capabilities & CAP_LARGE_READ_X)
		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
	else
		defsize = server->maxBuf - sizeof(READ_RSP);

	rsize = volume_info->rsize ? volume_info->rsize : defsize;

	/*
	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
	 * the client's MaxBufferSize.
	 */
	if (!(server->capabilities & CAP_LARGE_READ_X))
		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);

	/* limit to the amount that we can kmap at once */
	rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);

	/* hard limit of CIFS_MAX_RSIZE */
	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);

	return rsize;
}

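/* Query filesystem device and attribute info when the tree connection is set up. */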
static void
cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
	CIFSSMBQFSDeviceInfo(xid, tcon);
	CIFSSMBQFSAttributeInfo(xid, tcon);
}

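/*
 * Check that a path can be queried on this share, falling back to the
 * legacy SMBQueryInformation call for servers that reject QPathInfo.
 */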
static int
cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
			struct cifs_sb_info *cifs_sb, const char *full_path)
{
	int rc;
	FILE_ALL_INFO *file_info;

	file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (file_info == NULL)
		return -ENOMEM;

	rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info,
			      0 /* not legacy */, cifs_sb->local_nls,
			      cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc == -EOPNOTSUPP || rc == -EINVAL)
		rc = SMBQueryInformation(xid, tcon, full_path, file_info,
				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	kfree(file_info);
	return rc;
}

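/*
 * Fetch FILE_ALL_INFO for a path, falling back to the legacy query (and
 * flagging that timestamps need timezone adjustment) on older servers.
 */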
static int
cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_sb_info *cifs_sb, const char *full_path,
		     FILE_ALL_INFO *data, bool *adjustTZ)
{
	int rc;

	/* could do find first instead but this returns more info */
	rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */,
			      cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
	/*
	 * BB optimize code so we do not make the above call when server claims
	 * no NT SMB support and the above call failed at least once - set flag
	 * in tcon or mount.
	 */
	if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
		rc = SMBQueryInformation(xid, tcon, full_path, data,
					 cifs_sb->local_nls,
					 cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		*adjustTZ = true;
	}
	return rc;
}

static int
cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
		  struct cifs_sb_info *cifs_sb, const char *full_path,
		  u64 *uniqueid, FILE_ALL_INFO *data)
{
	/*
	 * We can not use the IndexNumber field by default from Windows or
	 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
	 * CIFS spec claims that this value is unique within the scope of a
	 * share, and the windows docs hint that it's actually unique
	 * per-machine.
	 *
	 * There may be higher info levels that work but are there Windows
	 * server or network appliances for which IndexNumber field is not
	 * guaranteed unique?
	 */
	return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
				     cifs_sb->local_nls,
				     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
}

static int
cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
		     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
	return CIFSSMBQFileInfo(xid, tcon, fid->netfid, data);
}

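/*
 * Build the path used as the root of the mount: the DFS tree name (if any)
 * followed by the prefix path, with delimiters converted for this mount.
 */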
static char *
cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon)
{
	int pplen = vol->prepath ? strlen(vol->prepath) : 0;
	int dfsplen;
	char *full_path = NULL;

	/* if there is no prefix path, the path is simply the share root: "" */
	if (pplen == 0) {
		full_path = kzalloc(1, GFP_KERNEL);
		return full_path;
	}

	if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
	else
		dfsplen = 0;

	full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return full_path;

	if (dfsplen)
		strncpy(full_path, tcon->treeName, dfsplen);
	strncpy(full_path + dfsplen, vol->prepath, pplen);
	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
	full_path[dfsplen + pplen] = 0; /* add trailing null */
	return full_path;
}

static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
	atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
	atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
	atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
	atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
	atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
	atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
	atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
	atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
	atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
	atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
	atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
	atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
	atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
	atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
	atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
	atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
	atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
	atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
	atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
	atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
	atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
#endif
}

static void
cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
	seq_printf(m, " Oplocks breaks: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
	seq_printf(m, "\nReads: %d Bytes: %llu",
		   atomic_read(&tcon->stats.cifs_stats.num_reads),
		   (long long)(tcon->bytes_read));
	seq_printf(m, "\nWrites: %d Bytes: %llu",
		   atomic_read(&tcon->stats.cifs_stats.num_writes),
		   (long long)(tcon->bytes_written));
	seq_printf(m, "\nFlushes: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_flushes));
	seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_locks),
		   atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
		   atomic_read(&tcon->stats.cifs_stats.num_symlinks));
	seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_opens),
		   atomic_read(&tcon->stats.cifs_stats.num_closes),
		   atomic_read(&tcon->stats.cifs_stats.num_deletes));
	seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_posixopens),
		   atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
	seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
		   atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
	seq_printf(m, "\nRenames: %d T2 Renames %d",
		   atomic_read(&tcon->stats.cifs_stats.num_renames),
		   atomic_read(&tcon->stats.cifs_stats.num_t2renames));
	seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
		   atomic_read(&tcon->stats.cifs_stats.num_ffirst),
		   atomic_read(&tcon->stats.cifs_stats.num_fnext),
		   atomic_read(&tcon->stats.cifs_stats.num_fclose));
#endif
}

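/*
 * Mark a freshly created directory read-only on the server by OR-ing
 * ATTR_READONLY into its DOS attributes; cache the new attributes on success.
 */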
static void
cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
		   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
		   const unsigned int xid)
{
	FILE_BASIC_INFO info;
	struct cifsInodeInfo *cifsInode;
	u32 dosattrs;
	int rc;

	memset(&info, 0, sizeof(info));
	cifsInode = CIFS_I(inode);
	dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
	info.Attributes = cpu_to_le32(dosattrs);
	rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0)
		cifsInode->cifsAttrs = dosattrs;
}

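/*
 * Open a file by path, using the legacy SMBLegacyOpen call when the server
 * does not support the NT SMB dialect (CAP_NT_SMBS).
 */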
static int
cifs_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path,
	       int disposition, int desired_access, int create_options,
	       struct cifs_fid *fid, __u32 *oplock, FILE_ALL_INFO *buf,
	       struct cifs_sb_info *cifs_sb)
{
	if (!(tcon->ses->capabilities & CAP_NT_SMBS))
		return SMBLegacyOpen(xid, tcon, path, disposition,
				     desired_access, create_options,
				     &fid->netfid, oplock, buf,
				     cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
						& CIFS_MOUNT_MAP_SPECIAL_CHR);
	return CIFSSMBOpen(xid, tcon, path, disposition, desired_access,
			   create_options, &fid->netfid, oplock, buf,
			   cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
}

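/*
 * Record the server-assigned netfid on the open file and apply the oplock
 * level granted in the open response.
 */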
static void
cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	cfile->fid.netfid = fid->netfid;
	cifs_set_oplock_level(cinode, oplock);
	cinode->can_cache_brlcks = cinode->clientCanCacheAll;
}

static int
cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return CIFSSMBClose(xid, tcon, fid->netfid);
}

static int
cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return CIFSSMBFlush(xid, tcon, fid->netfid);
}

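/* Protocol-specific callbacks used by the common CIFS code for SMB1. */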
struct smb_version_operations smb1_operations = {
	.send_cancel = send_nt_cancel,
	.compare_fids = cifs_compare_fids,
	.setup_request = cifs_setup_request,
	.setup_async_request = cifs_setup_async_request,
	.check_receive = cifs_check_receive,
	.add_credits = cifs_add_credits,
	.set_credits = cifs_set_credits,
	.get_credits_field = cifs_get_credits_field,
	.get_credits = cifs_get_credits,
	.get_next_mid = cifs_get_next_mid,
	.read_data_offset = cifs_read_data_offset,
	.read_data_length = cifs_read_data_length,
	.map_error = map_smb_to_linux_error,
	.find_mid = cifs_find_mid,
	.check_message = checkSMB,
	.dump_detail = cifs_dump_detail,
	.clear_stats = cifs_clear_stats,
	.print_stats = cifs_print_stats,
	.is_oplock_break = is_valid_oplock_break,
	.check_trans2 = cifs_check_trans2,
	.need_neg = cifs_need_neg,
	.negotiate = cifs_negotiate,
	.negotiate_wsize = cifs_negotiate_wsize,
	.negotiate_rsize = cifs_negotiate_rsize,
	.sess_setup = CIFS_SessSetup,
	.logoff = CIFSSMBLogoff,
	.tree_connect = CIFSTCon,
	.tree_disconnect = CIFSSMBTDis,
	.get_dfs_refer = CIFSGetDFSRefer,
	.qfs_tcon = cifs_qfs_tcon,
	.is_path_accessible = cifs_is_path_accessible,
	.query_path_info = cifs_query_path_info,
	.query_file_info = cifs_query_file_info,
	.get_srv_inum = cifs_get_srv_inum,
	.build_path_to_root = cifs_build_path_to_root,
	.echo = CIFSSMBEcho,
	.mkdir = CIFSSMBMkDir,
	.mkdir_setinfo = cifs_mkdir_setinfo,
	.rmdir = CIFSSMBRmDir,
	.unlink = CIFSSMBDelFile,
	.rename_pending_delete = cifs_rename_pending_delete,
	.open = cifs_open_file,
	.set_fid = cifs_set_fid,
	.close = cifs_close_file,
	.flush = cifs_flush_file,
};

struct smb_version_values smb1_values = {
	.version_string = SMB1_VERSION_STRING,
	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
	.exclusive_lock_type = 0,
	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
	.unlock_lock_type = 0,
	.header_size = sizeof(struct smb_hdr),
	.max_header_size = MAX_CIFS_HDR_SIZE,
	.read_rsp_size = sizeof(READ_RSP),
	.lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
	.cap_unix = CAP_UNIX,
	.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
	.cap_large_files = CAP_LARGE_FILES,
};