/*
 * SMB1 (CIFS) version specific operations
 *
 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published
 * by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifspdu.h"

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, void *buf,
               struct mid_q_entry *mid)
{
        int rc = 0;
        struct smb_hdr *in_buf = (struct smb_hdr *)buf;

        /* -4 for RFC1001 length and +2 for BCC field */
        in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
        in_buf->Command = SMB_COM_NT_CANCEL;
        in_buf->WordCount = 0;
        put_bcc(0, in_buf);

        mutex_lock(&server->srv_mutex);
        rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }
        rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        mutex_unlock(&server->srv_mutex);

        cFYI(1, "issued NT_CANCEL for mid %u, rc = %d", in_buf->Mid, rc);

        return rc;
}

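/*
 * SMB1 identifies an open file by a 16-bit netfid, so two cifsFileInfo
 * structures refer to the same open file iff their netfids match.
 */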
static bool
cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
{
        return ob1->fid.netfid == ob2->fid.netfid;
}

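/*
 * Pull the data offset and length out of a READ_RSP. The length is split
 * across the 16-bit DataLength and DataLengthHigh fields on the wire.
 */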
static unsigned int
cifs_read_data_offset(char *buf)
{
        READ_RSP *rsp = (READ_RSP *)buf;
        return le16_to_cpu(rsp->DataOffset);
}

static unsigned int
cifs_read_data_length(char *buf)
{
        READ_RSP *rsp = (READ_RSP *)buf;
        return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
               le16_to_cpu(rsp->DataLength);
}

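/*
 * Match a received buffer to its outstanding request by walking the
 * pending mid queue under GlobalMid_Lock and comparing mid and command.
 */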
static struct mid_q_entry *
cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
{
        struct smb_hdr *buf = (struct smb_hdr *)buffer;
        struct mid_q_entry *mid;

        spin_lock(&GlobalMid_Lock);
        list_for_each_entry(mid, &server->pending_mid_q, qhead) {
                if (mid->mid == buf->Mid &&
                    mid->mid_state == MID_REQUEST_SUBMITTED &&
                    le16_to_cpu(mid->command) == buf->Command) {
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
        }
        spin_unlock(&GlobalMid_Lock);
        return NULL;
}

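/*
 * SMB1 has no real credit mechanism: the "credits" counter simply tracks
 * how many simultaneous requests the server allows, and every request
 * consumes exactly one slot (see cifs_get_credits below).
 */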
static void
cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add,
                 const int optype)
{
        spin_lock(&server->req_lock);
        server->credits += add;
        server->in_flight--;
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
}

static void
cifs_set_credits(struct TCP_Server_Info *server, const int val)
{
        spin_lock(&server->req_lock);
        server->credits = val;
        server->oplocks = val > 1 ? enable_oplocks : false;
        spin_unlock(&server->req_lock);
}

static int *
cifs_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
        return &server->credits;
}

static unsigned int
cifs_get_credits(struct mid_q_entry *mid)
{
        return 1;
}

/*
 * Find a free multiplex id (SMB mid). Otherwise there could be
 * mid collisions which might cause problems, demultiplexing the
 * wrong response to this request. Multiplex ids could collide if
 * one of a series of requests takes much longer than the others, or
 * if a very large number of long-lived requests (byte range
 * locks or FindNotify requests) are pending. No more than
 * 64K-1 requests can be outstanding at one time. If no
 * mids are available, return zero. A future optimization
 * could make the combination of mids and uid the key we use
 * to demultiplex on (rather than mid alone).
 * In addition to the above check, the cifs demultiplex
 * code already uses the command code as a secondary
 * check of the frame, and if signing is negotiated the
 * response would be discarded if the mid were the same
 * but the signature was wrong. Since the mid is not put in the
 * pending queue until later (when it is about to be dispatched)
 * we do have to limit the number of outstanding requests
 * to somewhat less than 64K-1 although it is hard to imagine
 * so many threads being in the vfs at one time.
 */
static __u64
cifs_get_next_mid(struct TCP_Server_Info *server)
{
        __u64 mid = 0;
        __u16 last_mid, cur_mid;
        bool collision;

        spin_lock(&GlobalMid_Lock);

        /* mid is 16 bit only for CIFS/SMB */
        cur_mid = (__u16)((server->CurrentMid) & 0xffff);
        /* we do not want to loop forever */
        last_mid = cur_mid;
        cur_mid++;

        /*
         * This nested loop looks more expensive than it is.
         * In practice the list of pending requests is short,
         * fewer than 50, and the mids are likely to be unique
         * on the first pass through the loop unless some request
         * takes longer than the 64 thousand requests before it
         * (and it would also have to have been a request that
         * did not time out).
         */
        while (cur_mid != last_mid) {
                struct mid_q_entry *mid_entry;
                unsigned int num_mids;

                collision = false;
                if (cur_mid == 0)
                        cur_mid++;

                num_mids = 0;
                list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
                        ++num_mids;
                        if (mid_entry->mid == cur_mid &&
                            mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
                                /* This mid is in use, try a different one */
                                collision = true;
                                break;
                        }
                }

                /*
                 * if we have more than 32k mids in the list, then something
                 * is very wrong. Possibly a local user is trying to DoS the
                 * box by issuing long-running calls and SIGKILL'ing them. If
                 * we get to 2^16 mids then we're in big trouble as this
                 * function could loop forever.
                 *
                 * Go ahead and assign out the mid in this situation, but force
                 * an eventual reconnect to clean out the pending_mid_q.
                 */
                if (num_mids > 32768)
                        server->tcpStatus = CifsNeedReconnect;

                if (!collision) {
                        mid = (__u64)cur_mid;
                        server->CurrentMid = mid;
                        break;
                }
                cur_mid++;
        }
        spin_unlock(&GlobalMid_Lock);
        return mid;
}

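/*
 * Decide whether a Transaction2 response is complete or whether more
 * secondary responses carrying the rest of the data are still to come.
 */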
/*
 * return codes:
 *	0	not a transact2, or all data present
 *	>0	transact2 with that much data missing
 *	-EINVAL	invalid transact2
 */
static int
check2ndT2(char *buf)
{
        struct smb_hdr *pSMB = (struct smb_hdr *)buf;
        struct smb_t2_rsp *pSMBt;
        int remaining;
        __u16 total_data_size, data_in_this_rsp;

        if (pSMB->Command != SMB_COM_TRANSACTION2)
                return 0;

        /* check for plausible wct, bcc and t2 data and parm sizes */
        /* check for parm and data offset going beyond end of smb */
        if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
                cFYI(1, "invalid transact2 word count");
                return -EINVAL;
        }

        pSMBt = (struct smb_t2_rsp *)pSMB;

        total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
        data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

        if (total_data_size == data_in_this_rsp)
                return 0;
        else if (total_data_size < data_in_this_rsp) {
                cFYI(1, "total data %d smaller than data in frame %d",
                     total_data_size, data_in_this_rsp);
                return -EINVAL;
        }

        remaining = total_data_size - data_in_this_rsp;

        cFYI(1, "missing %d bytes from transact2, check next response",
             remaining);
        if (total_data_size > CIFSMaxBufSize) {
                cERROR(1, "TotalDataSize %d is over maximum buffer %d",
                       total_data_size, CIFSMaxBufSize);
                return -EINVAL;
        }
        return remaining;
}

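/*
 * Append the data area of a secondary Transaction2 response to the primary
 * response buffer, fixing up DataCount, the BCC and the RFC1001 length.
 * Returns 1 if more secondary responses are expected, 0 when the transaction
 * is complete, and a negative errno on a malformed response.
 */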
static int
coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
{
        struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
        struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
        char *data_area_of_tgt;
        char *data_area_of_src;
        int remaining;
        unsigned int byte_count, total_in_tgt;
        __u16 tgt_total_cnt, src_total_cnt, total_in_src;

        src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
        tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);

        if (tgt_total_cnt != src_total_cnt)
                cFYI(1, "total data count of primary and secondary t2 differ "
                        "source=%hu target=%hu", src_total_cnt, tgt_total_cnt);

        total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

        remaining = tgt_total_cnt - total_in_tgt;

        if (remaining < 0) {
                cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
                        "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
                return -EPROTO;
        }

        if (remaining == 0) {
                /* nothing to do, ignore */
                cFYI(1, "no more data remains");
                return 0;
        }

        total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
        if (remaining < total_in_src)
                cFYI(1, "transact2 2nd response contains too much data");

        /* find end of first SMB data area */
        data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
                           get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);

        /* validate target area */
        data_area_of_src = (char *)&pSMBs->hdr.Protocol +
                           get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);

        data_area_of_tgt += total_in_tgt;

        total_in_tgt += total_in_src;
        /* is the result too big for the field? */
        if (total_in_tgt > USHRT_MAX) {
                cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
                return -EPROTO;
        }
        put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);

        /* fix up the BCC */
        byte_count = get_bcc(target_hdr);
        byte_count += total_in_src;
        /* is the result too big for the field? */
        if (byte_count > USHRT_MAX) {
                cFYI(1, "coalesced BCC too large (%u)", byte_count);
                return -EPROTO;
        }
        put_bcc(byte_count, target_hdr);

        byte_count = be32_to_cpu(target_hdr->smb_buf_length);
        byte_count += total_in_src;
        /* don't allow buffer to overflow */
        if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
                return -ENOBUFS;
        }
        target_hdr->smb_buf_length = cpu_to_be32(byte_count);

        /* copy second buffer into end of first buffer */
        memcpy(data_area_of_tgt, data_area_of_src, total_in_src);

        if (remaining != total_in_src) {
                /* more responses to go */
                cFYI(1, "waiting for more secondary responses");
                return 1;
        }

        /* we are done */
        cFYI(1, "found the last secondary response");
        return 0;
}

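/*
 * Returns true if this buffer was handled here as part of a multi-part
 * Transaction2 response, false if it should be processed normally.
 */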
static bool
cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                  char *buf, int malformed)
{
        if (malformed)
                return false;
        if (check2ndT2(buf) <= 0)
                return false;
        mid->multiRsp = true;
        if (mid->resp_buf) {
                /* merge response - fix up 1st */
                malformed = coalesce_t2(buf, mid->resp_buf);
                if (malformed > 0)
                        return true;
                /* All parts received or packet is malformed. */
                mid->multiEnd = true;
                dequeue_mid(mid, malformed);
                return true;
        }
        if (!server->large_buf) {
                /* FIXME: switch to already allocated largebuf? */
                cERROR(1, "1st trans2 resp needs bigbuf");
        } else {
                /* Have first buffer */
                mid->resp_buf = buf;
                mid->large_buf = true;
                server->bigbuf = NULL;
        }
        return true;
}

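/* Negotiation is (re)needed until the server's maxBuf has been filled in. */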
static bool
cifs_need_neg(struct TCP_Server_Info *server)
{
        return server->maxBuf == 0;
}

static int
cifs_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
        int rc;

        rc = CIFSSMBNegotiate(xid, ses);
        if (rc == -EAGAIN) {
                /* retry only once on 1st time connection */
                set_credits(ses->server, 1);
                rc = CIFSSMBNegotiate(xid, ses);
                if (rc == -EAGAIN)
                        rc = -EHOSTDOWN;
        }
        return rc;
}

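/* Query filesystem device and attribute information after tree connect. */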
static void
cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
{
        CIFSSMBQFSDeviceInfo(xid, tcon);
        CIFSSMBQFSAttributeInfo(xid, tcon);
}

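/*
 * Probe a path with QPathInfo, falling back to the pre-NT QueryInformation
 * level for servers that do not support it.
 */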
static int
cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
                        struct cifs_sb_info *cifs_sb, const char *full_path)
{
        int rc;
        FILE_ALL_INFO *file_info;

        file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (file_info == NULL)
                return -ENOMEM;

        rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info,
                              0 /* not legacy */, cifs_sb->local_nls,
                              cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);

        if (rc == -EOPNOTSUPP || rc == -EINVAL)
                rc = SMBQueryInformation(xid, tcon, full_path, file_info,
                                         cifs_sb->local_nls,
                                         cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        kfree(file_info);
        return rc;
}

static int
cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
                     struct cifs_sb_info *cifs_sb, const char *full_path,
                     FILE_ALL_INFO *data, bool *adjustTZ)
{
        int rc;

        /* could do find first instead but this returns more info */
        rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */,
                              cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        /*
         * BB optimize code so we do not make the above call when server claims
         * no NT SMB support and the above call failed at least once - set flag
         * in tcon or mount.
         */
        if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
                rc = SMBQueryInformation(xid, tcon, full_path, data,
                                         cifs_sb->local_nls,
                                         cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                *adjustTZ = true;
        }
        return rc;
}

static int
cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
                  struct cifs_sb_info *cifs_sb, const char *full_path,
                  u64 *uniqueid, FILE_ALL_INFO *data)
{
        /*
         * We can not use the IndexNumber field by default from Windows or
         * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
         * CIFS spec claims that this value is unique within the scope of a
         * share, and the windows docs hint that it's actually unique
         * per-machine.
         *
         * There may be higher info levels that work but are there Windows
         * server or network appliances for which IndexNumber field is not
         * guaranteed unique?
         */
        return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
                                     cifs_sb->local_nls,
                                     cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
}

static int
cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
                     struct cifs_fid *fid, FILE_ALL_INFO *data)
{
        return CIFSSMBQFileInfo(xid, tcon, fid->netfid, data);
}

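/*
 * Build the path to the root of the mount: the DFS tree name (if any)
 * followed by the prefix path, with delimiters converted for this mount.
 */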
static char *
cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
                        struct cifs_tcon *tcon)
{
        int pplen = vol->prepath ? strlen(vol->prepath) : 0;
        int dfsplen;
        char *full_path = NULL;

        /* if no prefix path, simply set path to the root of share to "" */
        if (pplen == 0) {
                full_path = kzalloc(1, GFP_KERNEL);
                return full_path;
        }

        if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
                dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
        else
                dfsplen = 0;

        full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
        if (full_path == NULL)
                return full_path;

        if (dfsplen)
                strncpy(full_path, tcon->treeName, dfsplen);
        strncpy(full_path + dfsplen, vol->prepath, pplen);
        convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
        full_path[dfsplen + pplen] = 0; /* add trailing null */
        return full_path;
}

static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
        atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
        atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
        atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
        atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
        atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
        atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
        atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
        atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
        atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
        atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
        atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
        atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
        atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
        atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
        atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
        atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
        atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
        atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
        atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
        atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
        atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
#endif
}

static void
cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
#ifdef CONFIG_CIFS_STATS
        seq_printf(m, " Oplocks breaks: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
        seq_printf(m, "\nReads: %d Bytes: %llu",
                   atomic_read(&tcon->stats.cifs_stats.num_reads),
                   (long long)(tcon->bytes_read));
        seq_printf(m, "\nWrites: %d Bytes: %llu",
                   atomic_read(&tcon->stats.cifs_stats.num_writes),
                   (long long)(tcon->bytes_written));
        seq_printf(m, "\nFlushes: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_flushes));
        seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_locks),
                   atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
                   atomic_read(&tcon->stats.cifs_stats.num_symlinks));
        seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_opens),
                   atomic_read(&tcon->stats.cifs_stats.num_closes),
                   atomic_read(&tcon->stats.cifs_stats.num_deletes));
        seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_posixopens),
                   atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
        seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
                   atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
                   atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
        seq_printf(m, "\nRenames: %d T2 Renames %d",
                   atomic_read(&tcon->stats.cifs_stats.num_renames),
                   atomic_read(&tcon->stats.cifs_stats.num_t2renames));
        seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
                   atomic_read(&tcon->stats.cifs_stats.num_ffirst),
                   atomic_read(&tcon->stats.cifs_stats.num_fnext),
                   atomic_read(&tcon->stats.cifs_stats.num_fclose));
#endif
}

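/*
 * After a mkdir, set ATTR_READONLY (in addition to the cached DOS
 * attributes) on the new directory and record the result in the inode.
 */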
static void
cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
                   struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
                   const unsigned int xid)
{
        FILE_BASIC_INFO info;
        struct cifsInodeInfo *cifsInode;
        u32 dosattrs;
        int rc;

        memset(&info, 0, sizeof(info));
        cifsInode = CIFS_I(inode);
        dosattrs = cifsInode->cifsAttrs | ATTR_READONLY;
        info.Attributes = cpu_to_le32(dosattrs);
        rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
                                cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc == 0)
                cifsInode->cifsAttrs = dosattrs;
}

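/*
 * Open or create a file, using the legacy SMB open on servers that lack
 * the NT SMB dialect extensions (CAP_NT_SMBS).
 */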
static int
cifs_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path,
               int disposition, int desired_access, int create_options,
               struct cifs_fid *fid, __u32 *oplock, FILE_ALL_INFO *buf,
               struct cifs_sb_info *cifs_sb)
{
        if (!(tcon->ses->capabilities & CAP_NT_SMBS))
                return SMBLegacyOpen(xid, tcon, path, disposition,
                                     desired_access, create_options,
                                     &fid->netfid, oplock, buf,
                                     cifs_sb->local_nls,
                                     cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        return CIFSSMBOpen(xid, tcon, path, disposition, desired_access,
                           create_options, &fid->netfid, oplock, buf,
                           cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
}

static void
cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        cfile->fid.netfid = fid->netfid;
        cifs_set_oplock_level(cinode, oplock);
        cinode->can_cache_brlcks = cinode->clientCanCacheAll;
}

static int
cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
                struct cifs_fid *fid)
{
        return CIFSSMBClose(xid, tcon, fid->netfid);
}

static int
cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
                struct cifs_fid *fid)
{
        return CIFSSMBFlush(xid, tcon, fid->netfid);
}

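/*
 * Dispatch table that wires the protocol-independent CIFS code to the
 * SMB1-specific helpers above and to the existing CIFSSMB* routines.
 */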
struct smb_version_operations smb1_operations = {
        .send_cancel = send_nt_cancel,
        .compare_fids = cifs_compare_fids,
        .setup_request = cifs_setup_request,
        .setup_async_request = cifs_setup_async_request,
        .check_receive = cifs_check_receive,
        .add_credits = cifs_add_credits,
        .set_credits = cifs_set_credits,
        .get_credits_field = cifs_get_credits_field,
        .get_credits = cifs_get_credits,
        .get_next_mid = cifs_get_next_mid,
        .read_data_offset = cifs_read_data_offset,
        .read_data_length = cifs_read_data_length,
        .map_error = map_smb_to_linux_error,
        .find_mid = cifs_find_mid,
        .check_message = checkSMB,
        .dump_detail = cifs_dump_detail,
        .clear_stats = cifs_clear_stats,
        .print_stats = cifs_print_stats,
        .is_oplock_break = is_valid_oplock_break,
        .check_trans2 = cifs_check_trans2,
        .need_neg = cifs_need_neg,
        .negotiate = cifs_negotiate,
        .sess_setup = CIFS_SessSetup,
        .logoff = CIFSSMBLogoff,
        .tree_connect = CIFSTCon,
        .tree_disconnect = CIFSSMBTDis,
        .get_dfs_refer = CIFSGetDFSRefer,
        .qfs_tcon = cifs_qfs_tcon,
        .is_path_accessible = cifs_is_path_accessible,
        .query_path_info = cifs_query_path_info,
        .query_file_info = cifs_query_file_info,
        .get_srv_inum = cifs_get_srv_inum,
        .build_path_to_root = cifs_build_path_to_root,
        .echo = CIFSSMBEcho,
        .mkdir = CIFSSMBMkDir,
        .mkdir_setinfo = cifs_mkdir_setinfo,
        .rmdir = CIFSSMBRmDir,
        .unlink = CIFSSMBDelFile,
        .rename_pending_delete = cifs_rename_pending_delete,
        .open = cifs_open_file,
        .set_fid = cifs_set_fid,
        .close = cifs_close_file,
        .flush = cifs_flush_file,
};

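/* Protocol constants and sizes specific to the SMB1 dialect. */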
struct smb_version_values smb1_values = {
        .version_string = SMB1_VERSION_STRING,
        .large_lock_type = LOCKING_ANDX_LARGE_FILES,
        .exclusive_lock_type = 0,
        .shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
        .unlock_lock_type = 0,
        .header_size = sizeof(struct smb_hdr),
        .max_header_size = MAX_CIFS_HDR_SIZE,
        .read_rsp_size = sizeof(READ_RSP),
        .lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
        .cap_unix = CAP_UNIX,
        .cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
        .cap_large_files = CAP_LARGE_FILES,
};