/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif

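/* Module-wide debug switches and feature flags; several of the tunables
   below are also exposed as module parameters. */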
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = 0;
unsigned int ntlmv2_support = 0;
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");

static DECLARE_COMPLETION(cifs_oplock_exited);

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;
	else
		memset(cifs_sb, 0, sizeof(struct cifs_sb_info));

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
		sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}

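/*
 * Fill in struct kstatfs for statfs(2) by querying the server: use the
 * POSIX QFS info call when the share advertises Unix extensions (and
 * CONFIG_CIFS_EXPERIMENTAL is set), otherwise the older QFSInfo call.
 */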
static int
cifs_statfs(struct super_block *sb, struct kstatfs *buf)
{
	int xid, rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX;	/* PATH_MAX may be too long - it would
					   presumably be length of total path,
					   note that some servers may be able
					   to support more than this, but best
					   to be safe since Win2k and others
					   can not handle very long filenames */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

#ifdef CONFIG_CIFS_EXPERIMENTAL
/* BB we could add a second check for a QFS Unix capability bit */
	if (pTcon->ses->capabilities & CAP_UNIX)
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf, cifs_sb->local_nls);

	/* Only need to call the old QFSInfo if failed on newer one */
	if (rc)
#endif /* CIFS_EXPERIMENTAL */
		rc = CIFSSMBQFSInfo(xid, pTcon, buf, cifs_sb->local_nls);

	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen;  */
	/* BB get from info put in tcon struct at mount time with call to
	   QFSAttrInfo */
	FreeXid(xid);
	return 0;	/* always return success? what if volume is no
			   longer available? */
}

static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else	/* file mode might have been restricted at mount time
		   on the client (above and beyond ACL on servers) for
		   servers which do not support setting and viewing mode bits,
		   so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

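/*
 * Allocate a cifsInodeInfo from the inode slab; data and metadata caching
 * stay disabled until the server grants an oplock at open time.  Returns
 * the embedded VFS inode.
 */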
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blksize = CIFS_MAX_MSGSIZE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

struct super_operations cifs_super_ops = {
	.read_inode	= cifs_read_inode,
	.put_super	= cifs_put_super,
	.statfs		= cifs_statfs,
	.alloc_inode	= cifs_alloc_inode,
	.destroy_inode	= cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */
/*	Do not need the above two functions unless later we add lazy close
	of inodes or unless the kernel forgets to call us with the same
	number of releases (closes) as opens */
	.show_options	= cifs_show_options,
/*	.umount_begin	= cifs_umount_begin, */ /* consider adding in the future */
	.remount_fs	= cifs_remount,
};

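/*
 * get_sb entry point: obtain an anonymous superblock via sget(), fill it
 * in with cifs_read_super(), and mark it active; on failure the superblock
 * is torn down and an ERR_PTR is returned.
 */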
static struct super_block *
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return sb;

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_VERBOSE ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return ERR_PTR(rc);
	}
	sb->s_flags |= MS_ACTIVE;
	return sb;
}

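/*
 * Read path: both branches currently go through the page cache via
 * generic_file_read(); when a read oplock is not held, revalidation and
 * invalidation of cached pages is still a BB item (see the commented-out
 * code below).
 */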
static ssize_t
cifs_read_wrapper(struct file *file, char __user *read_data, size_t read_size,
		  loff_t *poffset)
{
	if (file->f_dentry == NULL)
		return -EIO;
	else if (file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1, ("In read_wrapper size %zd at %lld", read_size, *poffset));

	if (CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
		return generic_file_read(file, read_data, read_size, poffset);
	} else {
		/* BB do we need to lock inode from here until after
		   invalidate? */
/*		if (file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
		} */
/*		cifs_revalidate(file->f_dentry); */ /* BB fixme */

		/* BB we should make timer configurable - perhaps
		   by simply calling cifs_revalidate here */
		/* invalidate_remote_inode(file->f_dentry->d_inode); */
		return generic_file_read(file, read_data, read_size, poffset);
	}
}

static ssize_t
cifs_write_wrapper(struct file *file, const char __user *write_data,
		   size_t write_size, loff_t *poffset)
{
	ssize_t written;

	if (file->f_dentry == NULL)
		return -EIO;
	else if (file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1, ("In write_wrapper size %zd at %lld", write_size, *poffset));

	written = generic_file_write(file, write_data, write_size, poffset);
	if (!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll) {
		if (file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
		}
	}
	return written;
}

static struct file_system_type cifs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cifs",
	.get_sb		= cifs_get_sb,
	.kill_sb	= kill_anon_super,
	/*  .fs_flags */
};

struct inode_operations cifs_dir_inode_ops = {
	.create		= cifs_create,
	.lookup		= cifs_lookup,
	.getattr	= cifs_getattr,
	.unlink		= cifs_unlink,
	.link		= cifs_hardlink,
	.mkdir		= cifs_mkdir,
	.rmdir		= cifs_rmdir,
	.rename		= cifs_rename,
	.permission	= cifs_permission,
/*	.revalidate	= cifs_revalidate, */
	.setattr	= cifs_setattr,
	.symlink	= cifs_symlink,
	.mknod		= cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr	= cifs_setxattr,
	.getxattr	= cifs_getxattr,
	.listxattr	= cifs_listxattr,
	.removexattr	= cifs_removexattr,
#endif
};

struct inode_operations cifs_file_inode_ops = {
/*	.revalidate	= cifs_revalidate, */
	.setattr	= cifs_setattr,
	.getattr	= cifs_getattr,	/* do we need this anymore? */
	.rename		= cifs_rename,
	.permission	= cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr	= cifs_setxattr,
	.getxattr	= cifs_getxattr,
	.listxattr	= cifs_listxattr,
	.removexattr	= cifs_removexattr,
#endif
};

struct inode_operations cifs_symlink_inode_ops = {
	.readlink	= generic_readlink,
	.follow_link	= cifs_follow_link,
	.put_link	= cifs_put_link,
	.permission	= cifs_permission,
	/* BB add the following two eventually */
	/* .revalidate	= cifs_revalidate,
	   .setattr	= cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr	= cifs_setxattr,
	.getxattr	= cifs_getxattr,
	.listxattr	= cifs_listxattr,
	.removexattr	= cifs_removexattr,
#endif
};

struct file_operations cifs_file_ops = {
	.read		= cifs_read_wrapper,
	.write		= cifs_write_wrapper,
	.open		= cifs_open,
	.release	= cifs_close,
	.lock		= cifs_lock,
	.fsync		= cifs_fsync,
	.flush		= cifs_flush,
	.mmap		= cifs_file_mmap,
	.sendfile	= generic_file_sendfile,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.readv		= generic_file_readv,
	.writev		= generic_file_writev,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.dir_notify	= cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read		= cifs_user_read,
	.write		= cifs_user_write,
	.open		= cifs_open,
	.release	= cifs_close,
	.lock		= cifs_lock,
	.fsync		= cifs_fsync,
	.flush		= cifs_flush,
	.sendfile	= generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify	= cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

struct file_operations cifs_dir_ops = {
	.readdir	= cifs_readdir,
	.release	= cifs_closedir,
	.read		= generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify	= cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

static void
cifs_init_once(void *inode, kmem_cache_t *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, SLAB_RECLAIM_ACCOUNT,
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	if (kmem_cache_destroy(cifs_inode_cachep))
		printk(KERN_WARNING "cifs_inode_cache: error freeing\n");
}

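/*
 * Create the request buffer slab caches and mempools: one sized for full
 * SMBs (CIFSMaxBufSize plus header, clamped to 8192..130048 and rounded to
 * a 512 byte multiple) and one small pool of MAX_CIFS_HDR_SIZE buffers.
 */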
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
		/* Buffer size can not be smaller than 2 * PATH_MAX since
		   maximum Unicode path name has to fit in any SMB/CIFS path
		   based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024 * 127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create(cifs_min_rcv,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* 256 (MAX_CIFS_HDR_SIZE) bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5 page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
					       MAX_CIFS_HDR_SIZE, 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create(cifs_min_small,
					   mempool_alloc_slab,
					   mempool_free_slab,
					   cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	if (kmem_cache_destroy(cifs_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: error not all structures were freed\n");
	mempool_destroy(cifs_sm_req_poolp);
	if (kmem_cache_destroy(cifs_sm_req_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_request_cache: cifs_small_rq free error\n");
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */,
					mempool_alloc_slab,
					mempool_free_slab,
					cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		/* destroy the mid pool before its backing cache */
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	if (kmem_cache_destroy(cifs_mid_cachep))
		printk(KERN_WARNING
		       "cifs_destroy_mids: error not all structures were freed\n");

	if (kmem_cache_destroy(cifs_oplock_cachep))
		printk(KERN_WARNING
		       "error not all oplock structures were freed\n");
}

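/*
 * Kernel thread that drains GlobalOplock_Q: for each queued entry it
 * flushes (and if needed invalidates) the inode's pages, then sends an
 * oplock release to the server via CIFSSMBLock unless the tree connection
 * is marked CifsNeedReconnect.
 */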
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);

		schedule_timeout(1*HZ);
		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_sem across
				   the call */
				/* down(&inode->i_sem); */
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem); */
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */, 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while (!signal_pending(current));
	complete_and_exit(&cifs_oplock_exited, 0);
}

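/*
 * Module init: initialize the global session/tcon lists, counters and
 * locks, create the inode/mid/request caches, register the filesystem and
 * start the oplock thread, unwinding each step if a later one fails.
 */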
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
	INIT_LIST_HEAD(&GlobalServerList);	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (!rc) {
		rc = cifs_init_mids();
		if (!rc) {
			rc = cifs_init_request_bufs();
			if (!rc) {
				rc = register_filesystem(&cifs_fs_type);
				if (!rc) {
					rc = (int)kernel_thread(cifs_oplock_thread, NULL,
						CLONE_FS | CLONE_FILES | CLONE_VM);
					if (rc > 0)
						return 0;
					else {
						cERROR(1, ("error %d create oplock thread", rc));
						unregister_filesystem(&cifs_fs_type);
					}
				}
				cifs_destroy_request_bufs();
			}
			cifs_destroy_mids();
		}
		cifs_destroy_inodecache();
	}
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}

static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	if (oplockThread) {
		send_sig(SIGTERM, oplockThread, 1);
		wait_for_completion(&cifs_oplock_exited);
	}
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)