ceph: clean up readdir caps reservation

Use a global counter for the minimum number of allocated caps instead of
hard-coding a check against max_readdir. This accounts for multiple
client instances, and avoids examining the superblock mount options when a
cap is dropped.

Signed-off-by: Sage Weil <sage@newdream.net>
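
For illustration, a minimal user-space sketch of the accounting this change
introduces: a single global floor (caps_min_count) that each mounted client
raises by its max_readdir at mount time and lowers again at unmount, so the
cap-release path only has to consult the counter. The pthread mutex and the
main() walkthrough below are stand-ins for the kernel's caps_list_lock and
the mount/unmount paths; they are not part of the patch.

/* Illustrative only -- mirrors the patch's accounting in user space. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t caps_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int caps_total_count;    /* total caps allocated */
static int caps_use_count;      /* in use */
static int caps_reserve_count;  /* unused, reserved */
static int caps_avail_count;    /* unused, unreserved */
static int caps_min_count;      /* keep at least this many (unreserved) */

/* Analogue of ceph_adjust_min_caps(): each client adds its share at mount
 * and removes it at unmount, so the floor reflects all active clients. */
static void adjust_min_caps(int delta)
{
	pthread_mutex_lock(&caps_list_lock);
	caps_min_count += delta;
	assert(caps_min_count >= 0);
	pthread_mutex_unlock(&caps_list_lock);
}

/* Simplified cap-release path: really free the cap only when enough
 * unreserved caps remain; otherwise keep it around to avoid churn. */
static void put_cap(void)
{
	pthread_mutex_lock(&caps_list_lock);
	caps_use_count--;
	if (caps_avail_count >= caps_reserve_count + caps_min_count)
		caps_total_count--;        /* free it for real */
	else
		caps_avail_count++;        /* keep it preallocated */
	pthread_mutex_unlock(&caps_list_lock);
}

int main(void)
{
	int max_readdir = 1024;            /* per-mount readdir window */

	adjust_min_caps(max_readdir);      /* first client mounts */
	adjust_min_caps(max_readdir);      /* second client mounts */
	printf("min caps kept: %d\n", caps_min_count);   /* 2048 */

	adjust_min_caps(-max_readdir);     /* one client unmounts */
	printf("min caps kept: %d\n", caps_min_count);   /* 1024 */

	caps_total_count = 1;              /* pretend one cap is in use */
	caps_use_count = 1;
	put_cap();                         /* kept: avail < reserve + min */
	printf("avail=%d total=%d\n", caps_avail_count, caps_total_count);
	return 0;
}
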
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ab9b571..f94b56f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -128,6 +128,7 @@
static int caps_use_count; /* in use */
static int caps_reserve_count; /* unused, reserved */
static int caps_avail_count; /* unused, unreserved */
+static int caps_min_count; /* keep at least this many (unreserved) */
void __init ceph_caps_init(void)
{
@@ -149,6 +150,15 @@
caps_avail_count = 0;
caps_use_count = 0;
caps_reserve_count = 0;
+ caps_min_count = 0;
+ spin_unlock(&caps_list_lock);
+}
+
+void ceph_adjust_min_caps(int delta)
+{
+ spin_lock(&caps_list_lock);
+ caps_min_count += delta;
+ BUG_ON(caps_min_count < 0);
spin_unlock(&caps_list_lock);
}
@@ -265,12 +275,10 @@
caps_reserve_count, caps_avail_count);
caps_use_count--;
/*
- * Keep some preallocated caps around, at least enough to do a
- * readdir (which needs to preallocate lots of them), to avoid
- * lots of free/alloc churn.
+ * Keep some preallocated caps around (caps_min_count), to
+ * avoid lots of free/alloc churn.
*/
- if (caps_avail_count >= caps_reserve_count +
- ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) {
+ if (caps_avail_count >= caps_reserve_count + caps_min_count) {
caps_total_count--;
kmem_cache_free(ceph_cap_cachep, cap);
} else {
@@ -289,7 +297,8 @@
}
void ceph_reservation_status(struct ceph_client *client,
- int *total, int *avail, int *used, int *reserved)
+ int *total, int *avail, int *used, int *reserved,
+ int *min)
{
if (total)
*total = caps_total_count;
@@ -299,6 +308,8 @@
*used = caps_use_count;
if (reserved)
*reserved = caps_reserve_count;
+ if (min)
+ *min = caps_min_count;
}
/*
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index b58bd91..1a47b5c 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -255,14 +255,15 @@
static int caps_show(struct seq_file *s, void *p)
{
struct ceph_client *client = p;
- int total, avail, used, reserved;
+ int total, avail, used, reserved, min;
- ceph_reservation_status(client, &total, &avail, &used, &reserved);
+ ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
seq_printf(s, "total\t\t%d\n"
- "avail\t\t%d\n"
- "used\t\t%d\n"
- "reserved\t%d\n",
- total, avail, used, reserved);
+ "avail\t\t%d\n"
+ "used\t\t%d\n"
+ "reserved\t%d\n"
+ "min\t\t%d\n",
+ total, avail, used, reserved, min);
return 0;
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 39aaf29..74953be 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -578,6 +578,9 @@
if (!client->wb_pagevec_pool)
goto fail_trunc_wq;
+ /* caps */
+ client->min_caps = args->max_readdir;
+ ceph_adjust_min_caps(client->min_caps);
/* subsystems */
err = ceph_monc_init(&client->monc, client);
@@ -619,6 +622,8 @@
ceph_monc_stop(&client->monc);
ceph_osdc_stop(&client->osdc);
+ ceph_adjust_min_caps(-client->min_caps);
+
ceph_debugfs_client_cleanup(client);
destroy_workqueue(client->wb_wq);
destroy_workqueue(client->pg_inv_wq);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 1f39287..3b5faf9 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -129,6 +129,8 @@
int auth_err;
+ int min_caps;                  /* min caps I added */
+
struct ceph_messenger *msgr; /* messenger instance */
struct ceph_mon_client monc;
struct ceph_mds_client mdsc;
@@ -557,11 +559,12 @@
extern void ceph_caps_init(void);
extern void ceph_caps_finalize(void);
+extern void ceph_adjust_min_caps(int delta);
extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_client *client,
int *total, int *avail, int *used,
- int *reserved);
+ int *reserved, int *min);
static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
{