ceph: clean up readdir caps reservation
author    Sage Weil <sage@newdream.net>
Wed, 17 Feb 2010 18:02:43 +0000 (10:02 -0800)
committer Sage Weil <sage@newdream.net>
Wed, 17 Feb 2010 18:02:43 +0000 (10:02 -0800)
Use a global counter for the minimum number of allocated caps instead of
hard-coding a check against the max_readdir mount option.  This takes
multiple client instances into account, and avoids examining the superblock
mount options when a cap is dropped.

Signed-off-by: Sage Weil <sage@newdream.net>
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/super.c
fs/ceph/super.h

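The heart of the change is a single pool-wide floor, caps_min_count, kept alongside the existing counters in fs/ceph/caps.c. Below is a minimal userspace sketch of that accounting, with the locking dropped and assert() standing in for BUG_ON(); the adjust_min_caps() name and the sample deltas are illustrative stand-ins, not part of the patch.

    #include <assert.h>

    /* Pool-wide floor, kept alongside caps_{total,use,reserve,avail}_count. */
    static int caps_min_count;          /* keep at least this many (unreserved) */

    /* Mirrors ceph_adjust_min_caps(); the kernel holds caps_list_lock here. */
    static void adjust_min_caps(int delta)
    {
            caps_min_count += delta;
            assert(caps_min_count >= 0);        /* BUG_ON() in the patch */
    }

    int main(void)
    {
            adjust_min_caps(+1024);             /* one client mounts */
            adjust_min_caps(+4096);             /* a second client mounts */
            /* the floor is now the sum over all mounted clients */
            adjust_min_caps(-4096);
            adjust_min_caps(-1024);
            return 0;
    }
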
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ab9b571dda1105e87e01708b05a885b489daa2cc..f94b56faba3b5627f5505615f5ba108d6e3a90c3 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -128,6 +128,7 @@ static int caps_total_count;        /* total caps allocated */
 static int caps_use_count;          /* in use */
 static int caps_reserve_count;      /* unused, reserved */
 static int caps_avail_count;        /* unused, unreserved */
+static int caps_min_count;          /* keep at least this many (unreserved) */
 
 void __init ceph_caps_init(void)
 {
@@ -149,6 +150,15 @@ void ceph_caps_finalize(void)
        caps_avail_count = 0;
        caps_use_count = 0;
        caps_reserve_count = 0;
+       caps_min_count = 0;
+       spin_unlock(&caps_list_lock);
+}
+
+void ceph_adjust_min_caps(int delta)
+{
+       spin_lock(&caps_list_lock);
+       caps_min_count += delta;
+       BUG_ON(caps_min_count < 0);
        spin_unlock(&caps_list_lock);
 }
 
@@ -265,12 +275,10 @@ static void put_cap(struct ceph_cap *cap,
             caps_reserve_count, caps_avail_count);
        caps_use_count--;
        /*
-        * Keep some preallocated caps around, at least enough to do a
-        * readdir (which needs to preallocate lots of them), to avoid
-        * lots of free/alloc churn.
+        * Keep some preallocated caps around (caps_min_count), to
+        * avoid lots of free/alloc churn.
         */
-       if (caps_avail_count >= caps_reserve_count +
-           ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) {
+       if (caps_avail_count >= caps_reserve_count + caps_min_count) {
                caps_total_count--;
                kmem_cache_free(ceph_cap_cachep, cap);
        } else {
@@ -289,7 +297,8 @@ static void put_cap(struct ceph_cap *cap,
 }
 
 void ceph_reservation_status(struct ceph_client *client,
-                            int *total, int *avail, int *used, int *reserved)
+                            int *total, int *avail, int *used, int *reserved,
+                            int *min)
 {
        if (total)
                *total = caps_total_count;
@@ -299,6 +308,8 @@ void ceph_reservation_status(struct ceph_client *client,
                *used = caps_use_count;
        if (reserved)
                *reserved = caps_reserve_count;
+       if (min)
+               *min = caps_min_count;
 }
 
 /*
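
With the floor in place, put_cap() can make the free-or-keep decision from the pool counters alone. A minimal sketch of just that branch, reusing the counter names from the patch (the cap argument, list handling, and caps_list_lock are elided):

    static int caps_total_count, caps_use_count;
    static int caps_avail_count, caps_reserve_count, caps_min_count;

    /* The free-or-keep decision put_cap() makes after this patch. */
    static void put_cap_sketch(void)
    {
            caps_use_count--;
            if (caps_avail_count >= caps_reserve_count + caps_min_count)
                    caps_total_count--;         /* kmem_cache_free() the cap */
            else
                    caps_avail_count++;         /* park it on caps_list */
    }

Before the patch, this branch reached through cap->ci->vfs_inode.i_sb to read mount_args->max_readdir, tying a pool-wide decision to whichever client happened to drop the cap.
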
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index b58bd9188692311067de7417f0c906d95475d95f..1a47b5c25b5fb3707d39696e1dfbe82ec60f9d26 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -255,14 +255,15 @@ static int osdc_show(struct seq_file *s, void *pp)
 static int caps_show(struct seq_file *s, void *p)
 {
        struct ceph_client *client = p;
-       int total, avail, used, reserved;
+       int total, avail, used, reserved, min;
 
-       ceph_reservation_status(client, &total, &avail, &used, &reserved);
+       ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
        seq_printf(s, "total\t\t%d\n"
-                     "avail\t\t%d\n"
-                     "used\t\t%d\n"
-                     "reserved\t%d\n",
-                  total, avail, used, reserved);
+                  "avail\t\t%d\n"
+                  "used\t\t%d\n"
+                  "reserved\t%d\n"
+                  "min\t%d\n",
+                  total, avail, used, reserved, min);
        return 0;
 }
 
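With this change, the per-client caps file in debugfs gains a min row. Given the format string above, the output takes roughly the following shape; the values here are invented for illustration:

    total       1044
    avail       1024
    used        20
    reserved    0
    min         1024
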
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 39aaf29a04a06292eb8ec2d0a5a1603c6327d379..74953be75f8f135e55fcd39f354847df1c51e167 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -578,6 +578,9 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
        if (!client->wb_pagevec_pool)
                goto fail_trunc_wq;
 
+       /* caps */
+       client->min_caps = args->max_readdir;
+       ceph_adjust_min_caps(client->min_caps);
 
        /* subsystems */
        err = ceph_monc_init(&client->monc, client);
@@ -619,6 +622,8 @@ static void ceph_destroy_client(struct ceph_client *client)
        ceph_monc_stop(&client->monc);
        ceph_osdc_stop(&client->osdc);
 
+       ceph_adjust_min_caps(-client->min_caps);
+
        ceph_debugfs_client_cleanup(client);
        destroy_workqueue(client->wb_wq);
        destroy_workqueue(client->pg_inv_wq);
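
The super.c hunks tie the adjustment to the client lifecycle: the amount contributed is recorded in client->min_caps, so teardown subtracts exactly what was added without consulting the mount options again. A reduced sketch of that pairing, with simplified stand-in types and names rather than the kernel's:

    static int caps_min_count;

    static void adjust_min_caps(int delta)
    {
            caps_min_count += delta;
    }

    struct client {
            int min_caps;                   /* min caps this client added */
    };

    static void create_client(struct client *c, int max_readdir)
    {
            c->min_caps = max_readdir;      /* record our contribution */
            adjust_min_caps(c->min_caps);
    }

    static void destroy_client(struct client *c)
    {
            adjust_min_caps(-c->min_caps);  /* give back exactly that amount */
    }
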
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 1f3928785e1277b6df0b1acbabcb61cedcc55f49..3b5faf9980f825fd0e6148a9c5f6ec3924fcc119 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -129,6 +129,8 @@ struct ceph_client {
 
        int auth_err;
 
+       int min_caps;                  /* min caps this client added */
+
        struct ceph_messenger *msgr;   /* messenger instance */
        struct ceph_mon_client monc;
        struct ceph_mds_client mdsc;
@@ -557,11 +559,12 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci);
 
 extern void ceph_caps_init(void);
 extern void ceph_caps_finalize(void);
+extern void ceph_adjust_min_caps(int delta);
 extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
 extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
 extern void ceph_reservation_status(struct ceph_client *client,
                                    int *total, int *avail, int *used,
-                                   int *reserved);
+                                   int *reserved, int *min);
 
 static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
 {