1 /* AFS volume location management
3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
19 static unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */
20 static unsigned afs_vlocation_update_timeout = 10 * 60; /* seconds between periodic record updates */
/* deferred-work handlers, defined later in this file */
22 static void afs_vlocation_reaper(struct work_struct *);
23 static void afs_vlocation_updater(struct work_struct *);
/* records queued for periodic update, ordered by ascending update_at */
25 static LIST_HEAD(afs_vlocation_updates);
/* unused records awaiting destruction, ordered most-dead first */
26 static LIST_HEAD(afs_vlocation_graveyard);
/* locks guarding the two lists above, respectively */
27 static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
28 static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
/* delayed work items driving the reaper and the updater */
29 static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
30 static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
/* single-thread workqueue for the updater (created in afs_vlocation_update_init) */
31 static struct workqueue_struct *afs_vlocation_update_worker;
34 * iterate through the VL servers in a cell until one of them admits knowing
35 * about the volume in question
/*
 * Query the cell's VL servers for the volume named in vl->vldb.name,
 * filling in *vldb on success.  The cell's vl_sem is held for writing
 * across the whole iteration so the server rotor is updated safely.
 * NOTE(review): several original lines are elided in this view (error
 * switch, loop exit paths) — verify against the full source.
 */
37 static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
39 struct afs_cache_vlocation *vldb)
41 struct afs_cell *cell = vl->cell;
45 _enter("%s,%s", cell->name, vl->vldb.name);
/* serialise VL-server access and rotor updates for this cell */
47 down_write(&vl->cell->vl_sem);
/* try each of the cell's VL server addresses in turn, starting at the
 * current rotor position (vl_curr_svix) */
49 for (count = cell->vl_naddrs; count > 0; count--) {
50 addr = cell->vl_addrs[cell->vl_curr_svix];
52 _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
54 /* attempt to access the VL server */
55 ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
/* distinguish local resource/network failure from a server answer */
65 if (ret == -ENOMEM || ret == -ENONET)
77 /* rotate the server records upon lookup failure */
80 cell->vl_curr_svix %= cell->vl_naddrs;
84 up_write(&vl->cell->vl_sem);
90 * iterate through the VL servers in a cell until one of them admits knowing
91 * about the volume in question
/*
 * As afs_vlocation_access_vl_by_name(), but look the volume up by its ID
 * (volid) and type (voltype) rather than by name.  Fills in *vldb on
 * success.  Holds the cell's vl_sem for writing throughout.
 * NOTE(review): parts of the retry/error handling are elided in this view.
 */
93 static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
96 afs_voltype_t voltype,
97 struct afs_cache_vlocation *vldb)
99 struct afs_cell *cell = vl->cell;
103 _enter("%s,%x,%d,", cell->name, volid, voltype);
105 down_write(&vl->cell->vl_sem);
/* walk the cell's VL server addresses from the current rotor position */
107 for (count = cell->vl_naddrs; count > 0; count--) {
108 addr = cell->vl_addrs[cell->vl_curr_svix];
110 _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
112 /* attempt to access the VL server */
113 ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
/* local resource/network failures are handled apart from server replies */
123 if (ret == -ENOMEM || ret == -ENONET)
/* retry the same server up to 3 times if it reports itself busy */
128 if (vl->upd_busy_cnt <= 3) {
129 if (vl->upd_busy_cnt > 1) {
130 /* second+ BUSY - sleep a little bit */
131 set_current_state(TASK_UNINTERRUPTIBLE);
133 __set_current_state(TASK_RUNNING);
146 /* rotate the server records upon lookup failure */
148 cell->vl_curr_svix++;
149 cell->vl_curr_svix %= cell->vl_naddrs;
/* reset the busy counter when moving on to the next server */
150 vl->upd_busy_cnt = 0;
/* if the lookup failed outright and at least one server positively
 * rejected the volume, log that the active volume is no longer valid */
154 if (ret < 0 && vl->upd_rej_cnt > 0) {
155 printk(KERN_NOTICE "kAFS:"
156 " Active volume no longer valid '%s'\n",
162 up_write(&vl->cell->vl_sem);
163 _leave(" = %d", ret);
168 * allocate a volume location record
/*
 * Allocate and initialise a new volume location record for the given cell.
 * The record starts in AFS_VL_NEW state with one reference held by the
 * caller.  The name is copied without an explicit NUL terminator; the
 * buffer was zeroed by kzalloc(), so the caller must ensure
 * namesz < sizeof(vl->vldb.name) (checked by afs_vlocation_lookup()).
 */
170 static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
174 struct afs_vlocation *vl;
176 vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
179 vl->state = AFS_VL_NEW;
180 atomic_set(&vl->usage, 1);
/* link: membership of cell->vl_list; grave: graveyard list;
 * update: periodic-update queue — all initially empty */
181 INIT_LIST_HEAD(&vl->link);
182 INIT_LIST_HEAD(&vl->grave);
183 INIT_LIST_HEAD(&vl->update);
/* waiters blocking for a state change (see afs_vlocation_lookup) */
184 init_waitqueue_head(&vl->waitq);
185 spin_lock_init(&vl->lock);
186 memcpy(vl->vldb.name, name, namesz);
194 * update record if we found it in the cache
/*
 * Refresh a known (cached) volume location record from the VL server,
 * looking it up by volume ID.  The ID and type are chosen from the cached
 * entry with priority RW > RO > BAK.  On success *vldb holds the fresh
 * data; on failure the error is logged and returned.
 */
196 static int afs_vlocation_update_record(struct afs_vlocation *vl,
198 struct afs_cache_vlocation *vldb)
200 afs_voltype_t voltype;
204 /* try to look up a cached volume in the cell VL databases by ID */
205 _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
208 ntohl(vl->vldb.servers[0].s_addr),
209 vl->vldb.srvtmask[0],
210 ntohl(vl->vldb.servers[1].s_addr),
211 vl->vldb.srvtmask[1],
212 ntohl(vl->vldb.servers[2].s_addr),
213 vl->vldb.srvtmask[2]);
215 _debug("Vids: %08x %08x %08x",
/* pick the volume ID to query by: prefer the R/W volume, then R/O,
 * then backup, according to which types the cached mask says exist */
220 if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
221 vid = vl->vldb.vid[0];
222 voltype = AFSVL_RWVOL;
223 } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
224 vid = vl->vldb.vid[1];
225 voltype = AFSVL_ROVOL;
226 } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
227 vid = vl->vldb.vid[2];
228 voltype = AFSVL_BACKVOL;
235 /* contact the server to make sure the volume is still available
236 * - TODO: need to handle disconnected operation here
238 ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
/* lookup failed: log and propagate the error */
242 printk(KERN_WARNING "kAFS:"
243 " failed to update volume '%s' (%x) up in '%s': %d\n",
244 vl->vldb.name, vid, vl->cell->name, ret);
245 _leave(" = %d", ret);
248 /* pulled from local cache into memory */
253 /* uh oh... looks like the volume got deleted */
255 printk(KERN_ERR "kAFS:"
256 " volume '%s' (%x) does not exist '%s'\n",
257 vl->vldb.name, vid, vl->cell->name);
259 /* TODO: make existing record unavailable */
260 _leave(" = %d", ret);
266 * apply the update to a VL record
/*
 * Copy freshly-fetched VL data (*vldb) into the in-memory record.  A
 * server-side volume rename is logged but otherwise tolerated.  If
 * fscache support is built in, the cache cookie is updated to match.
 */
268 static void afs_vlocation_apply_update(struct afs_vlocation *vl,
269 struct afs_cache_vlocation *vldb)
271 _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
272 vldb->name, vldb->vidmask,
273 ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
274 ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
275 ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);
277 _debug("Vids: %08x %08x %08x",
278 vldb->vid[0], vldb->vid[1], vldb->vid[2]);
/* warn if the volume was renamed on the server side */
280 if (strcmp(vldb->name, vl->vldb.name) != 0)
281 printk(KERN_NOTICE "kAFS:"
282 " name of volume '%s' changed to '%s' on server\n",
283 vl->vldb.name, vldb->name);
/* keep the persistent cache copy in sync with the new data */
287 #ifdef CONFIG_AFS_FSCACHE
288 fscache_update_cookie(vl->cache);
293 * fill in a volume location record, consulting the cache and the VL server
/*
 * Populate a freshly-allocated record: first consult the persistent cache
 * (which sets vl->valid if it holds a copy), then either refresh the known
 * entry by ID or do a from-scratch lookup by name on the VL servers.  The
 * result is applied to the record via afs_vlocation_apply_update().
 */
296 static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
299 struct afs_cache_vlocation vldb;
/* must only be called on a record that has never been filled in */
304 ASSERTCMP(vl->valid, ==, 0);
306 memset(&vldb, 0, sizeof(vldb));
308 /* see if we have an in-cache copy (will set vl->valid if there is) */
309 #ifdef CONFIG_AFS_FSCACHE
310 vl->cache = fscache_acquire_cookie(vl->cell->cache,
311 &afs_vlocation_cache_index_def, vl,
316 /* try to update a known volume in the cell VL databases by
317 * ID as the name may have changed */
318 _debug("found in cache");
319 ret = afs_vlocation_update_record(vl, key, &vldb);
321 /* try to look up an unknown volume in the cell VL databases by
323 ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
325 printk("kAFS: failed to locate '%s' in cell '%s'\n",
326 vl->vldb.name, vl->cell->name);
/* success: install the fetched data into the record */
331 afs_vlocation_apply_update(vl, &vldb);
337 * queue a vlocation record for updates
/*
 * Append a record to the tail of the periodic-update queue.  The due time
 * is at least afs_vlocation_update_timeout seconds from now, and at least
 * one second after the last record already queued, keeping the queue
 * sorted by update_at and rate-limiting requests to the VL server.
 */
339 static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
341 struct afs_vlocation *xvl;
343 /* wait at least 10 minutes before updating... */
344 vl->update_at = get_seconds() + afs_vlocation_update_timeout;
346 spin_lock(&afs_vlocation_updates_lock);
348 if (!list_empty(&afs_vlocation_updates)) {
349 /* ... but wait at least 1 second more than the newest record
350 * already queued so that we don't spam the VL server suddenly
351 * with lots of requests
/* the tail entry is the newest (largest update_at) */
353 xvl = list_entry(afs_vlocation_updates.prev,
354 struct afs_vlocation, update);
355 if (vl->update_at <= xvl->update_at)
356 vl->update_at = xvl->update_at + 1;
/* queue was empty: kick off the updater work item */
358 queue_delayed_work(afs_vlocation_update_worker,
359 &afs_vlocation_update,
360 afs_vlocation_update_timeout * HZ);
363 list_add_tail(&vl->update, &afs_vlocation_updates);
364 spin_unlock(&afs_vlocation_updates_lock);
368 * lookup volume location
369 * - iterate through the VL servers in a cell until one of them admits knowing
370 * about the volume in question
371 * - lookup in the local cache if not able to find on the VL server
372 * - insert/update in the local cache if did get a VL response
/*
 * Returns a referenced afs_vlocation on success or ERR_PTR() on failure.
 * Reuses an existing in-memory record if one matches the name (rescuing it
 * from the graveyard if necessary); otherwise allocates, fills in and
 * queues a new one.  NOTE(review): several branches are elided in this
 * view — consult the full source before modifying control flow.
 */
374 struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
379 struct afs_vlocation *vl;
382 _enter("{%s},{%x},%*.*s,%zu",
383 cell->name, key_serial(key),
384 (int) namesz, (int) namesz, name, namesz);
/* the name must fit in the record's fixed-size buffer */
386 if (namesz >= sizeof(vl->vldb.name)) {
387 _leave(" = -ENAMETOOLONG");
388 return ERR_PTR(-ENAMETOOLONG);
391 /* see if we have an in-memory copy first */
392 down_write(&cell->vl_sem);
393 spin_lock(&cell->vl_lock);
394 list_for_each_entry(vl, &cell->vl_list, link) {
/* quick length check: stored name must end exactly at namesz */
395 if (vl->vldb.name[namesz] != '\0')
397 if (memcmp(vl->vldb.name, name, namesz) == 0)
398 goto found_in_memory;
400 spin_unlock(&cell->vl_lock);
402 /* not in the cell's in-memory lists - create a new record */
403 vl = afs_vlocation_alloc(cell, name, namesz);
405 up_write(&cell->vl_sem);
406 return ERR_PTR(-ENOMEM);
/* publish the new record on the cell's list while still creating it */
411 list_add_tail(&vl->link, &cell->vl_list);
412 vl->state = AFS_VL_CREATING;
413 up_write(&cell->vl_sem);
/* fetch the data from cache/VL servers */
416 ret = afs_vlocation_fill_in_record(vl, key);
419 spin_lock(&vl->lock);
420 vl->state = AFS_VL_VALID;
421 spin_unlock(&vl->lock);
424 /* update volume entry in local cache */
425 #ifdef CONFIG_AFS_FSCACHE
426 fscache_update_cookie(vl->cache);
429 /* schedule for regular updates */
430 afs_vlocation_queue_for_updates(vl);
434 /* found in memory */
435 _debug("found in memory");
436 atomic_inc(&vl->usage);
437 spin_unlock(&cell->vl_lock);
/* rescue the record from the graveyard if it was queued for reaping */
438 if (!list_empty(&vl->grave)) {
439 spin_lock(&afs_vlocation_graveyard_lock);
440 list_del_init(&vl->grave);
441 spin_unlock(&afs_vlocation_graveyard_lock);
443 up_write(&cell->vl_sem);
445 /* see if it was an abandoned record that we might try filling in */
446 spin_lock(&vl->lock);
447 while (vl->state != AFS_VL_VALID) {
448 afs_vlocation_state_t state = vl->state;
450 _debug("invalid [state %d]", state);
/* an abandoned or volume-less record: take over creation ourselves */
452 if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {
453 vl->state = AFS_VL_CREATING;
454 spin_unlock(&vl->lock);
458 /* must now wait for creation or update by someone else to
462 spin_unlock(&vl->lock);
463 ret = wait_event_interruptible(vl->waitq,
464 vl->state == AFS_VL_NEW ||
465 vl->state == AFS_VL_VALID ||
466 vl->state == AFS_VL_NO_VOLUME);
469 spin_lock(&vl->lock);
471 spin_unlock(&vl->lock);
/* error path: mark the record abandoned and drop our reference */
478 spin_lock(&vl->lock);
479 vl->state = AFS_VL_NEW;
480 spin_unlock(&vl->lock);
484 afs_put_vlocation(vl);
485 _leave(" = %d", ret);
490 * finish using a volume location record
/*
 * Drop a reference on a record.  When the last reference goes, the record
 * is not freed immediately: it is moved to the graveyard, its update work
 * is cancelled, and the reaper is scheduled to destroy it after
 * afs_vlocation_timeout seconds (giving lookups a chance to rescue it).
 */
492 void afs_put_vlocation(struct afs_vlocation *vl)
497 _enter("%s", vl->vldb.name);
499 ASSERTCMP(atomic_read(&vl->usage), >, 0);
501 if (likely(!atomic_dec_and_test(&vl->usage))) {
/* we were the last user: bury the record in the graveyard */
506 spin_lock(&afs_vlocation_graveyard_lock);
/* recheck under the lock — a concurrent lookup may have revived it */
507 if (atomic_read(&vl->usage) == 0) {
509 list_move_tail(&vl->grave, &afs_vlocation_graveyard);
510 vl->time_of_death = get_seconds();
511 queue_delayed_work(afs_wq, &afs_vlocation_reap,
512 afs_vlocation_timeout * HZ);
514 /* suspend updates on this record */
515 if (!list_empty(&vl->update)) {
516 spin_lock(&afs_vlocation_updates_lock);
517 list_del_init(&vl->update);
518 spin_unlock(&afs_vlocation_updates_lock);
521 spin_unlock(&afs_vlocation_graveyard_lock);
522 _leave(" [killed?]");
526 * destroy a dead volume location record
/*
 * Final teardown of a record already removed from all lists by the
 * reaper: release the fscache cookie (if configured) and the reference
 * on the owning cell.
 */
528 static void afs_vlocation_destroy(struct afs_vlocation *vl)
532 #ifdef CONFIG_AFS_FSCACHE
533 fscache_relinquish_cookie(vl->cache, 0);
535 afs_put_cell(vl->cell);
540 * reap dead volume location records
/*
 * Delayed-work handler: walk the graveyard (oldest deaths first), extract
 * records whose grace period has expired onto a local "corpses" list, and
 * destroy them outside the locks.  If the head record is not yet ripe,
 * re-arm the work item for the remaining delay and stop.  A record whose
 * usage count went back up (rescued by a lookup) is simply unlinked from
 * the graveyard instead of being destroyed.
 */
542 static void afs_vlocation_reaper(struct work_struct *work)
545 struct afs_vlocation *vl;
546 unsigned long delay, expiry;
552 spin_lock(&afs_vlocation_graveyard_lock);
554 while (!list_empty(&afs_vlocation_graveyard)) {
555 vl = list_entry(afs_vlocation_graveyard.next,
556 struct afs_vlocation, grave);
558 _debug("check %p", vl);
560 /* the queue is ordered most dead first */
561 expiry = vl->time_of_death + afs_vlocation_timeout;
/* not yet expired: reschedule ourselves for the remainder */
563 delay = (expiry - now) * HZ;
564 _debug("delay %lu", delay);
565 mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
569 spin_lock(&vl->cell->vl_lock);
/* the record was revived while in the graveyard — spare it */
570 if (atomic_read(&vl->usage) > 0) {
572 list_del_init(&vl->grave);
/* genuinely dead: detach from the cell and queue for destruction */
575 list_move_tail(&vl->grave, &corpses);
576 list_del_init(&vl->link);
578 spin_unlock(&vl->cell->vl_lock);
581 spin_unlock(&afs_vlocation_graveyard_lock);
583 /* now reap the corpses we've extracted */
584 while (!list_empty(&corpses)) {
585 vl = list_entry(corpses.next, struct afs_vlocation, grave);
586 list_del(&vl->grave);
587 afs_vlocation_destroy(vl);
594 * initialise the VL update process
/*
 * Module-init helper: create the single-threaded workqueue on which the
 * periodic updater runs.  Returns 0 on success or -ENOMEM.
 */
596 int __init afs_vlocation_update_init(void)
598 afs_vlocation_update_worker =
599 create_singlethread_workqueue("kafs_vlupdated");
600 return afs_vlocation_update_worker ? 0 : -ENOMEM;
604 * discard all the volume location records for rmmod
/*
 * Module-exit helper: zero the graveyard timeout so records die at once,
 * empty the update queue, force-run the updater and tear down its
 * workqueue, then force-run the reaper to destroy everything remaining.
 */
606 void afs_vlocation_purge(void)
608 afs_vlocation_timeout = 0;
610 spin_lock(&afs_vlocation_updates_lock);
/* detach the whole pending-updates queue */
611 list_del_init(&afs_vlocation_updates);
612 spin_unlock(&afs_vlocation_updates_lock);
/* run any outstanding update work immediately, then kill the queue */
613 mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
614 destroy_workqueue(afs_vlocation_update_worker);
/* run the reaper now; with timeout 0 everything is destroyed */
616 mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
620 * update a volume location
/*
 * Delayed-work handler: take the due record from the head of the update
 * queue, refresh it from the VL server, set its state from the result,
 * requeue it at the tail, and re-arm the work item for the next record.
 * NOTE(review): some branches (dead-record skip loop, timeout clamping)
 * are elided in this view — consult the full source before changing flow.
 */
622 static void afs_vlocation_updater(struct work_struct *work)
624 struct afs_cache_vlocation vldb;
625 struct afs_vlocation *vl, *xvl;
634 /* find a record to update */
635 spin_lock(&afs_vlocation_updates_lock)&
637 if (list_empty(&afs_vlocation_updates)) {
638 spin_unlock(&afs_vlocation_updates_lock);
639 _leave(" [nothing]");
643 vl = list_entry(afs_vlocation_updates.next,
644 struct afs_vlocation, update);
/* a record with no users is dropped from the queue rather than updated */
645 if (atomic_read(&vl->usage) > 0)
647 list_del_init(&vl->update);
/* head record not yet due: re-arm the work item for the remainder */
650 timeout = vl->update_at - now;
652 queue_delayed_work(afs_vlocation_update_worker,
653 &afs_vlocation_update, timeout * HZ);
654 spin_unlock(&afs_vlocation_updates_lock);
655 _leave(" [nothing]");
/* claim the record: dequeue it and pin it with a reference */
659 list_del_init(&vl->update);
660 atomic_inc(&vl->usage);
661 spin_unlock(&afs_vlocation_updates_lock);
663 /* we can now perform the update */
664 _debug("update %s", vl->vldb.name);
665 vl->state = AFS_VL_UPDATING;
667 vl->upd_busy_cnt = 0;
/* refresh from the VL server (no key: this is a background update) */
669 ret = afs_vlocation_update_record(vl, NULL, &vldb);
670 spin_lock(&vl->lock);
/* translate the lookup result into a record state */
673 afs_vlocation_apply_update(vl, &vldb);
674 vl->state = AFS_VL_VALID;
677 vl->state = AFS_VL_VOLUME_DELETED;
680 vl->state = AFS_VL_UNCERTAIN;
683 spin_unlock(&vl->lock);
686 /* and then reschedule */
687 _debug("reschedule");
688 vl->update_at = get_seconds() + afs_vlocation_update_timeout;
690 spin_lock(&afs_vlocation_updates_lock);
692 if (!list_empty(&afs_vlocation_updates)) {
693 /* next update in 10 minutes, but wait at least 1 second more
694 * than the newest record already queued so that we don't spam
695 * the VL server suddenly with lots of requests
/* tail = newest queued record: keep the queue sorted by update_at */
697 xvl = list_entry(afs_vlocation_updates.prev,
698 struct afs_vlocation, update);
699 if (vl->update_at <= xvl->update_at)
700 vl->update_at = xvl->update_at + 1;
/* head = next record due: that sets the delay for the next run */
701 xvl = list_entry(afs_vlocation_updates.next,
702 struct afs_vlocation, update);
703 timeout = xvl->update_at - now;
707 timeout = afs_vlocation_update_timeout;
/* the record must have left the queue before re-adding it */
710 ASSERT(list_empty(&vl->update));
712 list_add_tail(&vl->update, &afs_vlocation_updates);
714 _debug("timeout %ld", timeout);
715 queue_delayed_work(afs_vlocation_update_worker,
716 &afs_vlocation_update, timeout * HZ);
717 spin_unlock(&afs_vlocation_updates_lock);
/* drop the reference we took when claiming the record */
718 afs_put_vlocation(vl);