/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};
static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	/* walk to the last of the nr pages being handed out */
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	/* walk to the tail of the returned chain, counting pages as we go */
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}
static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}
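
/*
 * Worked sizing example (added note, not in the original source): with
 * 4 KiB pages, a client created with nr_pages = 256 preallocates
 * 256 * 4096 bytes = 1 MiB of copy buffer.  Each page covers
 * PAGE_SIZE >> 9 = 8 sectors, so the pool can buffer 2048 sectors of
 * in-flight copy data; jobs needing more sit on the pages_jobs list
 * until run_complete_job() returns pages to the pool.
 */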
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512
static struct kmem_cache *_job_cache;

static int jobs_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}

static void jobs_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}
/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);

	else {
		/* the READ has finished; reuse the job for the WRITEs */
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}
/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}
static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
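
/*
 * Illustrative arithmetic (added note): sectors are 512 bytes, so
 * PAGE_SIZE >> 9 is the number of sectors per page (8 with 4 KiB
 * pages).  A sub-job of 128 sectors at offset 0 therefore asks
 * kcopyd_get_pages() for dm_div_up(128, 8) = 16 pages.
 */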
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
}
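
/*
 * Job lifecycle summary (added as a reading aid, derived from the
 * functions above):
 *
 *	pages_jobs    -- run_pages_job()    --> io_jobs (rw == READ)
 *	io_jobs       -- complete_io()      --> io_jobs (rw flipped to WRITE)
 *	io_jobs       -- complete_io()      --> complete_jobs
 *	complete_jobs -- run_complete_job() --> pages freed, client notified
 */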
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	push(&kc->pages_jobs, job);
	wake(kc);
}
#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
							   GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, job->kc->job_pool);
	}
}
/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}
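
/*
 * Worked example (added note): a 2048-sector copy with
 * SUB_JOB_SIZE = 128 is consumed in 16 sequential chunks.  split_job()
 * primes SPLIT_COUNT = 8 sub-jobs; each segment_complete() callback
 * then claims the next 128-sector chunk until job->progress reaches
 * job->source.count, so at most 8 sub-jobs are in flight at once.
 */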
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
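
/*
 * Usage sketch (hypothetical example, not part of the original file):
 * copying one source region to two mirror legs.  The names my_state,
 * my_mirror_done, my_handle_copy_failure and my_start_mirror are
 * invented for illustration.
 */
#if 0
struct my_state;			/* hypothetical per-copy state */

static void my_mirror_done(int read_err, unsigned long write_err,
			   void *context)
{
	struct my_state *ms = context;

	/* Runs asynchronously once the READ and both WRITEs finish. */
	if (read_err || write_err)
		my_handle_copy_failure(ms);	/* hypothetical helper */
}

static void my_start_mirror(struct dm_kcopyd_client *kc,
			    struct dm_io_region *from,
			    struct dm_io_region mirrors[2],
			    struct my_state *ms)
{
	dm_kcopyd_copy(kc, from, 2, mirrors, 0, my_mirror_done, ms);
}
#endif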
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */
/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct dm_kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_add(&kc->list, &_clients);
	mutex_unlock(&_client_lock);
}

static void client_del(struct dm_kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_del(&kc->list);
	mutex_unlock(&_client_lock);
}
static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	mutex_lock(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		mutex_unlock(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		mutex_unlock(&kcopyd_init_lock);
		return r;
	}

	kcopyd_clients++;
	mutex_unlock(&kcopyd_init_lock);
	return 0;
}

static void kcopyd_exit(void)
{
	mutex_lock(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		/* last client gone: tear down the job cache */
		jobs_exit();
	}
	mutex_unlock(&kcopyd_init_lock);
}
int dm_kcopyd_client_create(unsigned int nr_pages,
			    struct dm_kcopyd_client **result)
{
	int r = 0;
	struct dm_kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool) {
		kfree(kc);
		kcopyd_exit();
		return -ENOMEM;
	}

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!kc->kcopyd_wq) {
		mempool_destroy(kc->job_pool);
		kfree(kc);
		kcopyd_exit();
		return -ENOMEM;
	}

	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		destroy_workqueue(kc->kcopyd_wq);
		mempool_destroy(kc->job_pool);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		client_free_pages(kc);
		destroy_workqueue(kc->kcopyd_wq);
		mempool_destroy(kc->job_pool);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_add(kc);
	*result = kc;
	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	client_del(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
	kcopyd_exit();
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
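
/*
 * Client lifecycle sketch (hypothetical example, not from the original
 * file).  dm_kcopyd_client_destroy() sleeps on destroyq until nr_jobs
 * reaches zero, so every completion callback has run by the time it
 * returns.  The pool size of 16 pages is an assumption: enough for one
 * 128-sector sub-job on 4 KiB pages (128 / 8 = 16).
 */
#if 0
static struct dm_kcopyd_client *my_kc;	/* hypothetical */

static int my_setup(void)
{
	return dm_kcopyd_client_create(16, &my_kc);
}

static void my_teardown(void)
{
	dm_kcopyd_client_destroy(my_kc);	/* waits for outstanding jobs */
}
#endif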