/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"
#define SUB_JOB_SIZE	128
#define SPLIT_COUNT	8
#define MIN_JOBS	8
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	struct page_list *pages;
	unsigned nr_reserved_pages;
	unsigned nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};
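
/*
 * Job flow summary (describing the list handling below): dispatch_job()
 * places a new job on pages_jobs; once pages are allocated the job moves
 * to io_jobs, where it is first read from the source and then, converted
 * to a write, issued to every destination; when the io completes the job
 * lands on complete_jobs and the caller's notify fn is invoked from the
 * kcopyd workqueue.
 */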
static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
/*
 * Obtain one page for the use of kcopyd.
 */
static struct page_list *alloc_pl(gfp_t gfp)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), gfp);
	if (!pl)
		return NULL;

	pl->page = alloc_page(gfp);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}
static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}
/*
 * Add the provided pages to a client's free page list, releasing
 * back to the system any beyond the reserved_pages limit.
 */
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);
		else {
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		/* Try an opportunistic allocation first ... */
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
		if (unlikely(!pl)) {
			/* ... and fall back to the reserved pages. */
			pl = kc->pages;
			if (unlikely(!pl))
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);
	return -ENOMEM;
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}
/*
 * Allocate and reserve nr_pages for the use of a specific client.
 */
static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);

	return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;

	struct kcopyd_job *master_job;
};
static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = kmem_cache_create("kcopyd_job",
				sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
				__alignof__(struct kcopyd_job), 0, NULL);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}
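
/*
 * Note on the object size above: each cache object is an array of
 * 1 + SPLIT_COUNT jobs, not a single job.  When a copy needs to be
 * split, the master job at index 0 already has its SPLIT_COUNT sub
 * jobs preallocated right behind it (see split_job()), so splitting
 * never has to allocate and cannot deadlock under memory pressure.
 */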
void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}
/*
 * Functions to push a job onto the tail or head of a given job
 * list, and to pop one from its head.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages)
		kcopyd_put_pages(kc, job->pages);

	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job)
		mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);
	else {
		/* The read has finished; turn the job into a write. */
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}
/*
 * Issue the io for a particular job: a single read from the source,
 * or writes to every destination, depending on job->rw.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}
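
/*
 * Aside (illustrative sketch, not used by this file): dm_io() can also
 * be called synchronously.  When notify.fn is NULL, dm_io() waits for
 * completion and fills the caller-supplied error bitset instead of
 * invoking a callback.  example_sync_read() and sync_error_bits are
 * invented names for illustration only.
 */
#if 0
static int example_sync_read(struct dm_io_client *client,
			     struct dm_io_region *where,
			     struct page_list *pl)
{
	unsigned long sync_error_bits = 0;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = pl,
		.mem.offset = 0,
		.notify.fn = NULL,	/* NULL notify => synchronous */
		.client = client,
	};

	return dm_io(&io_req, 1, where, &sync_error_bits);
}
#endif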
static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}
/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, &master_job[i + 1]);
	}
}
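
/*
 * Worked example: copying a 4 MiB region (8192 sectors) with
 * SUB_JOB_SIZE = 128 sectors (64 KiB) and SPLIT_COUNT = 8 proceeds as
 * 8192 / 128 = 64 consecutive 128-sector chunks, with at most 8 sub
 * jobs in flight at once.  Each sub job that finishes claims the next
 * unclaimed chunk via job->progress, under job->lock, in
 * segment_complete() above.
 */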
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;
	job->master_job = job;

	if (job->source.count <= SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
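
/*
 * Illustrative sketch (not part of this driver): how a hypothetical
 * caller might submit a copy.  example_copy(), example_copy_done() and
 * the region sizes are invented for illustration; only the
 * dm_kcopyd_copy() and struct dm_io_region usage reflects the real API.
 */
#if 0
static void example_copy_done(int read_err, unsigned long write_err,
			      void *context)
{
	/* Called once, from the kcopyd thread, when the whole copy ends. */
	if (read_err || write_err)
		pr_err("example: copy failed\n");
}

static int example_copy(struct dm_kcopyd_client *kc,
			struct block_device *src_bdev,
			struct block_device *dst_bdev)
{
	struct dm_io_region from, to;

	from.bdev = src_bdev;
	from.sector = 0;
	from.count = 8192;	/* 4 MiB */

	to.bdev = dst_bdev;
	to.sector = 0;
	to.count = from.count;

	/* One source, one destination, no flags. */
	return dm_kcopyd_copy(kc, &from, 1, &to, 0, example_copy_done, NULL);
}
#endif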
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */
/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned min_pages,
			    struct dm_kcopyd_client **result)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return -ENOMEM;

	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd",
					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_reserved_pages = kc->nr_free_pages = 0;
	r = client_reserve_pages(kc, min_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create(min_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	*result = kc;
	return 0;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
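
/*
 * Illustrative sketch (not part of this driver): typical pairing of
 * client create/destroy in a hypothetical user's constructor and
 * destructor.  example_ctr(), example_dtr() and the reserve of 32
 * pages are invented for illustration.
 */
#if 0
static struct dm_kcopyd_client *example_kc;

static int example_ctr(void)
{
	int r = dm_kcopyd_client_create(32, &example_kc);

	if (r)
		return r;

	/* ... submit copies with dm_kcopyd_copy() ... */
	return 0;
}

static void example_dtr(void)
{
	/* Blocks until every job submitted on example_kc has completed. */
	dm_kcopyd_client_destroy(example_kc);
}
#endif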