2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/kernel.h>
17 #include <linux/dst.h>
18 #include <linux/kthread.h>
19 #include <linux/slab.h>
22 * Thread pool abstraction that allows scheduling work to be performed
23 * on behalf of a kernel thread. Users do not operate on threads directly;
24 * instead they provide setup and cleanup callbacks for the thread pool itself,
25 * and action and cleanup callbacks for each submitted work item.
27 * Each worker has private data initialized at creation time, plus data
28 * provided by the user at scheduling time.
30 * While an action is being performed, the thread cannot be used by other
31 * users; they will sleep until a free thread is available to pick up their work.
/*
 * Per-worker state. NOTE(review): this chunk is incomplete — fields used
 * elsewhere in the file (e.g. the has_data flag, private and schedule_data
 * pointers) are not visible here; confirm against the full source.
 */
33 struct thread_pool_worker
/* Links the worker into the pool's ready_list or active_list. */
35 struct list_head worker_entry;
/* Kernel thread running thread_pool_worker_func() for this worker. */
37 struct task_struct *thread;
/* Back-pointer to the owning pool. */
39 struct thread_pool *pool;
/* Worker sleeps here until work arrives or the thread is told to stop. */
46 wait_queue_head_t wait;
/* User work callback, invoked as action(private, schedule_data). */
51 int (* action)(void *private, void *schedule_data);
/* User teardown callback for the worker's private data. */
52 void (* cleanup)(void *private);
/*
 * Tear down a single worker: stop its kernel thread, then release the
 * user-supplied private data via the user's cleanup callback.
 * NOTE(review): pool bookkeeping lines (if any) are not visible in this chunk.
 */
55 static void thread_pool_exit_worker(struct thread_pool_worker *w)
/* kthread_stop() blocks until thread_pool_worker_func() returns. */
57 kthread_stop(w->thread);
59 w->cleanup(w->private);
64 * Called to mark thread as ready and allow users to schedule new work.
/*
 * Called when a worker finishes its action: either re-queue it on the
 * pool's ready list so users can schedule new work onto it, or unlink and
 * destroy it. NOTE(review): the condition selecting between the two paths
 * is not visible in this chunk — confirm against the full source.
 */
66 static void thread_pool_worker_make_ready(struct thread_pool_worker *w)
68 struct thread_pool *p = w->pool;
70 mutex_lock(&p->thread_lock);
/* Normal path: move the worker back onto the ready list under the lock. */
73 list_move_tail(&w->worker_entry, &p->ready_list);
75 mutex_unlock(&p->thread_lock);
/* Teardown path: unlink the worker and exit it instead of re-queueing. */
80 list_del(&w->worker_entry);
81 mutex_unlock(&p->thread_lock);
/* Runs outside the lock — thread_pool_exit_worker() blocks in kthread_stop(). */
83 thread_pool_exit_worker(w);
88 * Thread action loop: waits until there is new work.
/*
 * Main loop of each worker thread: sleep until either work is posted
 * (w->has_data) or a stop is requested, run the user action, then mark
 * the worker ready again. NOTE(review): lines between the stop check and
 * the action call are not visible in this chunk.
 */
90 static int thread_pool_worker_func(void *data)
92 struct thread_pool_worker *w = data;
94 while (!kthread_should_stop()) {
/* Wake on new work or on kthread_stop() from thread_pool_exit_worker(). */
95 wait_event_interruptible(w->wait,
96 kthread_should_stop() || w->has_data);
98 if (kthread_should_stop())
/* Run the user's action with per-worker private and per-job data. */
104 w->action(w->private, w->schedule_data);
105 thread_pool_worker_make_ready(w);
112 * Remove single worker without specifying which one.
/*
 * Remove one worker from the pool, whichever becomes ready first.
 * Sleeps until the ready list is non-empty (or the pool is already empty),
 * unlinks the first ready worker under thread_lock, and destroys it.
 */
114 void thread_pool_del_worker(struct thread_pool *p)
116 struct thread_pool_worker *w = NULL;
/* Loop until a worker was claimed or the pool has no threads left. */
118 while (!w && p->thread_num) {
119 wait_event(p->wait, !list_empty(&p->ready_list) || !p->thread_num);
121 dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
122 __func__, list_empty(&p->ready_list), p->thread_num);
/* Re-check under the lock — the list may have been drained since waking. */
124 mutex_lock(&p->thread_lock);
125 if (!list_empty(&p->ready_list)) {
126 w = list_first_entry(&p->ready_list,
127 struct thread_pool_worker,
130 dprintk("%s: deleting w: %p, thread_num: %d, list: %p [%p.%p].\n",
131 __func__, w, p->thread_num, &p->ready_list,
132 p->ready_list.prev, p->ready_list.next);
/* Unlink while still holding thread_lock. */
135 list_del(&w->worker_entry);
137 mutex_unlock(&p->thread_lock);
/* Destroy outside the lock — this blocks in kthread_stop(). */
141 thread_pool_exit_worker(w);
142 dprintk("%s: deleted w: %p, thread_num: %d.\n",
143 __func__, w, p->thread_num);
147 * Remove a worker with given ID.
/*
 * Remove the worker with the given ID. Searches the ready list first,
 * then the active list, under thread_lock. NOTE(review): the ID match
 * tests and loop interiors are not visible in this chunk — confirm the
 * matching logic against the full source.
 */
149 void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id)
151 struct thread_pool_worker *w;
154 mutex_lock(&p->thread_lock);
/* First look among idle workers. */
155 list_for_each_entry(w, &p->ready_list, worker_entry) {
159 list_del(&w->worker_entry);
/* Fall back to workers currently executing an action. */
165 list_for_each_entry(w, &p->active_list, worker_entry) {
172 mutex_unlock(&p->thread_lock);
/* Destroy outside the lock — this blocks in kthread_stop(). */
175 thread_pool_exit_worker(w);
179 * Add new worker thread with given parameters.
180 * If initialization callback fails, return error.
/*
 * Create one worker thread and attach it to the pool's ready list.
 * Returns 0 on success or a negative errno. NOTE(review): some parameters
 * (e.g. the thread name and the private argument passed to init) and the
 * error-path labels are not visible in this chunk.
 */
182 int thread_pool_add_worker(struct thread_pool *p,
185 void *(* init)(void *private),
186 void (* cleanup)(void *private),
189 struct thread_pool_worker *w;
192 w = kzalloc(sizeof(struct thread_pool_worker), GFP_KERNEL);
197 init_waitqueue_head(&w->wait);
198 w->cleanup = cleanup;
/* Start the worker loop; the worker pointer is its thread data. */
201 w->thread = kthread_run(thread_pool_worker_func, w, "%s", name);
202 if (IS_ERR(w->thread)) {
203 err = PTR_ERR(w->thread);
/* User init runs after the thread exists so failure can stop it cleanly. */
207 w->private = init(private);
208 if (IS_ERR(w->private)) {
209 err = PTR_ERR(w->private);
210 goto err_out_stop_thread;
/* Publish the worker on the ready list under thread_lock. */
213 mutex_lock(&p->thread_lock);
214 list_add_tail(&w->worker_entry, &p->ready_list);
216 mutex_unlock(&p->thread_lock);
/* Error path: undo kthread_run() when user init failed. */
221 kthread_stop(w->thread);
229 * Destroy the whole pool.
/*
 * Destroy the whole pool by repeatedly removing workers until none remain.
 * NOTE(review): the tail of this function (e.g. freeing the pool structure
 * itself) is not visible in this chunk.
 */
231 void thread_pool_destroy(struct thread_pool *p)
233 while (p->thread_num) {
234 dprintk("%s: num: %d.\n", __func__, p->thread_num);
/* Blocks until some worker becomes ready, then stops and frees it. */
235 thread_pool_del_worker(p);
242 * Create a pool with given number of threads.
243 * They will have sequential IDs started from zero.
/*
 * Allocate and populate a pool with @num worker threads (sequential IDs
 * from zero). Returns the pool on success. NOTE(review): the failure
 * return convention (NULL vs ERR_PTR) and the trailing parameters of this
 * function are not visible in this chunk — confirm against the full source.
 */
245 struct thread_pool *thread_pool_create(int num, char *name,
246 void *(* init)(void *private),
247 void (* cleanup)(void *private),
250 struct thread_pool_worker *w, *tmp;
251 struct thread_pool *p;
255 p = kzalloc(sizeof(struct thread_pool), GFP_KERNEL);
259 init_waitqueue_head(&p->wait);
260 mutex_init(&p->thread_lock);
261 INIT_LIST_HEAD(&p->ready_list);
262 INIT_LIST_HEAD(&p->active_list);
/* Spawn the requested number of workers; bail out if any fails. */
265 for (i=0; i<num; ++i) {
266 err = thread_pool_add_worker(p, name, i, init,
269 goto err_out_free_all;
/* Error path: tear down every worker created so far. */
275 list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
276 list_del(&w->worker_entry);
277 thread_pool_exit_worker(w);
285 * Schedule execution of the action on a given thread,
286 * provided ID pointer has to match previously stored
/*
 * Schedule @action on a worker whose private data matches @id (any worker
 * when @id is NULL). Sleeps up to @timeout for a ready worker, claims one
 * under thread_lock, runs the user's @setup, and hands over @data.
 * NOTE(review): the timeout/-errno handling, the worker wake-up, and the
 * return value are not visible in this chunk — confirm against the full
 * source before relying on exact error semantics.
 */
289 int thread_pool_schedule_private(struct thread_pool *p,
290 int (* setup)(void *private, void *data),
291 int (* action)(void *private, void *data),
292 void *data, long timeout, void *id)
294 struct thread_pool_worker *w, *tmp, *worker = NULL;
/* Retry until a worker was claimed or an error (e.g. timeout) occurred. */
297 while (!worker && !err) {
298 timeout = wait_event_interruptible_timeout(p->wait,
299 !list_empty(&p->ready_list),
/* Scan ready workers under the lock for one matching the requested id. */
308 mutex_lock(&p->thread_lock);
309 list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
310 if (id && id != w->private)
/* Claim the worker: move it to the active list before running setup. */
315 list_move_tail(&w->worker_entry, &p->active_list);
317 err = setup(w->private, data);
319 w->schedule_data = data;
/* Setup failed: return the worker to the ready list unclaimed. */
324 list_move_tail(&w->worker_entry, &p->ready_list);
329 mutex_unlock(&p->thread_lock);
336 * Schedule execution on arbitrary thread from the pool.
338 int thread_pool_schedule(struct thread_pool *p,
339 int (* setup)(void *private, void *data),
340 int (* action)(void *private, void *data),
341 void *data, long timeout)
343 return thread_pool_schedule_private(p, setup,
344 action, data, timeout, NULL);