/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

/* 100 ms minimum timeslice, expressed in jiffies */
#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)
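
/*
 * Scheduler bookkeeping: one wait queue per priority level, a bitmap
 * (sized to hold MAX_PRIO bits plus a sentinel) marking the levels that
 * may have waiters, and per-NUMA-node lists of the spus currently
 * running a context, each protected by its own mutex.
 */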
#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
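
/**
 * node_allowed - check if the current task may run on a given node
 * @node:	NUMA node to check
 *
 * A node is usable only if it has CPUs at all and the calling task's
 * CPU affinity mask intersects with them.
 */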
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 *
 * This function removes an spu from the active list.  If the spu was
 * found on the active list the function returns 1, else it doesn't do
 * anything and returns 0.
 */
static int spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;
	struct spu *tmp;
	int rc = 0;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
		if (tmp == spu) {
			list_del_init(&spu->list);
			rc = 1;
			break;
		}
	}
	mutex_unlock(&spu_prio->active_mutex[node]);
	return rc;
}
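
/*
 * SPE MFCs translate through the owning task's page tables, but an SPE
 * is not a CPU and never shows up in mm->cpu_vm_mask.  Setting every
 * bit in the mask forces TLB invalidations for this mm to be broadcast
 * with tlbie rather than done CPU-locally with tlbiel, so the SPE-side
 * translations are shot down as well.
 */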
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
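
/*
 * Illustrative sketch (not part of the original file): an external
 * subscriber such as a profiler can watch spu context switches through
 * the chain above.  The names my_spu_switch/my_nb are made up.  The
 * callback's action value is the context's object_id, or 0 when the
 * spu is being unbound, and the data pointer is the spu itself:
 *
 *	static int my_spu_switch(struct notifier_block *nb,
 *				 unsigned long object_id, void *data)
 *	{
 *		struct spu *spu = data;
 *
 *		pr_debug("object_id %lx now on SPU %d\n", object_id,
 *			 spu->number);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_spu_switch,
 *	};
 *
 *	spu_switch_event_register(&my_nb);
 */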

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();	/* make the callbacks visible before restoring the context */
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 *
 * If the spu was on the active list the function returns 1, else 0.
 */
static int spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	int was_active = spu_remove_from_active_list(spu);

	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;

	return was_active;
}
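
/*
 * Helpers for parking a context on its priority's wait queue.  The
 * bitmap bit is set whenever a waiter is queued and cleared, under the
 * queue lock, once the queue drains.
 */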
static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
	set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	spin_lock_irqsave(&wq->lock, flags);

	remove_wait_queue_locked(wq, wait);
	/* Clear the bitmap bit when the last waiter at this prio leaves. */
	if (list_empty(&wq->task_list))
		clear_bit(prio, spu_prio->bitmap);

	spin_unlock_irqrestore(&wq->lock, flags);
}
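
/*
 * Sleep until an spu may be available for this context.  The context's
 * state semaphore is dropped around the schedule() so that whoever owns
 * an spu can release it; a pending signal skips or aborts the sleep and
 * is dealt with by the caller.
 */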
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &spu_prio->waitq[prio];
	DEFINE_WAIT(wait);

	if (ctx->spu)
		return;

	spu_add_wq(wq, &wait, prio);

	if (!signal_pending(current)) {
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
	}

	spu_del_wq(wq, &wait, prio);
}
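
/*
 * Wake exactly one waiter at the highest occupied priority level.  The
 * sentinel bit set at MAX_PRIO during init guarantees that
 * sched_find_first_bit() terminates even when nobody is waiting.
 */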
static void spu_prio_wakeup(void)
{
	int best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		wait_queue_head_t *wq = &spu_prio->waitq[best];
		wake_up_interruptible_nr(wq, 1);
	}
}
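
/*
 * Try to allocate an idle spu, starting on the NUMA node of the current
 * CPU and then rotating through all other nodes the caller is allowed
 * to run on.
 */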
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
	/* Future: spu_get_idle() if possible,
	 * otherwise try to preempt an active
	 * context.
	 */
	return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */
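
/**
 * spu_activate - find and bind a free spu for a context
 * @ctx:	spu context to bind
 * @flags:	passed on to the allocator, currently unused
 *
 * Loops until an spu has been bound or a signal is pending; returns 0
 * on success and -ERESTARTSYS when interrupted.  Expects to be called
 * with ctx->state_sema held for writing, as spu_prio_wait() drops and
 * retakes it around the sleep.
 */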
int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;
	int ret = 0;

	for (;;) {
		if (ctx->spu)
			return 0;
		spu = spu_get(ctx, flags);
		if (spu != NULL) {
			/* The context was bound while we slept in
			 * spu_prio_wait(); give the extra spu back. */
			if (ctx->spu != NULL) {
				spu_free(spu);
				spu_prio_wakeup();
				break;
			}
			spu_bind_context(spu, ctx);
			break;
		}
		spu_prio_wait(ctx, flags);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			spu_prio_wakeup();
			break;
		}
	}
	return ret;
}
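
/**
 * spu_deactivate - unbind a context from its spu, if it has one
 * @ctx:	spu context to unbind
 *
 * A context that was actually running frees its spu and wakes the
 * highest-priority waiter so it can grab it.
 */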
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int was_active;

	spu = ctx->spu;
	if (!spu)
		return;
	was_active = spu_unbind_context(spu, ctx);
	if (was_active) {
		spu_free(spu);
		spu_prio_wakeup();
	}
}
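
/**
 * spu_yield - give up the spu if another context is waiting for one
 * @ctx:	spu context that may yield its spu
 *
 * Only a trylock is attempted on the state semaphore; if it is
 * contended the yield is skipped instead of blocking the caller.
 */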
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (down_write_trylock(&ctx->state_sema)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			} else {
				spu->prio = MAX_PRIO;
			}
		}
		up_write(&ctx->state_sema);
	}
	if (unlikely(need_yield))
		yield();
}
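
/*
 * Allocate and initialize the priority array.  Note the sentinel bit
 * at MAX_PRIO: it is never cleared and bounds every bitmap search.
 */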
int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&spu_prio->waitq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	return 0;
}
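
/*
 * On module exit, strip any spus still sitting on the active lists and
 * return them to the allocator before freeing the priority array.
 */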
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}