/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
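/*
 * A context ID encodes a generation ("version") number in its upper
 * bits and the hardware ASID in its low ASID_BITS. The expression
 * (id ^ cpu_last_asid) >> ASID_BITS used below is therefore non-zero
 * exactly when an mm's ASID was allocated in an earlier generation.
 */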
#ifdef CONFIG_ARM_LPAE
void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif
/*
 * We fork()ed a process, and we need a new context for the child to
 * run in.  We reserve version 0 for initial tasks so we will always
 * allocate an ASID. The ASID 0 is reserved for the TTBR register
 * changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	raw_spin_lock_init(&mm->context.id_lock);
}
static void flush_context(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}
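/*
 * Informational note: on a VIVT icache that tags lines with the ASID,
 * lines fetched under an ASID value could otherwise be hit again once
 * that value is recycled for a different mm, hence the extra
 * __flush_icache_all() above.
 */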
#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
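/*
 * Worked example of the generation check above, assuming ASID_BITS == 8
 * (the ARMv7 value): with cpu_last_asid == 0x0205, an mm with id 0x01f3
 * gives (0x01f3 ^ 0x0205) >> 8 == 0x3, so the ASID is stale and gets
 * replaced; an mm with id 0x0203 gives 0 and keeps its ASID.
 */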
/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = current->active_mm;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	cpu_switch_mm(mm->pgd, mm);
}
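/*
 * Note that every CPU ends up with a distinct ASID during a rollover:
 * CPU n takes cpu_last_asid + n + 1, both here and on the initiating
 * CPU, which is why __new_context() advances cpu_last_asid by NR_CPUS
 * once the broadcast completes.
 */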
#else

static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif
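/*
 * Rollover in brief (informational summary): __new_context() below is
 * reached from the context-switch path (see asm/mmu_context.h) when an
 * mm's ASID belongs to an old generation.
 *
 * 1. If the low ASID_BITS of ++cpu_last_asid are all zero, the hardware
 *    ASID space for this generation is exhausted.
 * 2. The allocating CPU then takes cpu_last_asid + smp_processor_id() + 1,
 *    flushes its TLB and broadcasts reset_context() so every other CPU
 *    does the same for its current active_mm.
 * 3. cpu_last_asid is advanced by NR_CPUS to step over the values handed
 *    out in steps 1-2, and normal allocation resumes.
 */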
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;
	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}