/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"
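
/*
 * Each CPU binds a dedicated "kicker" event-channel IRQ (set up in
 * xen_init_lock_cpu() below): a waiting vCPU blocks in xen_poll_irq()
 * on its own kicker IRQ, and the lock releaser wakes it by sending an
 * IPI to that IRQ.
 */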
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

static void xen_qlock_kick(int cpu)
{
	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	/* clear pending */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing pending IRQ to make sure
	 * that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}
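
/*
 * The wait/kick handshake in brief:
 *
 *   waiter (xen_qlock_wait)                kicker (xen_qlock_kick)
 *   -----------------------                -----------------------
 *   xen_clear_irq_pending(irq)
 *   re-check the watched byte              update the watched byte
 *   xen_poll_irq(irq)   <------ IPI ------ xen_send_IPI_one(cpu, ...)
 *
 * Because the pending bit is cleared before the re-check, a kick that
 * arrives after the re-check can at worst make xen_poll_irq() return
 * immediately; it can never be lost.
 */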

#else /* CONFIG_QUEUED_SPINLOCKS */

enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old = READ_ONCE(zero_stats);

	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}
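
/*
 * Note: a reset is armed from userspace by writing a non-zero value to
 * the writable "zero_stats" debugfs file created below; the next stats
 * update then clears every counter exactly once via the cmpxchg() above.
 */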

static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}
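
/*
 * Bucket i of the histogram counts blocked intervals with
 * ilog2(delta) == i, i.e. delta in [2^i, 2^(i+1)) nanoseconds (deltas
 * come from xen_clocksource_read()); anything of 2^30 ns (~1.07 s) or
 * longer lands in the final overflow bucket.
 */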

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
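
/*
 * Ticketlock slowpath in brief: a vCPU that has spun for too long
 * publishes the (lock, want) pair it is waiting on in its per-cpu
 * lock_waiting slot and blocks in xen_poll_irq(); the unlocker scans
 * waiting_cpus for a CPU whose (lock, want) matches the ticket being
 * released and kicks only that CPU.
 */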

__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with newly setup (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check lock once pending cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();
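
	/*
	 * Why this order matters: the unlocker only kicks waiters when it
	 * sees the slowpath flag set on the ticket tail. If we read head
	 * first and set the flag afterwards, an unlock could slip in
	 * between, see no flag, skip the kick, and leave us blocked in
	 * xen_poll_irq() with nobody left to wake us.
	 */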

	/*
	 * Check again to make sure the lock didn't become free while we
	 * weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
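
/*
 * PV_CALLEE_SAVE_REGS_THUNK() generates a wrapper that saves and
 * restores all caller-clobbered registers around xen_lock_spinning(),
 * so the slowpath call patched into the ticketlock fastpath stays
 * cheap at the call site.
 */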

static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * The kicker IRQ is kept disabled (see xen_init_lock_cpu()) and is only
 * ever polled, so this handler must never actually run.
 */
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

/*
 * Our init of PV spinlocks is split into two init functions because we
 * use both paravirt patching and jump-label patching, and all of this
 * must be done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
#else
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
#endif
}
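
/*
 * With CONFIG_QUEUED_SPINLOCKS the generic pv-qspinlock code does all
 * of the queueing; only the wait (block this vCPU) and kick (wake that
 * vCPU) hooks are Xen-specific. Without it, the ticketlock
 * lock_spinning/unlock_kick pair above implements the same idea by hand.
 */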

/*
 * The jump_label init code, on the other hand, needs to happen _after_
 * jump labels are enabled but before SMP is started, hence we use a
 * pre-SMP initcall for it. We cannot do it in xen_init_spinlocks(), as
 * that runs before jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);

static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);
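
/*
 * Usage: booting with "xen_nopvspin" on the kernel command line keeps
 * the native, non-paravirtualized spinlocks even when running on Xen.
 */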

#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);
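
/*
 * With debugfs mounted in the usual place, the counters created above
 * show up under /sys/kernel/debug/xen/spinlocks/.
 */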

#endif	/* CONFIG_XEN_DEBUG_FS */