+/*
+ * Some of the guest registers may be modified asynchronously (e.g. from a
+ * hrtimer callback in hard irq context) and therefore need stronger atomicity
+ * guarantees than other registers.
+ */
+
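+/*
+ * Set the bits in @val in the guest register pointed to by @reg with a
+ * single ll/sc read-modify-write, retrying until the store succeeds.
+ */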
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+						unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	or	%0, %2				\n"
+		"	" __SC "%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (val));
+	} while (unlikely(!temp));
+}
+
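+/*
+ * Clear the bits in @val from the guest register pointed to by @reg with a
+ * single ll/sc read-modify-write, retrying until the store succeeds.
+ */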
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+						  unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	" __SC "%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~val));
+	} while (unlikely(!temp));
+}
+
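+/*
+ * Atomically replace the bits selected by @change in the guest register
+ * pointed to by @reg with the corresponding bits of @val, i.e.
+ * *reg = (*reg & ~change) | (val & change), leaving all other bits intact.
+ */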
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+						   unsigned long change,
+						   unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	or	%0, %3				\n"
+		"	" __SC "%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~change), "r" (val & change));
+	} while (unlikely(!temp));
+}
+
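+/*
+ * Illustrative sketch, not part of this change: callers would typically hide
+ * these helpers behind per-register accessor macros. The macro names and the
+ * cop0->reg[][] indexing below are assumptions used only for illustration,
+ * e.g. for the guest Cause register that the hrtimer callback updates
+ * asynchronously from hard irq context:
+ */
+#define kvm_set_c0_guest_cause(cop0, val)				\
+	_kvm_atomic_set_c0_guest_reg(&(cop0)->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_clear_c0_guest_cause(cop0, val)				\
+	_kvm_atomic_clear_c0_guest_reg(&(cop0)->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_change_c0_guest_cause(cop0, change, val)			\
+	_kvm_atomic_change_c0_guest_reg(&(cop0)->reg[MIPS_CP0_CAUSE][0],	\
+					change, val)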