MIPS: KVM: Whitespace fixes in kvm_mips_callbacks
[firefly-linux-kernel-4.4.55.git] / arch / mips / include / asm / kvm_host.h
index 41e180ed36e33a27b8e39856a099899f6580583f..b0aa95565752a8d79212ecfd0bc56be0bacd2e42 100644 (file)
@@ -404,8 +404,19 @@ struct kvm_vcpu_arch {
 
        u32 io_gpr;             /* GPR used as IO source/target */
 
-       /* Used to calibrate the virutal count register for the guest */
-       int32_t host_cp0_count;
+       struct hrtimer comparecount_timer;
+       /* Count timer control KVM register */
+       uint32_t count_ctl;
+       /* Count bias from the raw time */
+       uint32_t count_bias;
+       /* Frequency of timer in Hz */
+       uint32_t count_hz;
+       /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
+       s64 count_dyn_bias;
+       /* Resume time */
+       ktime_t count_resume;
+       /* Period of timer tick in ns */
+       u64 count_period;
 
        /* Bitmask of exceptions that are pending */
        unsigned long pending_exceptions;
@@ -426,8 +437,6 @@ struct kvm_vcpu_arch {
        uint32_t guest_kernel_asid[NR_CPUS];
        struct mm_struct guest_kernel_mm, guest_user_mm;
 
-       struct hrtimer comparecount_timer;
-
        int last_sched_cpu;
 
        /* WAIT executed */
@@ -442,6 +451,7 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_context(cop0)                (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
 #define kvm_write_c0_guest_context(cop0, val)  (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
 #define kvm_read_c0_guest_userlocal(cop0)      (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_write_c0_guest_userlocal(cop0, val)        (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
 #define kvm_read_c0_guest_pagemask(cop0)       (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
 #define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
 #define kvm_read_c0_guest_wired(cop0)          (cop0->reg[MIPS_CP0_TLB_WIRED][0])
@@ -481,15 +491,74 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_errorepc(cop0)       (cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
 
+/*
+ * Some of the guest registers may be modified asynchronously (e.g. from a
+ * hrtimer callback in hard irq context) and therefore need stronger atomicity
+ * guarantees than other registers.
+ */
+
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+                                               unsigned long val)
+{
+       unsigned long temp;     /* LL result, then SC success flag (0 = SC failed) */
+       do {
+               __asm__ __volatile__(   /* atomically perform *reg |= val via LL/SC */
+               "       .set    mips3                           \n"
+               "       " __LL "%0, %1                          \n"
+               "       or      %0, %2                          \n"
+               "       " __SC  "%0, %1                         \n"
+               "       .set    mips0                           \n"
+               : "=&r" (temp), "+m" (*reg)
+               : "r" (val));
+       } while (unlikely(!temp));      /* retry if the store-conditional failed */
+}
+
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+                                                 unsigned long val)
+{
+       unsigned long temp;     /* LL result, then SC success flag (0 = SC failed) */
+       do {
+               __asm__ __volatile__(   /* atomically perform *reg &= ~val via LL/SC */
+               "       .set    mips3                           \n"
+               "       " __LL "%0, %1                          \n"
+               "       and     %0, %2                          \n"
+               "       " __SC  "%0, %1                         \n"
+               "       .set    mips0                           \n"
+               : "=&r" (temp), "+m" (*reg)
+               : "r" (~val));
+       } while (unlikely(!temp));      /* retry if the store-conditional failed */
+}
+
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+                                                  unsigned long change,
+                                                  unsigned long val)
+{
+       unsigned long temp;     /* LL result, then SC success flag (0 = SC failed) */
+       do {
+               __asm__ __volatile__(   /* atomically set bits in 'change' mask to 'val' */
+               "       .set    mips3                           \n"
+               "       " __LL "%0, %1                          \n"
+               "       and     %0, %2                          \n"
+               "       or      %0, %3                          \n"
+               "       " __SC  "%0, %1                         \n"
+               "       .set    mips0                           \n"
+               : "=&r" (temp), "+m" (*reg)
+               : "r" (~change), "r" (val & change));
+       } while (unlikely(!temp));      /* retry if the store-conditional failed */
+}
+
 #define kvm_set_c0_guest_status(cop0, val)     (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
 #define kvm_clear_c0_guest_status(cop0, val)   (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-#define kvm_set_c0_guest_cause(cop0, val)      (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
-#define kvm_clear_c0_guest_cause(cop0, val)    (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+#define kvm_set_c0_guest_cause(cop0, val)                              \
+       _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_clear_c0_guest_cause(cop0, val)                            \
+       _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
 #define kvm_change_c0_guest_cause(cop0, change, val)                   \
-{                                                                      \
-       kvm_clear_c0_guest_cause(cop0, change);                         \
-       kvm_set_c0_guest_cause(cop0, ((val) & (change)));               \
-}
+       _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],  \
+                                       change, val)
+
 #define kvm_set_c0_guest_ebase(cop0, val)      (cop0->reg[MIPS_CP0_PRID][1] |= (val))
 #define kvm_clear_c0_guest_ebase(cop0, val)    (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
 #define kvm_change_c0_guest_ebase(cop0, change, val)                   \
@@ -500,29 +569,29 @@ struct kvm_vcpu_arch {
 
 
 struct kvm_mips_callbacks {
-       int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
-       int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
-       int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
-       int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
-       int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
-       int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
-       int (*handle_syscall) (struct kvm_vcpu *vcpu);
-       int (*handle_res_inst) (struct kvm_vcpu *vcpu);
-       int (*handle_break) (struct kvm_vcpu *vcpu);
-       int (*vm_init) (struct kvm *kvm);
-       int (*vcpu_init) (struct kvm_vcpu *vcpu);
-       int (*vcpu_setup) (struct kvm_vcpu *vcpu);
-        gpa_t(*gva_to_gpa) (gva_t gva);
-       void (*queue_timer_int) (struct kvm_vcpu *vcpu);
-       void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
-       void (*queue_io_int) (struct kvm_vcpu *vcpu,
-                             struct kvm_mips_interrupt *irq);
-       void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
-                               struct kvm_mips_interrupt *irq);
-       int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
-                           uint32_t cause);
-       int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
-                         uint32_t cause);
+       int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
+       int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
+       int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
+       int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
+       int (*handle_syscall)(struct kvm_vcpu *vcpu);
+       int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+       int (*handle_break)(struct kvm_vcpu *vcpu);
+       int (*vm_init)(struct kvm *kvm);
+       int (*vcpu_init)(struct kvm_vcpu *vcpu);
+       int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+       gpa_t (*gva_to_gpa)(gva_t gva);
+       void (*queue_timer_int)(struct kvm_vcpu *vcpu);
+       void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
+       void (*queue_io_int)(struct kvm_vcpu *vcpu,
+                            struct kvm_mips_interrupt *irq);
+       void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
+                              struct kvm_mips_interrupt *irq);
+       int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
+                          uint32_t cause);
+       int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
+                        uint32_t cause);
        int (*get_one_reg)(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg, s64 *v);
        int (*set_one_reg)(struct kvm_vcpu *vcpu,
@@ -645,7 +714,16 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                                                         struct kvm_run *run);
 
-enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
 
 enum emulation_result kvm_mips_check_privilege(unsigned long cause,
                                               uint32_t *opc,