/*
 * pmu.c — Rockchip PMU power-domain and NIU idle-request control
 * (from firefly-linux-kernel-4.4.55)
 */
1 #include <linux/module.h>
2 #include <linux/spinlock.h>
3 #include <mach/pmu.h>
4 #include <mach/sram.h>
5 #include <mach/cpu_axi.h>
6
7 static void __sramfunc pmu_set_power_domain_sram(enum pmu_power_domain pd, bool on)
8 {
9         u32 mask = 1 << pd;
10         u32 val = readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_CON);
11
12         if (on)
13                 val &= ~mask;
14         else
15                 val |=  mask;
16         writel_relaxed(val, RK30_PMU_BASE + PMU_PWRDN_CON);
17         dsb();
18
19         while (pmu_power_domain_is_on(pd) != on)
20                 ;
21 }
22
/*
 * Wrap pmu_set_power_domain_sram() in a DDR_SAVE_SP/DDR_RESTORE_SP
 * pair.  NOTE(review): presumably this relocates the stack out of DDR
 * for the duration of the domain switch — confirm against mach/sram.h.
 * noinline keeps the stack switch from being folded into callers.
 */
static noinline void do_pmu_set_power_domain(enum pmu_power_domain pd, bool on)
{
	/* static: must not live on the stack that DDR_SAVE_SP swaps out */
	static unsigned long save_sp;

	DDR_SAVE_SP(save_sp);
	pmu_set_power_domain_sram(pd, on);
	DDR_RESTORE_SP(save_sp);
}
31
/*
 * Software should power domains down or up one by one.  Powering
 * multiple power domains up or down simultaneously would change the
 * chip's electric current dramatically, which would affect chip
 * function.  pmu_pd_lock serializes all domain transitions.
 */
static DEFINE_SPINLOCK(pmu_pd_lock);

/*
 * Saved CPU/AXI QoS register contents for the bus masters inside the
 * VIO (lcdc0/lcdc1/cif0/cif1/ipp/rga), VIDEO (vpu) and GPU power
 * domains.  Filled by CPU_AXI_SAVE_QOS before a power-down, replayed
 * by CPU_AXI_RESTORE_QOS after the matching power-up.
 */
static u32 lcdc0_qos[CPU_AXI_QOS_NUM_REGS];
static u32 lcdc1_qos[CPU_AXI_QOS_NUM_REGS];
static u32 cif0_qos[CPU_AXI_QOS_NUM_REGS];
static u32 cif1_qos[CPU_AXI_QOS_NUM_REGS];
static u32 ipp_qos[CPU_AXI_QOS_NUM_REGS];
static u32 rga_qos[CPU_AXI_QOS_NUM_REGS];
static u32 gpu_qos[CPU_AXI_QOS_NUM_REGS];
static u32 vpu_qos[CPU_AXI_QOS_NUM_REGS];
47 void pmu_set_power_domain(enum pmu_power_domain pd, bool on)
48 {
49         unsigned long flags;
50
51         spin_lock_irqsave(&pmu_pd_lock, flags);
52         if (pmu_power_domain_is_on(pd) == on) {
53                 spin_unlock_irqrestore(&pmu_pd_lock, flags);
54                 return;
55         }
56         if (!on) {
57                 /* if power down, idle request to NIU first */
58                 if (pd == PD_VIO) {
59                         CPU_AXI_SAVE_QOS(lcdc0_qos, LCDC0);
60                         CPU_AXI_SAVE_QOS(lcdc1_qos, LCDC1);
61                         CPU_AXI_SAVE_QOS(cif0_qos, CIF0);
62                         CPU_AXI_SAVE_QOS(cif1_qos, CIF1);
63                         CPU_AXI_SAVE_QOS(ipp_qos, IPP);
64                         CPU_AXI_SAVE_QOS(rga_qos, RGA);
65                         pmu_set_idle_request(IDLE_REQ_VIO, true);
66                 } else if (pd == PD_VIDEO) {
67                         CPU_AXI_SAVE_QOS(vpu_qos, VPU);
68                         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
69                 } else if (pd == PD_GPU) {
70                         CPU_AXI_SAVE_QOS(gpu_qos, GPU);
71                         pmu_set_idle_request(IDLE_REQ_GPU, true);
72                 }
73         }
74         do_pmu_set_power_domain(pd, on);
75         if (on) {
76                 /* if power up, idle request release to NIU */
77                 if (pd == PD_VIO) {
78                         pmu_set_idle_request(IDLE_REQ_VIO, false);
79                         CPU_AXI_RESTORE_QOS(lcdc0_qos, LCDC0);
80                         CPU_AXI_RESTORE_QOS(lcdc1_qos, LCDC1);
81                         CPU_AXI_RESTORE_QOS(cif0_qos, CIF0);
82                         CPU_AXI_RESTORE_QOS(cif1_qos, CIF1);
83                         CPU_AXI_RESTORE_QOS(ipp_qos, IPP);
84                         CPU_AXI_RESTORE_QOS(rga_qos, RGA);
85                 } else if (pd == PD_VIDEO) {
86                         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
87                         CPU_AXI_RESTORE_QOS(vpu_qos, VPU);
88                 } else if (pd == PD_GPU) {
89                         pmu_set_idle_request(IDLE_REQ_GPU, false);
90                         CPU_AXI_RESTORE_QOS(gpu_qos, GPU);
91                 }
92         }
93         spin_unlock_irqrestore(&pmu_pd_lock, flags);
94 }
95 EXPORT_SYMBOL(pmu_set_power_domain);
96
97 static DEFINE_SPINLOCK(pmu_misc_con1_lock);
98
99 void pmu_set_idle_request(enum pmu_idle_req req, bool idle)
100 {
101         u32 idle_mask = 1 << (26 - req);
102         u32 idle_target = idle << (26 - req);
103         u32 ack_mask = 1 << (31 - req);
104         u32 ack_target = idle << (31 - req);
105         u32 mask = 1 << (req + 1);
106         u32 val;
107         unsigned long flags;
108
109 #if defined(CONFIG_ARCH_RK3188)
110         if (req == IDLE_REQ_CORE) {
111                 idle_mask = 1 << 15;
112                 idle_target = idle << 15;
113                 ack_mask = 1 << 18;
114                 ack_target = idle << 18;
115         } else if (req == IDLE_REQ_DMA) {
116                 idle_mask = 1 << 14;
117                 idle_target = idle << 14;
118                 ack_mask = 1 << 17;
119                 ack_target = idle << 17;
120         }
121 #endif
122
123         spin_lock_irqsave(&pmu_misc_con1_lock, flags);
124         val = readl_relaxed(RK30_PMU_BASE + PMU_MISC_CON1);
125         if (idle)
126                 val |=  mask;
127         else
128                 val &= ~mask;
129         writel_relaxed(val, RK30_PMU_BASE + PMU_MISC_CON1);
130         dsb();
131
132         while ((readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_ST) & ack_mask) != ack_target)
133                 ;
134         while ((readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_ST) & idle_mask) != idle_target)
135                 ;
136         spin_unlock_irqrestore(&pmu_misc_con1_lock, flags);
137 }
138 EXPORT_SYMBOL(pmu_set_idle_request);