1 #include <linux/module.h>
2 #include <linux/spinlock.h>
5 #include <mach/cpu_axi.h>
/*
 * Switch one PMU power domain on or off while executing from SRAM.
 *
 * NOTE(review): this chunk is elided — source lines are missing between the
 * statements below (the embedded original line numbers show the gaps), so
 * the bit manipulation of @val between the read and the write is not visible
 * here. Presumably it sets/clears the PWRDN bit for @pd — confirm against
 * the full source.
 *
 * Runs from SRAM (__sramfunc), which suggests DDR may be unavailable during
 * the transition — TODO confirm with the caller's DDR_SAVE_SP/DDR_RESTORE_SP
 * pairing.
 */
7 static void __sramfunc pmu_set_power_domain_sram(enum pmu_power_domain pd, bool on)
/* Read-modify-write of the power-domain control register. */
10 u32 val = readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_CON);
16 writel_relaxed(val, RK30_PMU_BASE + PMU_PWRDN_CON);
/* Busy-wait until the hardware reports the requested domain state. */
19 while (pmu_power_domain_is_on(pd) != on)
/*
 * Wrapper that invokes the SRAM-resident domain switch.
 *
 * NOTE(review): elided chunk — lines are missing between the statements
 * below. DDR_RESTORE_SP(save_sp) implies a matching DDR_SAVE_SP in one of
 * the elided lines (saving the stack pointer before jumping to SRAM code);
 * verify against the full source. noinline keeps this call frame distinct
 * from the caller's, presumably so the SP save/restore brackets cleanly.
 */
23 static noinline void do_pmu_set_power_domain(enum pmu_power_domain pd, bool on)
25 static unsigned long save_sp;
28 pmu_set_power_domain_sram(pd, on);
29 DDR_RESTORE_SP(save_sp);
33 * Software should power domains up or down one at a time. Powering multiple
34 * power domains up or down simultaneously causes the chip's electric current
35 * to change dramatically, which can affect chip function.
/* Serializes power-domain transitions (see the one-at-a-time note above). */
37 static DEFINE_SPINLOCK(pmu_pd_lock);
/*
 * Per-master CPU/AXI QoS register snapshots. QoS settings are saved before
 * a domain is powered down and restored after it is powered back up
 * (see pmu_set_power_domain), since the registers lose their contents
 * while the domain is off.
 */
38 static u32 lcdc0_qos[CPU_AXI_QOS_NUM_REGS];
39 static u32 lcdc1_qos[CPU_AXI_QOS_NUM_REGS];
40 static u32 cif0_qos[CPU_AXI_QOS_NUM_REGS];
41 static u32 cif1_qos[CPU_AXI_QOS_NUM_REGS];
42 static u32 ipp_qos[CPU_AXI_QOS_NUM_REGS];
43 static u32 rga_qos[CPU_AXI_QOS_NUM_REGS];
44 static u32 gpu_qos[CPU_AXI_QOS_NUM_REGS];
45 static u32 vpu_qos[CPU_AXI_QOS_NUM_REGS];
/*
 * Public entry point: power a PMU domain up or down, with AXI QoS
 * save/restore and NIU idle-request sequencing around the transition.
 *
 * Sequence visible here:
 *   1. Take pmu_pd_lock; bail out early if the domain is already in the
 *      requested state.
 *   2. On power-DOWN: save the QoS registers of every AXI master in the
 *      domain, then assert the idle request toward the NIU.
 *   3. Perform the actual transition via do_pmu_set_power_domain().
 *   4. On power-UP: release the idle request, then restore the saved QoS
 *      registers.
 *
 * NOTE(review): elided chunk — the `if (!on)` / `if (on)` guards around the
 * save/idle and release/restore branches, the closing braces, and the
 * `unsigned long flags;` declaration are among the missing lines; the
 * visible pd == PD_VIO/PD_VIDEO/PD_GPU chains imply per-domain dispatch.
 */
47 void pmu_set_power_domain(enum pmu_power_domain pd, bool on)
51 spin_lock_irqsave(&pmu_pd_lock, flags);
/* Nothing to do if the domain is already in the requested state. */
52 if (pmu_power_domain_is_on(pd) == on) {
53 spin_unlock_irqrestore(&pmu_pd_lock, flags);
57 /* if power down, idle request to NIU first */
59 CPU_AXI_SAVE_QOS(lcdc0_qos, LCDC0);
60 CPU_AXI_SAVE_QOS(lcdc1_qos, LCDC1);
61 CPU_AXI_SAVE_QOS(cif0_qos, CIF0);
62 CPU_AXI_SAVE_QOS(cif1_qos, CIF1);
63 CPU_AXI_SAVE_QOS(ipp_qos, IPP);
64 CPU_AXI_SAVE_QOS(rga_qos, RGA);
65 pmu_set_idle_request(IDLE_REQ_VIO, true);
66 } else if (pd == PD_VIDEO) {
67 CPU_AXI_SAVE_QOS(vpu_qos, VPU);
68 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
69 } else if (pd == PD_GPU) {
70 CPU_AXI_SAVE_QOS(gpu_qos, GPU);
71 pmu_set_idle_request(IDLE_REQ_GPU, true);
/* The actual domain transition (runs from SRAM underneath). */
74 do_pmu_set_power_domain(pd, on);
76 /* if power up, idle request release to NIU */
78 pmu_set_idle_request(IDLE_REQ_VIO, false);
79 CPU_AXI_RESTORE_QOS(lcdc0_qos, LCDC0);
80 CPU_AXI_RESTORE_QOS(lcdc1_qos, LCDC1);
81 CPU_AXI_RESTORE_QOS(cif0_qos, CIF0);
82 CPU_AXI_RESTORE_QOS(cif1_qos, CIF1);
83 CPU_AXI_RESTORE_QOS(ipp_qos, IPP);
84 CPU_AXI_RESTORE_QOS(rga_qos, RGA);
85 } else if (pd == PD_VIDEO) {
86 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
87 CPU_AXI_RESTORE_QOS(vpu_qos, VPU);
88 } else if (pd == PD_GPU) {
89 pmu_set_idle_request(IDLE_REQ_GPU, false);
90 CPU_AXI_RESTORE_QOS(gpu_qos, GPU);
93 spin_unlock_irqrestore(&pmu_pd_lock, flags);
95 EXPORT_SYMBOL(pmu_set_power_domain);
/* Serializes read-modify-write access to PMU_MISC_CON1. */
97 static DEFINE_SPINLOCK(pmu_misc_con1_lock);

/*
 * Assert or release an idle request toward the NIU for the given requester,
 * then poll until the hardware acknowledges.
 *
 * Bit layout derived from @req (default case):
 *   PMU_PWRDN_ST bit (26 - req)  : idle status
 *   PMU_PWRDN_ST bit (31 - req)  : idle acknowledge
 *   PMU_MISC_CON1 bit (req + 1)  : request bit (presumably — the RMW of
 *                                  @val between the read and write is elided)
 *
 * On RK3188, IDLE_REQ_CORE and IDLE_REQ_DMA use different status/ack bit
 * positions (15/18 and 14/17); the corresponding *_mask reassignments are
 * among the elided lines.
 *
 * NOTE(review): elided chunk — `unsigned long flags; u32 val;` declarations,
 * the mask updates in the RK3188 branch, the set/clear of the request bit,
 * and the closing braces are missing from this view. Two poll loops appear
 * (ack then idle status); their ordering relative to each other and any
 * on/off asymmetry cannot be confirmed from what is visible.
 */
99 void pmu_set_idle_request(enum pmu_idle_req req, bool idle)
101 u32 idle_mask = 1 << (26 - req);
102 u32 idle_target = idle << (26 - req);
103 u32 ack_mask = 1 << (31 - req);
104 u32 ack_target = idle << (31 - req);
105 u32 mask = 1 << (req + 1);
109 #if defined(CONFIG_ARCH_RK3188)
110 if (req == IDLE_REQ_CORE) {
112 idle_target = idle << 15;
114 ack_target = idle << 18;
115 } else if (req == IDLE_REQ_DMA) {
117 idle_target = idle << 14;
119 ack_target = idle << 17;
/* Read-modify-write the request bit under the MISC_CON1 lock. */
123 spin_lock_irqsave(&pmu_misc_con1_lock, flags);
124 val = readl_relaxed(RK30_PMU_BASE + PMU_MISC_CON1);
129 writel_relaxed(val, RK30_PMU_BASE + PMU_MISC_CON1);
/* Busy-wait for the acknowledge bit, then the idle status bit. */
132 while ((readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_ST) & ack_mask) != ack_target)
134 while ((readl_relaxed(RK30_PMU_BASE + PMU_PWRDN_ST) & idle_mask) != idle_target)
136 spin_unlock_irqrestore(&pmu_misc_con1_lock, flags);
138 EXPORT_SYMBOL(pmu_set_idle_request);