2 * Marvell EBU SoC common clock handling
4 * Copyright (C) 2012 Marvell
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
8 * Andrew Lunn <andrew@lunn.ch>
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
15 #include <linux/kernel.h>
16 #include <linux/clk.h>
17 #include <linux/clkdev.h>
18 #include <linux/clk-provider.h>
21 #include <linux/of_address.h>
/*
 * Spread Spectrum Clock Generator (SSCG) configuration register layout:
 * bits [17:16] select the spread mode, bits [15:8] hold the low boundary
 * and bits [7:0] hold the high boundary of the spread.
 */
29 #define SSCG_CONF_MODE(reg) (((reg) >> 16) & 0x3)
30 #define SSCG_SPREAD_DOWN 0x0
31 #define SSCG_SPREAD_UP 0x1
32 #define SSCG_SPREAD_CENTRAL 0x2
33 #define SSCG_CONF_LOW(reg) (((reg) >> 8) & 0xFF)
34 #define SSCG_CONF_HIGH(reg) ((reg) & 0xFF)
/* Core clocks (tclk, cpuclk, ratio clocks) exposed through a onecell provider */
36 static struct clk_onecell_data clk_data;
39 * This function can be used by the Kirkwood, the Armada 370, the
40 * Armada XP and the Armada 375 SoC. The name of the function was
41 * chosen following the dt convention: using the first known SoC
/*
 * kirkwood_fix_sscg_deviation() - compensate a clock rate for SSCG spread
 * @system_clk: nominal clock frequency (Hz) to be corrected
 *
 * Reads the SSCG configuration register (located via the "sscg" DT node)
 * and shifts @system_clk by half of the frequency spread, in the direction
 * given by the configured spread mode (down / up / central).
 *
 * NOTE(review): several source lines are elided in this chunk (function
 * braces, early-return error paths, the up-spread/central case bodies,
 * local declarations of sscg_reg and freq_swing_half, and the final
 * return). Comments below describe only the code that is visible.
 */
44 u32 kirkwood_fix_sscg_deviation(u32 system_clk)
46 struct device_node *sscg_np = NULL;
47 void __iomem *sscg_map;
49 s32 low_bound, high_bound;
/* Locate and map the SSCG configuration register from the device tree */
52 sscg_np = of_find_node_by_name(NULL, "sscg");
53 if (sscg_np == NULL) {
54 pr_err("cannot get SSCG register node\n");
58 sscg_map = of_iomap(sscg_np, 0);
59 if (sscg_map == NULL) {
60 pr_err("cannot map SSCG register\n");
/* Extract the spread boundaries from the configuration register */
64 sscg_reg = readl(sscg_map);
65 high_bound = SSCG_CONF_HIGH(sscg_reg);
66 low_bound = SSCG_CONF_LOW(sscg_reg);
/* No spread configured (or bogus register content): nothing to correct */
68 if ((high_bound - low_bound) <= 0)
71 * From Marvell engineer we got the following formula (when
72 * this code was written, the datasheet was erroneous)
73 * Spread percentage = 1/96 * (H - L) / H
74 * H = SSCG_High_Boundary
75 * L = SSCG_Low_Boundary
77 * As the deviation is half of spread then it lead to the
78 * following formula in the code.
80 * To avoid an overflow and not lose any significant digit in
81 * the same time we have to use a 64 bit integer.
/* 64-bit math: (H - L) * <factor> may overflow 32 bits before do_div */
84 freq_swing_half = (((u64)high_bound - (u64)low_bound)
86 do_div(freq_swing_half, (2 * 96 * high_bound));
/* Apply half the spread according to the configured spread direction */
88 switch (SSCG_CONF_MODE(sscg_reg)) {
89 case SSCG_SPREAD_DOWN:
90 system_clk -= freq_swing_half;
93 system_clk += freq_swing_half;
95 case SSCG_SPREAD_CENTRAL:
/* Balance the of_find_node_by_name() reference */
103 of_node_put(sscg_np);
/*
 * mvebu_coreclk_setup() - register the SoC core clocks from a DT node
 * @np:   device-tree node holding the Sample-At-Reset (SAR) register
 * @desc: SoC-specific callbacks and ratio table describing the core clocks
 *
 * Registers, in order: the fixed-rate TCLK (index 0), the fixed-rate CPU
 * clock (index 1, optionally corrected for SSCG deviation), and one
 * fixed-factor clock per entry of desc->ratios (indices 2..). Finally the
 * whole set is exposed via a onecell clock provider on @np.
 *
 * NOTE(review): some source lines are elided in this chunk (error
 * handling after of_iomap/kzalloc, GFP flags, rate arguments, iounmap of
 * the SAR mapping). Comments describe only the visible code.
 */
108 void __init mvebu_coreclk_setup(struct device_node *np,
109 const struct coreclk_soc_desc *desc)
111 const char *tclk_name = "tclk";
112 const char *cpuclk_name = "cpuclk";
/* Map the SAR register the desc callbacks decode rates/ratios from */
117 base = of_iomap(np, 0);
121 /* Allocate struct for TCLK, cpu clk, and core ratio clocks */
122 clk_data.clk_num = 2 + desc->num_ratios;
123 clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
125 if (WARN_ON(!clk_data.clks)) {
/* Register TCLK; DT "clock-output-names" may override the default name */
131 of_property_read_string_index(np, "clock-output-names", 0,
133 rate = desc->get_tclk_freq(base);
134 clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
136 WARN_ON(IS_ERR(clk_data.clks[0]));
138 /* Register CPU clock */
139 of_property_read_string_index(np, "clock-output-names", 1,
141 rate = desc->get_cpu_freq(base);
/* Optional SSCG correction: both hooks must exist and SSCG be enabled */
143 if (desc->is_sscg_enabled && desc->fix_sscg_deviation
144 && desc->is_sscg_enabled(base))
145 rate = desc->fix_sscg_deviation(rate);
147 clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
149 WARN_ON(IS_ERR(clk_data.clks[1]));
151 /* Register fixed-factor clocks derived from CPU clock */
152 for (n = 0; n < desc->num_ratios; n++) {
153 const char *rclk_name = desc->ratios[n].name;
156 of_property_read_string_index(np, "clock-output-names",
158 desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
159 clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
160 cpuclk_name, 0, mult, div);
161 WARN_ON(IS_ERR(clk_data.clks[2+n]));
164 /* SAR register isn't needed anymore */
/* Expose all registered clocks through a standard onecell provider */
167 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
171 * Clock Gating Control
/* Single lock shared by all gate clocks registered below */
174 DEFINE_SPINLOCK(ctrl_gating_lock);
/*
 * Per-controller gating state handed to the clock provider as context.
 * NOTE(review): the struct members (lock, gates array, num_gates) are
 * elided in this chunk; their use is visible in the functions below.
 */
176 struct clk_gating_ctrl {
/* Recover the clk_gate wrapper from a generic clk_hw pointer */
182 #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
/*
 * clk_gating_get_src() - of_clk provider callback for gate clocks
 * @clkspec: DT clock specifier; args[0] is the gate bit index requested
 * @data:    the struct clk_gating_ctrl registered with the provider
 *
 * Returns the struct clk whose gate bit_idx matches args[0], or an
 * ERR_PTR: -EINVAL for a malformed specifier, -ENODEV when no gate with
 * that bit index was registered.
 */
184 static struct clk *clk_gating_get_src(
185 struct of_phandle_args *clkspec, void *data)
187 struct clk_gating_ctrl *ctrl = (struct clk_gating_ctrl *)data;
/* A gate specifier must carry at least the bit index argument */
190 if (clkspec->args_count < 1)
191 return ERR_PTR(-EINVAL);
/* Linear scan: match the requested bit index against each gate */
193 for (n = 0; n < ctrl->num_gates; n++) {
194 struct clk_gate *gate =
195 to_clk_gate(__clk_get_hw(ctrl->gates[n]));
196 if (clkspec->args[0] == gate->bit_idx)
197 return ctrl->gates[n];
199 return ERR_PTR(-ENODEV);
/*
 * mvebu_clk_gating_setup() - register a bank of gate clocks from a DT node
 * @np:   device-tree node holding the clock-gating control register
 * @desc: NULL-name-terminated table describing each gate (name, parent,
 *        bit index, flags)
 *
 * Registers one clk_gate per table entry, all sharing the mapped control
 * register and the global ctrl_gating_lock, then installs
 * clk_gating_get_src() as the provider for @np.
 *
 * NOTE(review): some source lines are elided in this chunk (error
 * handling after of_iomap/of_clk_get/kzalloc, clk_put of the parent,
 * GFP flags, loop increments). Comments describe only the visible code.
 */
202 void __init mvebu_clk_gating_setup(struct device_node *np,
203 const struct clk_gating_soc_desc *desc)
205 struct clk_gating_ctrl *ctrl;
208 const char *default_parent = NULL;
/* Map the gating control register shared by every gate in the bank */
211 base = of_iomap(np, 0);
/* First clock referenced by @np acts as the default parent for gates */
215 clk = of_clk_get(np, 0);
217 default_parent = __clk_get_name(clk);
221 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
225 /* lock must already be initialized */
226 ctrl->lock = &ctrl_gating_lock;
228 /* Count, allocate, and register clock gates */
229 for (n = 0; desc[n].name;)
233 ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
235 if (WARN_ON(!ctrl->gates))
238 for (n = 0; n < ctrl->num_gates; n++) {
/* Per-entry parent overrides the default taken from the DT */
240 (desc[n].parent) ? desc[n].parent : default_parent;
241 ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
242 desc[n].flags, base, desc[n].bit_idx,
244 WARN_ON(IS_ERR(ctrl->gates[n]));
/* Resolve DT clock specifiers to gates via clk_gating_get_src() */
247 of_clk_add_provider(np, clk_gating_get_src, ctrl);