/*
 * Copyright (C) 2013 Broadcom Corporation
 * Copyright 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "clk-kona.h"

#include <linux/delay.h>
20 * "Policies" affect the frequencies of bus clocks provided by a
21 * CCU. (I believe these polices are named "Deep Sleep", "Economy",
22 * "Normal", and "Turbo".) A lower policy number has lower power
23 * consumption, and policy 2 is the default.
25 #define CCU_POLICY_COUNT 4
27 #define CCU_ACCESS_PASSWORD 0xA5A500
28 #define CLK_GATE_DELAY_LOOP 2000
/* Bitfield operations */

/* Produces a mask of set bits covering a range of a 32-bit value */
static inline u32 bitfield_mask(u32 shift, u32 width)
{
	return ((1 << width) - 1) << shift;
}

/* Extract the value of a bitfield found within a given register value */
static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
{
	return (reg_val & bitfield_mask(shift, width)) >> shift;
}

/* Replace the value of a bitfield found within a given register value */
static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
{
	u32 mask = bitfield_mask(shift, width);

	return (reg_val & ~mask) | (val << shift);
}
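/*
 * Worked example with hypothetical values (not taken from any real
 * CCU register layout): for shift == 4 and width == 3,
 * bitfield_mask() produces 0x70, so bitfield_extract(0x250, 4, 3)
 * yields 0x5 and bitfield_replace(0x250, 4, 3, 0x2) yields 0x220.
 */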
/* Divider and scaling helpers */

/*
 * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values
 * unsigned.  Note that unlike do_div(), the remainder is discarded
 * and the return value is the quotient (not the remainder).
 */
u64 do_div_round_closest(u64 dividend, unsigned long divisor)
{
	u64 result;

	result = dividend + ((u64)divisor >> 1);
	(void)do_div(result, divisor);

	return result;
}

/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
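/*
 * Illustrative numbers only (frac_width values vary by clock): with
 * frac_width == 3 the scaled divisor for register value 4 is
 * 4 + (1 << 3) == 12, which represents a divisor of 12 / 8 == 1.5.
 * Likewise do_div_round_closest(10, 4) yields (10 + 2) / 4 == 3.
 */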
/*
 * Build a scaled divider value as close as possible to the
 * given whole part (div_value) and fractional part (expressed
 * in billionths).
 */
u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
{
	u64 combined;

	BUG_ON(billionths >= BILLION);

	combined = (u64)div_value * BILLION + billionths;
	combined <<= div->u.s.frac_width;

	return do_div_round_closest(combined, BILLION);
}
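/*
 * For example (hypothetical frac_width of 3), a divisor of 2.5 is
 * requested as scaled_div_build(div, 2, 500000000): combined becomes
 * 2500000000, shifting left by 3 gives 20000000000, and dividing by
 * BILLION yields the scaled divisor 20 (i.e. 20 / 8 == 2.5).
 * divider() below would store 20 - 8 == 12 in the register field.
 */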
/* The scaled minimum divisor representable by a divider */
static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	return scaled_div_value(div, 0);
}

/* The scaled maximum divisor representable by a divider */
u64 scaled_div_max(struct bcm_clk_div *div)
{
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	reg_div = ((u32)1 << div->u.s.width) - 1;

	return scaled_div_value(div, reg_div);
}

/*
 * Convert a scaled divisor into its divider representation as
 * stored in a divider register field.
 */
static inline u32
divider(struct bcm_clk_div *div, u64 scaled_div)
{
	BUG_ON(scaled_div < scaled_div_min(div));
	BUG_ON(scaled_div > scaled_div_max(div));

	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}

/* Return a rate scaled for use when dividing by a scaled divisor. */
static inline u64
scale_rate(struct bcm_clk_div *div, u32 rate)
{
	if (divider_is_fixed(div))
		return (u64)rate;

	return (u64)rate << div->u.s.frac_width;
}
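/*
 * Putting the pieces together (same hypothetical frac_width of 3):
 * a 100000000 Hz rate scales to 800000000, and dividing that by the
 * scaled divisor 20 gives 40000000 Hz, i.e. 100 MHz / 2.5.  For a
 * fixed divider both the rate and the divisor are left unscaled.
 */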
/* Read a 32-bit register value from a CCU's address space. */
static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
{
	return readl(ccu->base + reg_offset);
}

/* Write a 32-bit register value into a CCU's address space. */
static inline void
__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
{
	writel(reg_val, ccu->base + reg_offset);
}

static inline unsigned long ccu_lock(struct ccu_data *ccu)
{
	unsigned long flags;

	spin_lock_irqsave(&ccu->lock, flags);

	return flags;
}

static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
{
	spin_unlock_irqrestore(&ccu->lock, flags);
}
/*
 * Enable/disable write access to CCU protected registers.  The
 * WR_ACCESS register for all CCUs is at offset 0.
 */
static inline void __ccu_write_enable(struct ccu_data *ccu)
{
	if (ccu->write_enabled) {
		pr_err("%s: access already enabled for %s\n", __func__,
			ccu->name);
		return;
	}
	ccu->write_enabled = true;
	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
}

static inline void __ccu_write_disable(struct ccu_data *ccu)
{
	if (!ccu->write_enabled) {
		pr_err("%s: access wasn't enabled for %s\n", __func__,
			ccu->name);
		return;
	}

	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
	ccu->write_enabled = false;
}
/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear).  Delay
 * a microsecond after each read of the register.  Returns true if
 * successful, or false if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
static inline bool
__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
		u32 val;
		bool bit_val;

		val = __ccu_read(ccu, reg_offset);
		bit_val = (val & bit_mask) != 0;
		if (bit_val == want)
			return true;
		udelay(1);
	}
	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
		ccu->name, reg_offset, bit, want ? "set" : "clear");

	return false;
}
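/*
 * Note: with CLK_GATE_DELAY_LOOP polls of the register and a one
 * microsecond delay after each, the worst-case wait before giving
 * up is on the order of 2 milliseconds (ignoring read latency).
 */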
/* Policy operations */

static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
{
	struct bcm_policy_ctl *control = &ccu->policy.control;
	u32 offset;
	u32 go_bit;
	u32 mask;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_ctl_exists(control))
		return true;

	offset = control->offset;
	go_bit = control->go_bit;

	/* Ensure we're not busy before we start */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * If it's a synchronous request, we'll wait for the voltage
	 * and frequency of the active load to stabilize before
	 * returning.  To do this we select the active load by
	 * setting the ATL bit.
	 *
	 * An asynchronous request instead ramps the voltage in the
	 * background, and when that process stabilizes, the target
	 * load is copied to the active load and the CCU frequency
	 * is switched.  We do this by selecting the target load
	 * (ATL bit clear) and setting the request auto-copy (AC bit
	 * set).
	 *
	 * Note, we do NOT read-modify-write this register.
	 */
	mask = (u32)1 << go_bit;
	if (sync)
		mask |= 1 << control->atl_bit;
	else
		mask |= 1 << control->ac_bit;
	__ccu_write(ccu, offset, mask);

	/* Wait for indication that operation is complete. */
	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never started\n",
			__func__, ccu->name);

	return ret;
}
static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
{
	struct bcm_lvm_en *enable = &ccu->policy.enable;
	u32 offset;
	u32 enable_bit;
	bool ret;

	/* If we don't need to control policy for this CCU, we're done. */
	if (!policy_lvm_en_exists(enable))
		return true;

	/* Ensure we're not busy before we start */
	offset = enable->offset;
	enable_bit = enable->bit;
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret) {
		pr_err("%s: ccu %s policy engine already stopped\n",
			__func__, ccu->name);
		return false;
	}

	/* Now set the bit to stop the engine (NO read-modify-write) */
	__ccu_write(ccu, offset, (u32)1 << enable_bit);

	/* Wait for indication that it has stopped. */
	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
	if (!ret)
		pr_err("%s: ccu %s policy engine never stopped\n",
			__func__, ccu->name);

	return ret;
}
/*
 * A CCU has four operating conditions ("policies"), and some clocks
 * can be disabled or enabled based on which policy is currently in
 * effect.  Such clocks have a bit in a "policy mask" register for
 * each policy indicating whether the clock is enabled for that
 * policy or not.  The bit position for a clock is the same for all
 * four registers, and the 32-bit registers are at consecutive
 * addresses.
 */
static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
{
	u32 offset;
	u32 mask;
	int i;
	bool ret;

	if (!policy_exists(policy))
		return true;

	/*
	 * We need to stop the CCU policy engine to allow update
	 * of our policy bits.
	 */
	if (!__ccu_policy_engine_stop(ccu)) {
		pr_err("%s: unable to stop CCU %s policy engine\n",
			__func__, ccu->name);
		return false;
	}

	/*
	 * For now, if a clock defines its policy bit we just mark
	 * it "enabled" for all four policies.
	 */
	offset = policy->offset;
	mask = (u32)1 << policy->bit;
	for (i = 0; i < CCU_POLICY_COUNT; i++) {
		u32 reg_val;

		reg_val = __ccu_read(ccu, offset);
		reg_val |= mask;
		__ccu_write(ccu, offset, reg_val);
		offset += sizeof(u32);
	}

	/* We're done updating; fire up the policy engine again. */
	ret = __ccu_policy_engine_start(ccu, true);
	if (!ret)
		pr_err("%s: unable to restart CCU %s policy engine\n",
			__func__, ccu->name);

	return ret;
}
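/*
 * Example with made-up numbers: for a policy mask register at offset
 * 0x10 and a clock whose policy bit is 5, the loop above sets bit 5
 * in each of the four consecutive registers at 0x10, 0x14, 0x18 and
 * 0x1c, enabling the clock under every policy.
 */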
/* Gate operations */

/* Determine whether a clock is gated.  CCU lock must be held. */
static bool
__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 bit_mask;
	u32 reg_val;

	/* If there is no gate we can assume it's enabled. */
	if (!gate_exists(gate))
		return true;

	bit_mask = 1 << gate->status_bit;
	reg_val = __ccu_read(ccu, gate->offset);

	return (reg_val & bit_mask) != 0;
}

/* Determine whether a clock is gated. */
static bool
is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	unsigned long flags;
	bool ret;

	/* Avoid taking the lock if we can */
	if (!gate_exists(gate))
		return true;

	flags = ccu_lock(ccu);
	ret = __is_clk_gate_enabled(ccu, gate);
	ccu_unlock(ccu, flags);

	return ret;
}
/*
 * Commit our desired gate state to the hardware.
 * Returns true if successful, false otherwise.
 */
static bool
__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	u32 reg_val;
	u32 mask;
	bool enabled = false;

	BUG_ON(!gate_exists(gate));
	if (!gate_is_sw_controllable(gate))
		return true;		/* Nothing we can change */

	reg_val = __ccu_read(ccu, gate->offset);

	/* For a hardware/software gate, set which is in control */
	if (gate_is_hw_controllable(gate)) {
		mask = (u32)1 << gate->hw_sw_sel_bit;
		if (gate_is_sw_managed(gate))
			reg_val |= mask;
		else
			reg_val &= ~mask;
	}

	/*
	 * If software is in control, enable or disable the gate.
	 * If hardware is, clear the enabled bit for good measure.
	 * If a software controlled gate can't be disabled, we're
	 * required to write a 0 into the enable bit (but the gate
	 * will be enabled).
	 */
	mask = (u32)1 << gate->en_bit;
	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
	    !gate_is_no_disable(gate))
		reg_val |= mask;
	else
		reg_val &= ~mask;

	__ccu_write(ccu, gate->offset, reg_val);

	/* For a hardware controlled gate, we're done */
	if (!gate_is_sw_managed(gate))
		return true;

	/* Otherwise wait for the gate to be in desired state */
	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
}
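/*
 * For instance, committing an enabled software-managed gate that
 * also has a HW/SW select bit (hypothetical hw_sw_sel_bit 3,
 * en_bit 0, status_bit 1) sets bits 3 and 0 in the gate register,
 * then polls until status bit 1 reads back as 1.
 */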
/*
 * Initialize a gate.  Our desired state (hardware/software select,
 * and if software, its enable state) is committed to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
{
	if (!gate_exists(gate))
		return true;
	return __gate_commit(ccu, gate);
}
/*
 * Set a gate to enabled or disabled state.  Does nothing if the
 * gate is not currently under software control, or if it is already
 * in the requested state.  Returns true if successful, false
 * otherwise.  CCU lock must be held.
 */
static bool
__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
{
	bool ret;

	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return true;	/* Nothing to do */

	if (!enable && gate_is_no_disable(gate)) {
		pr_warn("%s: invalid gate disable request (ignoring)\n",
			__func__);
		return true;
	}

	if (enable == gate_is_enabled(gate))
		return true;	/* No change */

	gate_flip_enabled(gate);
	ret = __gate_commit(ccu, gate);
	if (!ret)
		gate_flip_enabled(gate);	/* Revert the change */

	return ret;
}
/* Enable or disable a gate.  Returns 0 if successful, -EIO otherwise */
static int clk_gate(struct ccu_data *ccu, const char *name,
			struct bcm_clk_gate *gate, bool enable)
{
	unsigned long flags;
	bool success;

	/*
	 * Avoid taking the lock if we can.  We quietly ignore
	 * requests to change state that don't make sense.
	 */
	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
		return 0;
	if (!enable && gate_is_no_disable(gate))
		return 0;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	success = __clk_gate(ccu, gate, enable);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (success)
		return 0;

	pr_err("%s: failed to %s gate for %s\n", __func__,
		enable ? "enable" : "disable", name);

	return -EIO;
}
/* Hysteresis operations */

/*
 * If a clock gate requires a turn-off delay it will have
 * "hysteresis" register bits defined.  The first, if set, enables
 * the delay; and if enabled, the second bit determines whether the
 * delay is "low" or "high" (1 means high).  For now, if it's
 * defined for a clock, we set it.
 */
static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
{
	u32 offset;
	u32 reg_val;
	u32 mask;

	if (!hyst_exists(hyst))
		return true;

	offset = hyst->offset;
	mask = (u32)1 << hyst->en_bit;
	mask |= (u32)1 << hyst->val_bit;

	reg_val = __ccu_read(ccu, offset);
	reg_val |= mask;
	__ccu_write(ccu, offset, reg_val);

	return true;
}
/* Trigger operations */

/*
 * Caller must ensure CCU lock is held and access is enabled.
 * Returns true if successful, false otherwise.
 */
static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
{
	/* Trigger the clock and wait for it to finish */
	__ccu_write(ccu, trig->offset, 1 << trig->bit);

	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
}
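/*
 * The trigger bit is write-one-to-start: hardware clears it once the
 * pending divider or selector change has been absorbed, which is why
 * the wait above polls for the bit to read back as clear.
 */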
/* Divider operations */

/* Read a divider value and return the scaled divisor it represents. */
static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
{
	unsigned long flags;
	u32 reg_val;
	u32 reg_div;

	if (divider_is_fixed(div))
		return (u64)div->u.fixed;

	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, div->u.s.offset);
	ccu_unlock(ccu, flags);

	/* Extract the full divider field from the register value */
	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);

	/* Return the scaled divisor value it represents */
	return scaled_div_value(div, reg_div);
}
/*
 * Convert a divider's scaled divisor value into its recorded form
 * and commit it into the hardware divider register.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	bool enabled;
	u32 reg_div;
	u32 reg_val;
	int ret = 0;

	BUG_ON(divider_is_fixed(div));

	/*
	 * If we're just initializing the divider, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
		reg_val = __ccu_read(ccu, div->u.s.offset);
		reg_div = bitfield_extract(reg_val, div->u.s.shift,
						div->u.s.width);
		div->u.s.scaled_div = scaled_div_value(div, reg_div);

		return 0;
	}

	/* Convert the scaled divisor to the value we need to record */
	reg_div = divider(div, div->u.s.scaled_div);

	/* Clock needs to be enabled before changing the rate */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true)) {
		ret = -ENXIO;
		goto out;
	}

	/* Replace the divider value and record the result */
	reg_val = __ccu_read(ccu, div->u.s.offset);
	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
					reg_div);
	__ccu_write(ccu, div->u.s.offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */
out:
	return ret;
}
/*
 * Initialize a divider by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
{
	if (!divider_exists(div) || divider_is_fixed(div))
		return true;
	return !__div_commit(ccu, gate, div, trig);
}
static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
			u64 scaled_div)
{
	unsigned long flags;
	u64 previous;
	int ret;

	BUG_ON(divider_is_fixed(div));

	previous = div->u.s.scaled_div;
	if (previous == scaled_div)
		return 0;	/* No change */

	div->u.s.scaled_div = scaled_div;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __div_commit(ccu, gate, div, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		div->u.s.scaled_div = previous;	/* Revert the change */

	return ret;
}
/* Common clock rate helpers */

/*
 * Implement the common clock framework recalc_rate method, taking
 * into account a divider and an optional pre-divider.  The
 * pre-divider register pointer may be NULL.
 */
static unsigned long clk_recalc_rate(struct ccu_data *ccu,
			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
			unsigned long parent_rate)
{
	u64 scaled_parent_rate;
	u64 scaled_div;
	u64 result;

	if (!divider_exists(div))
		return parent_rate;

	if (parent_rate > (unsigned long)LONG_MAX)
		return 0;	/* actually this would be a caller bug */

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 */
	if (pre_div && divider_exists(pre_div)) {
		u64 scaled_rate;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Get the scaled divisor value, and divide the scaled
	 * parent rate by that to determine this clock's resulting
	 * rate.
	 */
	scaled_div = divider_read_scaled(ccu, div);
	result = do_div_round_closest(scaled_parent_rate, scaled_div);

	return (unsigned long)result;
}
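/*
 * Worked example (all values hypothetical): parent at 96 MHz, a
 * pre-divider with frac_width 1 currently set to 3.0 (scaled 6), and
 * a divider with frac_width 2 set to 4.0 (scaled 16).  The parent
 * rate is scaled by both widths (96000000 << 3 == 768000000),
 * divided by 6 to give 128000000, then divided by 16 to give
 * 8000000, i.e. 96 MHz / 3 / 4 = 8 MHz.
 */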
/*
 * Compute the output rate produced when a given parent rate is fed
 * into two dividers.  The pre-divider can be NULL, and even if it's
 * non-null it may be nonexistent.  It's also OK for the divider to
 * be nonexistent, and in that case the pre-divider is also ignored.
 *
 * If scaled_div is non-null, it is used to return the scaled divisor
 * value used by the (downstream) divider to produce that rate.
 */
static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
				struct bcm_clk_div *pre_div,
				unsigned long rate, unsigned long parent_rate,
				u64 *scaled_div)
{
	u64 scaled_parent_rate;
	u64 min_scaled_div;
	u64 max_scaled_div;
	u64 best_scaled_div;
	u64 result;

	BUG_ON(!divider_exists(div));
	BUG_ON(!rate);
	BUG_ON(parent_rate > (u64)LONG_MAX);

	/*
	 * If there is a pre-divider, divide the scaled parent rate
	 * by the pre-divider value first.  In this case--to improve
	 * accuracy--scale the parent rate by *both* the pre-divider
	 * value and the divider before actually computing the
	 * result of the pre-divider.
	 *
	 * If there's only one divider, just scale the parent rate.
	 *
	 * For simplicity we treat the pre-divider as fixed (for now).
	 */
	if (divider_exists(pre_div)) {
		u64 scaled_rate;
		u64 scaled_pre_div;

		scaled_rate = scale_rate(pre_div, parent_rate);
		scaled_rate = scale_rate(div, scaled_rate);
		scaled_pre_div = divider_read_scaled(ccu, pre_div);
		scaled_parent_rate = do_div_round_closest(scaled_rate,
							scaled_pre_div);
	} else {
		scaled_parent_rate = scale_rate(div, parent_rate);
	}

	/*
	 * Compute the best possible divider and ensure it is in
	 * range.  A fixed divider can't be changed, so just report
	 * the best we can do.
	 */
	if (!divider_is_fixed(div)) {
		best_scaled_div = do_div_round_closest(scaled_parent_rate,
							rate);
		min_scaled_div = scaled_div_min(div);
		max_scaled_div = scaled_div_max(div);
		if (best_scaled_div > max_scaled_div)
			best_scaled_div = max_scaled_div;
		else if (best_scaled_div < min_scaled_div)
			best_scaled_div = min_scaled_div;
	} else {
		best_scaled_div = divider_read_scaled(ccu, div);
	}

	/* OK, figure out the resulting rate */
	result = do_div_round_closest(scaled_parent_rate, best_scaled_div);

	if (scaled_div)
		*scaled_div = best_scaled_div;

	return (long)result;
}
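/*
 * Note that the clamping above means the reported rate can differ
 * from the request: if the requested rate would need a divisor
 * larger than the register field can hold, best_scaled_div is
 * limited to scaled_div_max() and the returned rate ends up higher
 * than asked for (and vice versa at the low end).
 */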
/* Common clock parent helpers */

/*
 * For a given parent selector (register field) value, find the
 * index into a selector's parent_sel array that contains it.
 * Returns the index, or BAD_CLK_INDEX if it's not found.
 */
static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
{
	u8 i;

	BUG_ON(sel->parent_count > (u32)U8_MAX);
	for (i = 0; i < sel->parent_count; i++)
		if (sel->parent_sel[i] == parent_sel)
			return i;

	return BAD_CLK_INDEX;
}
/*
 * Fetch the current value of the selector, and translate that into
 * its corresponding index in the parent array we registered with
 * the clock framework.
 *
 * Returns parent array index that corresponds with the value found,
 * or BAD_CLK_INDEX if the found value is out of range.
 */
static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
{
	unsigned long flags;
	u32 reg_val;
	u32 parent_sel;
	u8 index;

	/* If there's no selector, there's only one parent */
	if (!selector_exists(sel))
		return 0;

	/* Get the value in the selector register */
	flags = ccu_lock(ccu);
	reg_val = __ccu_read(ccu, sel->offset);
	ccu_unlock(ccu, flags);

	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);

	/* Look up that selector's parent array index and return it */
	index = parent_index(sel, parent_sel);
	if (index == BAD_CLK_INDEX)
		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
			__func__, parent_sel, ccu->name, sel->offset);

	return index;
}
/*
 * Commit our desired selector value to the hardware.
 *
 * Returns 0 on success.  Returns -EINVAL for invalid arguments.
 * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
 */
static int
__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	u32 parent_sel;
	u32 reg_val;
	bool enabled;
	int ret = 0;

	BUG_ON(!selector_exists(sel));

	/*
	 * If we're just initializing the selector, and no initial
	 * state was defined in the device tree, we just find out
	 * what its current value is rather than updating it.
	 */
	if (sel->clk_index == BAD_CLK_INDEX) {
		u8 index;

		reg_val = __ccu_read(ccu, sel->offset);
		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
		index = parent_index(sel, parent_sel);
		if (index == BAD_CLK_INDEX)
			return -EINVAL;
		sel->clk_index = index;

		return 0;
	}

	BUG_ON((u32)sel->clk_index >= sel->parent_count);
	parent_sel = sel->parent_sel[sel->clk_index];

	/* Clock needs to be enabled before changing the parent */
	enabled = __is_clk_gate_enabled(ccu, gate);
	if (!enabled && !__clk_gate(ccu, gate, true))
		return -ENXIO;

	/* Replace the selector value and record the result */
	reg_val = __ccu_read(ccu, sel->offset);
	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
	__ccu_write(ccu, sel->offset, reg_val);

	/* If the trigger fails we still want to disable the gate */
	if (!__clk_trigger(ccu, trig))
		ret = -EIO;

	/* Disable the clock again if it was disabled to begin with */
	if (!enabled && !__clk_gate(ccu, gate, false))
		ret = ret ? ret : -ENXIO;	/* return first error */

	return ret;
}
/*
 * Initialize a selector by committing our desired state to hardware
 * without the usual checks to see if it's already set up that way.
 * Returns true if successful, false otherwise.
 */
static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
{
	if (!selector_exists(sel))
		return true;
	return !__sel_commit(ccu, gate, sel, trig);
}
/*
 * Write a new value into a selector register to switch to a
 * different parent clock.  Returns 0 on success, or an error code
 * (from __sel_commit()) otherwise.
 */
static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
			u8 index)
{
	unsigned long flags;
	u8 previous;
	int ret;

	previous = sel->clk_index;
	if (previous == index)
		return 0;	/* No change */

	sel->clk_index = index;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	ret = __sel_commit(ccu, gate, sel, trig);

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);

	if (ret)
		sel->clk_index = previous;	/* Revert the change */

	return ret;
}
/* Clock operations */

static int kona_peri_clk_enable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
}

static void kona_peri_clk_disable(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
}

static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;

	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}
static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;

	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
				parent_rate);
}

static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct bcm_clk_div *div = &bcm_clk->u.peri->div;

	if (!divider_exists(div))
		return __clk_get_rate(hw->clk);

	/* Quietly avoid a zero rate */
	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
				rate ? rate : 1, *parent_rate, NULL);
}
static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long min_rate,
					 unsigned long max_rate,
					 unsigned long *best_parent_rate,
					 struct clk_hw **best_parent)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct clk *clk = hw->clk;
	struct clk *current_parent;
	unsigned long parent_rate;
	unsigned long best_delta;
	unsigned long best_rate;
	u32 parent_count;
	u32 which;

	/*
	 * If there is no other parent to choose, use the current one.
	 * Note:  We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
	 */
	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
	parent_count = (u32)bcm_clk->init_data.num_parents;
	if (parent_count < 2)
		return kona_peri_clk_round_rate(hw, rate, best_parent_rate);

	/* Unless we can do better, stick with current parent */
	current_parent = clk_get_parent(clk);
	parent_rate = __clk_get_rate(current_parent);
	best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
	best_delta = abs(best_rate - rate);

	/* Check whether any other parent clock can produce a better result */
	for (which = 0; which < parent_count; which++) {
		struct clk *parent = clk_get_parent_by_index(clk, which);
		unsigned long delta;
		unsigned long other_rate;

		BUG_ON(!parent);
		if (parent == current_parent)
			continue;

		/* We don't support CLK_SET_RATE_PARENT */
		parent_rate = __clk_get_rate(parent);
		other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
		delta = abs(other_rate - rate);
		if (delta < best_delta) {
			best_delta = delta;
			best_rate = other_rate;
			*best_parent = __clk_get_hw(parent);
			*best_parent_rate = parent_rate;
		}
	}

	return best_rate;
}
static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_sel *sel = &data->sel;
	struct bcm_clk_trig *trig;
	int ret;

	BUG_ON(index >= sel->parent_count);

	/* If there's only one parent we don't require a selector */
	if (!selector_exists(sel))
		return 0;

	/*
	 * The regular trigger is used by default, but if there's a
	 * pre-trigger we want to use that instead.
	 */
	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
					       : &data->trig;

	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: %strigger failed for %s\n", __func__,
			trig == &data->pre_trig ? "pre-" : "",
			bcm_clk->init_data.name);
	}

	return ret;
}
static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	u8 index;

	index = selector_read_index(bcm_clk->ccu, &data->sel);

	/* Not all callers would handle an out-of-range value gracefully */
	return index == BAD_CLK_INDEX ? 0 : index;
}
static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct kona_clk *bcm_clk = to_kona_clk(hw);
	struct peri_clk_data *data = bcm_clk->u.peri;
	struct bcm_clk_div *div = &data->div;
	u64 scaled_div = 0;
	int ret;

	if (parent_rate > (unsigned long)LONG_MAX)
		return -EINVAL;

	if (rate == __clk_get_rate(hw->clk))
		return 0;

	if (!divider_exists(div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * A fixed divider can't be changed.  (Nor can a fixed
	 * pre-divider be, but for now we never actually try to
	 * change that.)  Tolerate a request for a no-op change.
	 */
	if (divider_is_fixed(&data->div))
		return rate == parent_rate ? 0 : -EINVAL;

	/*
	 * Get the scaled divisor value needed to achieve a clock
	 * rate as close as possible to what was requested, given
	 * the parent clock rate supplied.
	 */
	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
				rate ? rate : 1, parent_rate, &scaled_div);

	/*
	 * We aren't updating any pre-divider at this point, so
	 * we'll use the regular trigger.
	 */
	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
				&data->trig, scaled_div);
	if (ret == -ENXIO) {
		pr_err("%s: gating failure for %s\n", __func__,
			bcm_clk->init_data.name);
		ret = -EIO;	/* Don't proliferate weird errors */
	} else if (ret == -EIO) {
		pr_err("%s: trigger failed for %s\n", __func__,
			bcm_clk->init_data.name);
	}

	return ret;
}
struct clk_ops kona_peri_clk_ops = {
	.enable = kona_peri_clk_enable,
	.disable = kona_peri_clk_disable,
	.is_enabled = kona_peri_clk_is_enabled,
	.recalc_rate = kona_peri_clk_recalc_rate,
	.determine_rate = kona_peri_clk_determine_rate,
	.set_parent = kona_peri_clk_set_parent,
	.get_parent = kona_peri_clk_get_parent,
	.set_rate = kona_peri_clk_set_rate,
};
/* Put a peripheral clock into its initial state */
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
	struct ccu_data *ccu = bcm_clk->ccu;
	struct peri_clk_data *peri = bcm_clk->u.peri;
	const char *name = bcm_clk->init_data.name;
	struct bcm_clk_trig *trig;

	BUG_ON(bcm_clk->type != bcm_clk_peri);

	if (!policy_init(ccu, &peri->policy)) {
		pr_err("%s: error initializing policy for %s\n",
			__func__, name);
		return false;
	}
	if (!gate_init(ccu, &peri->gate)) {
		pr_err("%s: error initializing gate for %s\n", __func__, name);
		return false;
	}
	if (!hyst_init(ccu, &peri->hyst)) {
		pr_err("%s: error initializing hyst for %s\n", __func__, name);
		return false;
	}
	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
		pr_err("%s: error initializing divider for %s\n", __func__,
			name);
		return false;
	}

	/*
	 * For the pre-divider and selector, the pre-trigger is used
	 * if it's present, otherwise we just use the regular trigger.
	 */
	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
					       : &peri->trig;

	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
		pr_err("%s: error initializing pre-divider for %s\n", __func__,
			name);
		return false;
	}

	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
		pr_err("%s: error initializing selector for %s\n", __func__,
			name);
		return false;
	}

	return true;
}
static bool __kona_clk_init(struct kona_clk *bcm_clk)
{
	switch (bcm_clk->type) {
	case bcm_clk_peri:
		return __peri_clk_init(bcm_clk);
	default:
		BUG();
	}
	return false;
}
/* Set a CCU and all its clocks into their desired initial state */
bool __init kona_ccu_init(struct ccu_data *ccu)
{
	unsigned long flags;
	unsigned int which;
	struct clk **clks = ccu->clk_data.clks;
	bool success = true;

	flags = ccu_lock(ccu);
	__ccu_write_enable(ccu);

	for (which = 0; which < ccu->clk_data.clk_num; which++) {
		struct kona_clk *bcm_clk;

		if (!clks[which])
			continue;
		bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
		success &= __kona_clk_init(bcm_clk);
	}

	__ccu_write_disable(ccu);
	ccu_unlock(ccu, flags);
	return success;
}