4 * Exposes all configurable internal clock sources to the clk framework.
7 * - Root source, usually 12MHz supplied by an external crystal
8 * - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
11 * - 6 clock dividers with:
12 * * selectable source [one of the PLLs],
13 * * output divided between [2 .. 512 in steps of 2] (!Au1300)
14 * or [1 .. 256 in steps of 1] (Au1300),
15 * * can be enabled individually.
17 * - up to 6 "internal" (fixed) consumers which:
18 * * take either AUXPLL or one of the above 6 dividers as input,
19 * * divide this input by 1, 2, or 4 (and 3 on Au1300).
20 * * can be disabled separately.
23 * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
24 * depends on board design and should be set by bootloader, read-only.
25 * - peripheral clock: half the rate of sysbus clock, source for a lot
26 * of peripheral blocks, read-only.
27 * - memory clock: clk rate to main memory chips, depends on board
28 * design and is read-only,
29 * - lrclk: the static bus clock signal for synchronous operation.
30 * depends on board design, must be set by bootloader,
31 * but may be required to correctly configure devices attached to
32 * the static bus. The Au1000/1500/1100 manuals call it LCLK, on
33 * later models it's called RCLK.
36 #include <linux/init.h>
38 #include <linux/clk.h>
39 #include <linux/clk-provider.h>
40 #include <linux/clkdev.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/types.h>
44 #include <asm/mach-au1x00/au1000.h>
/* Base clock: 12MHz is the default in all databooks, and I haven't
 * found any board yet which uses a different rate.
 */
#define ALCHEMY_ROOTCLK_RATE	12000000
52 * the internal sources which can be driven by the PLLs and dividers.
53 * Names taken from the databooks, refer to them for more information,
54 * especially which ones are share a clock line.
56 static const char * const alchemy_au1300_intclknames[] = {
57 "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
61 static const char * const alchemy_au1200_intclknames[] = {
62 "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
65 static const char * const alchemy_au1550_intclknames[] = {
66 "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
70 static const char * const alchemy_au1100_intclknames[] = {
71 "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
74 static const char * const alchemy_au1500_intclknames[] = {
75 NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
78 static const char * const alchemy_au1000_intclknames[] = {
79 "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
83 /* aliases for a few on-chip sources which are either shared
84 * or have gone through name changes.
86 static struct clk_aliastable {
90 } alchemy_clk_aliases[] __initdata = {
91 { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
92 { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
93 { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
94 { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
95 { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
96 { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
97 { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
98 { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
99 { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
100 { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
101 { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
102 { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
103 { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
/* convert a physical address to an uncached (KSEG1) ioremap-style pointer */
#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))

/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;	/* guards SYS_FREQCTRL0 (fg0-2) */
static spinlock_t alchemy_clk_fg1_lock;	/* guards SYS_FREQCTRL1 (fg3-5) */
static spinlock_t alchemy_clk_csrc_lock;	/* guards SYS_CLKSRC muxes */
115 /* CPU Core clock *****************************************************/
117 static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
118 unsigned long parent_rate)
123 * On early Au1000, sys_cpupll was write-only. Since these
124 * silicon versions of Au1000 are not sold, we don't bend
125 * over backwards trying to determine the frequency.
127 if (unlikely(au1xxx_cpu_has_pll_wo()))
130 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
131 if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
139 void __init alchemy_set_lpj(void)
141 preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
142 preset_lpj /= 2 * HZ;
145 static struct clk_ops alchemy_clkops_cpu = {
146 .recalc_rate = alchemy_clk_cpu_recalc,
149 static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
152 struct clk_init_data id;
155 h = kzalloc(sizeof(*h), GFP_KERNEL);
157 return ERR_PTR(-ENOMEM);
159 id.name = ALCHEMY_CPU_CLK;
160 id.parent_names = &parent_name;
162 id.flags = CLK_IS_BASIC;
163 id.ops = &alchemy_clkops_cpu;
166 return clk_register(NULL, h);
169 /* AUXPLLs ************************************************************/
171 struct alchemy_auxpll_clk {
173 unsigned long reg; /* au1300 has also AUXPLL2 */
174 int maxmult; /* max multiplier */
176 #define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
178 static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
179 unsigned long parent_rate)
181 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
183 return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
186 static int alchemy_clk_aux_setr(struct clk_hw *hw,
188 unsigned long parent_rate)
190 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
191 unsigned long d = rate;
198 /* minimum is 84MHz, max is 756-1032 depending on variant */
199 if (((d < 7) && (d != 0)) || (d > a->maxmult))
202 alchemy_wrsys(d, a->reg);
206 static long alchemy_clk_aux_roundr(struct clk_hw *hw,
208 unsigned long *parent_rate)
210 struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
213 if (!rate || !*parent_rate)
216 mult = rate / (*parent_rate);
218 if (mult && (mult < 7))
220 if (mult > a->maxmult)
223 return (*parent_rate) * mult;
226 static struct clk_ops alchemy_clkops_aux = {
227 .recalc_rate = alchemy_clk_aux_recalc,
228 .set_rate = alchemy_clk_aux_setr,
229 .round_rate = alchemy_clk_aux_roundr,
232 static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
233 char *name, int maxmult,
236 struct clk_init_data id;
238 struct alchemy_auxpll_clk *a;
240 a = kzalloc(sizeof(*a), GFP_KERNEL);
242 return ERR_PTR(-ENOMEM);
245 id.parent_names = &parent_name;
247 id.flags = CLK_GET_RATE_NOCACHE;
248 id.ops = &alchemy_clkops_aux;
251 a->maxmult = maxmult;
254 c = clk_register(NULL, &a->hw);
256 clk_register_clkdev(c, name, NULL);
263 /* sysbus_clk *********************************************************/
265 static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
267 unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
270 c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
273 clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
277 /* Peripheral Clock ***************************************************/
279 static struct clk __init *alchemy_clk_setup_periph(const char *pn)
281 /* Peripheral clock runs at half the rate of sysbus clk */
284 c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
287 clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
291 /* mem clock **********************************************************/
293 static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
295 void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
301 case ALCHEMY_CPU_AU1550:
302 case ALCHEMY_CPU_AU1200:
303 v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
304 div = (v & (1 << 15)) ? 1 : 2;
306 case ALCHEMY_CPU_AU1300:
307 v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
308 div = (v & (1 << 31)) ? 1 : 2;
310 case ALCHEMY_CPU_AU1000:
311 case ALCHEMY_CPU_AU1500:
312 case ALCHEMY_CPU_AU1100:
318 c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
321 clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
325 /* lrclk: external synchronous static bus clock ***********************/
327 static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
329 /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
330 * otherwise lrclk=pclk/4.
331 * All other variants: MEM_STCFG0[15:13] = divisor.
332 * L/RCLK = periph_clk / (divisor + 1)
333 * On Au1000, Au1500, Au1100 it's called LCLK,
334 * on later models it's called RCLK, but it's the same thing.
337 unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
340 case ALCHEMY_CPU_AU1000:
341 case ALCHEMY_CPU_AU1500:
342 v = 4 + ((v >> 11) & 1);
344 default: /* all other models */
345 v = ((v >> 13) & 7) + 1;
347 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
350 clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
354 /* Clock dividers and muxes *******************************************/
356 /* data for fgen and csrc mux-dividers */
357 struct alchemy_fgcs_clk {
359 spinlock_t *reglock; /* register lock */
360 unsigned long reg; /* SYS_FREQCTRL0/1 */
361 int shift; /* offset in register */
362 int parent; /* parent before disable [Au1300] */
363 int isen; /* is it enabled? */
364 int *dt; /* dividertable for csrc */
366 #define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
/* Compute the closest hardware divisor not exceeding the target rate.
 * @rate:   desired output rate
 * @prate:  parent (input) rate
 * @scale:  divider granularity (2 = divide by multiples of 2, 1 = any)
 * @maxdiv: maximum register divider value
 * @rv:     if non-NULL, receives the raw value to write to the register
 * Returns the effective division factor actually applied.
 */
static long alchemy_calc_div(unsigned long rate, unsigned long prate,
			     int scale, int maxdiv, unsigned long *rv)
{
	long div1, div2;

	div1 = prate / rate;
	if ((prate / div1) > rate)
		div1++;

	if (scale == 2) {	/* only div-by-multiple-of-2 possible */
		if (div1 & 1)
			div1++;	/* stay <=prate */
	}

	div2 = (div1 / scale) - 1;	/* value to write to register */
	if (div2 > maxdiv)
		div2 = maxdiv;
	if (rv)
		*rv = div2;

	div1 = ((div2 + 1) * scale);
	return div1;
}
393 static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
394 struct clk_rate_request *req,
395 int scale, int maxdiv)
397 struct clk_hw *pc, *bpc, *free;
398 long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
407 /* look at the rates each enabled parent supplies and select
408 * the one that gets closest to but not over the requested rate.
410 for (j = 0; j < 7; j++) {
411 pc = clk_hw_get_parent_by_index(hw, j);
415 /* if this parent is currently unused, remember it.
416 * XXX: we would actually want clk_has_active_children()
417 * but this is a good-enough approximation for now.
419 if (!clk_hw_is_prepared(pc)) {
424 pr = clk_hw_get_rate(pc);
428 /* what can hardware actually provide */
429 tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
431 diff = req->rate - nr;
435 if (diff < lastdiff) {
445 /* if we couldn't get the exact rate we wanted from the enabled
446 * parents, maybe we can tell an available disabled/inactive one
447 * to give us a rate we can divide down to the requested rate.
449 if (lastdiff && free) {
450 for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
454 pr = clk_hw_round_rate(free, tpr);
456 tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
459 diff = req->rate - nr;
462 if (diff < lastdiff) {
476 req->best_parent_rate = bpr;
477 req->best_parent_hw = bpc;
483 static int alchemy_clk_fgv1_en(struct clk_hw *hw)
485 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
486 unsigned long v, flags;
488 spin_lock_irqsave(c->reglock, flags);
489 v = alchemy_rdsys(c->reg);
490 v |= (1 << 1) << c->shift;
491 alchemy_wrsys(v, c->reg);
492 spin_unlock_irqrestore(c->reglock, flags);
497 static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
499 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
500 unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
505 static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
507 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
508 unsigned long v, flags;
510 spin_lock_irqsave(c->reglock, flags);
511 v = alchemy_rdsys(c->reg);
512 v &= ~((1 << 1) << c->shift);
513 alchemy_wrsys(v, c->reg);
514 spin_unlock_irqrestore(c->reglock, flags);
517 static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
519 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
520 unsigned long v, flags;
522 spin_lock_irqsave(c->reglock, flags);
523 v = alchemy_rdsys(c->reg);
525 v |= (1 << c->shift);
527 v &= ~(1 << c->shift);
528 alchemy_wrsys(v, c->reg);
529 spin_unlock_irqrestore(c->reglock, flags);
534 static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
536 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
538 return (alchemy_rdsys(c->reg) >> c->shift) & 1;
541 static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
542 unsigned long parent_rate)
544 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
545 unsigned long div, v, flags, ret;
546 int sh = c->shift + 2;
548 if (!rate || !parent_rate || rate > (parent_rate / 2))
550 ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
551 spin_lock_irqsave(c->reglock, flags);
552 v = alchemy_rdsys(c->reg);
555 alchemy_wrsys(v, c->reg);
556 spin_unlock_irqrestore(c->reglock, flags);
561 static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
562 unsigned long parent_rate)
564 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
565 unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
567 v = ((v & 0xff) + 1) * 2;
568 return parent_rate / v;
static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
}
577 /* Au1000, Au1100, Au15x0, Au12x0 */
578 static struct clk_ops alchemy_clkops_fgenv1 = {
579 .recalc_rate = alchemy_clk_fgv1_recalc,
580 .determine_rate = alchemy_clk_fgv1_detr,
581 .set_rate = alchemy_clk_fgv1_setr,
582 .set_parent = alchemy_clk_fgv1_setp,
583 .get_parent = alchemy_clk_fgv1_getp,
584 .enable = alchemy_clk_fgv1_en,
585 .disable = alchemy_clk_fgv1_dis,
586 .is_enabled = alchemy_clk_fgv1_isen,
589 static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
591 unsigned long v = alchemy_rdsys(c->reg);
593 v &= ~(3 << c->shift);
594 v |= (c->parent & 3) << c->shift;
595 alchemy_wrsys(v, c->reg);
599 static int alchemy_clk_fgv2_en(struct clk_hw *hw)
601 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
604 /* enable by setting the previous parent clock */
605 spin_lock_irqsave(c->reglock, flags);
606 __alchemy_clk_fgv2_en(c);
607 spin_unlock_irqrestore(c->reglock, flags);
612 static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
614 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
616 return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
619 static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
621 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
622 unsigned long v, flags;
624 spin_lock_irqsave(c->reglock, flags);
625 v = alchemy_rdsys(c->reg);
626 v &= ~(3 << c->shift); /* set input mux to "disabled" state */
627 alchemy_wrsys(v, c->reg);
629 spin_unlock_irqrestore(c->reglock, flags);
632 static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
634 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
637 spin_lock_irqsave(c->reglock, flags);
638 c->parent = index + 1; /* value to write to register */
640 __alchemy_clk_fgv2_en(c);
641 spin_unlock_irqrestore(c->reglock, flags);
646 static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
648 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
649 unsigned long flags, v;
651 spin_lock_irqsave(c->reglock, flags);
653 spin_unlock_irqrestore(c->reglock, flags);
657 /* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
658 * dividers behave exactly as on previous models (dividers are multiples
659 * of 2); with the bit set, dividers are multiples of 1, halving their
660 * range, but making them also much more flexible.
662 static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
663 unsigned long parent_rate)
665 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
666 int sh = c->shift + 2;
667 unsigned long div, v, flags, ret;
669 if (!rate || !parent_rate || rate > parent_rate)
672 v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
673 ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
674 v ? 256 : 512, &div);
676 spin_lock_irqsave(c->reglock, flags);
677 v = alchemy_rdsys(c->reg);
679 v |= (div & 0xff) << sh;
680 alchemy_wrsys(v, c->reg);
681 spin_unlock_irqrestore(c->reglock, flags);
686 static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
687 unsigned long parent_rate)
689 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
690 int sh = c->shift + 2;
693 v = alchemy_rdsys(c->reg);
694 t = parent_rate / (((v >> sh) & 0xff) + 1);
695 if ((v & (1 << 30)) == 0) /* test scale bit */
701 static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
702 struct clk_rate_request *req)
704 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
707 if (alchemy_rdsys(c->reg) & (1 << 30)) {
715 return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
718 /* Au1300 larger input mux, no separate disable bit, flexible divider */
719 static struct clk_ops alchemy_clkops_fgenv2 = {
720 .recalc_rate = alchemy_clk_fgv2_recalc,
721 .determine_rate = alchemy_clk_fgv2_detr,
722 .set_rate = alchemy_clk_fgv2_setr,
723 .set_parent = alchemy_clk_fgv2_setp,
724 .get_parent = alchemy_clk_fgv2_getp,
725 .enable = alchemy_clk_fgv2_en,
726 .disable = alchemy_clk_fgv2_dis,
727 .is_enabled = alchemy_clk_fgv2_isen,
730 static const char * const alchemy_clk_fgv1_parents[] = {
731 ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
734 static const char * const alchemy_clk_fgv2_parents[] = {
735 ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
738 static const char * const alchemy_clk_fgen_names[] = {
739 ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
740 ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
742 static int __init alchemy_clk_init_fgens(int ctype)
745 struct clk_init_data id;
746 struct alchemy_fgcs_clk *a;
751 case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
752 id.ops = &alchemy_clkops_fgenv1;
753 id.parent_names = alchemy_clk_fgv1_parents;
756 case ALCHEMY_CPU_AU1300:
757 id.ops = &alchemy_clkops_fgenv2;
758 id.parent_names = alchemy_clk_fgv2_parents;
764 id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
766 a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
770 spin_lock_init(&alchemy_clk_fg0_lock);
771 spin_lock_init(&alchemy_clk_fg1_lock);
773 for (i = 0; i < 6; i++) {
774 id.name = alchemy_clk_fgen_names[i];
775 a->shift = 10 * (i < 3 ? i : i - 3);
777 a->reg = AU1000_SYS_FREQCTRL1;
778 a->reglock = &alchemy_clk_fg1_lock;
780 a->reg = AU1000_SYS_FREQCTRL0;
781 a->reglock = &alchemy_clk_fg0_lock;
784 /* default to first parent if bootloader has set
785 * the mux to disabled state.
787 if (ctype == ALCHEMY_CPU_AU1300) {
788 v = alchemy_rdsys(a->reg);
789 a->parent = (v >> a->shift) & 3;
798 c = clk_register(NULL, &a->hw);
802 clk_register_clkdev(c, id.name, NULL);
809 /* internal sources muxes *********************************************/
811 static int alchemy_clk_csrc_isen(struct clk_hw *hw)
813 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
814 unsigned long v = alchemy_rdsys(c->reg);
816 return (((v >> c->shift) >> 2) & 7) != 0;
819 static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
821 unsigned long v = alchemy_rdsys(c->reg);
823 v &= ~((7 << 2) << c->shift);
824 v |= ((c->parent & 7) << 2) << c->shift;
825 alchemy_wrsys(v, c->reg);
829 static int alchemy_clk_csrc_en(struct clk_hw *hw)
831 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
834 /* enable by setting the previous parent clock */
835 spin_lock_irqsave(c->reglock, flags);
836 __alchemy_clk_csrc_en(c);
837 spin_unlock_irqrestore(c->reglock, flags);
842 static void alchemy_clk_csrc_dis(struct clk_hw *hw)
844 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
845 unsigned long v, flags;
847 spin_lock_irqsave(c->reglock, flags);
848 v = alchemy_rdsys(c->reg);
849 v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */
850 alchemy_wrsys(v, c->reg);
852 spin_unlock_irqrestore(c->reglock, flags);
855 static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
857 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
860 spin_lock_irqsave(c->reglock, flags);
861 c->parent = index + 1; /* value to write to register */
863 __alchemy_clk_csrc_en(c);
864 spin_unlock_irqrestore(c->reglock, flags);
869 static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
871 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
873 return c->parent - 1;
876 static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
877 unsigned long parent_rate)
879 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
880 unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
882 return parent_rate / c->dt[v];
885 static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
886 unsigned long parent_rate)
888 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
889 unsigned long d, v, flags;
892 if (!rate || !parent_rate || rate > parent_rate)
895 d = (parent_rate + (rate / 2)) / rate;
898 if ((d == 3) && (c->dt[2] != 3))
901 for (i = 0; i < 4; i++)
906 return -EINVAL; /* oops */
908 spin_lock_irqsave(c->reglock, flags);
909 v = alchemy_rdsys(c->reg);
910 v &= ~(3 << c->shift);
911 v |= (i & 3) << c->shift;
912 alchemy_wrsys(v, c->reg);
913 spin_unlock_irqrestore(c->reglock, flags);
918 static int alchemy_clk_csrc_detr(struct clk_hw *hw,
919 struct clk_rate_request *req)
921 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
922 int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
924 return alchemy_clk_fgcs_detr(hw, req, scale, 4);
927 static struct clk_ops alchemy_clkops_csrc = {
928 .recalc_rate = alchemy_clk_csrc_recalc,
929 .determine_rate = alchemy_clk_csrc_detr,
930 .set_rate = alchemy_clk_csrc_setr,
931 .set_parent = alchemy_clk_csrc_setp,
932 .get_parent = alchemy_clk_csrc_getp,
933 .enable = alchemy_clk_csrc_en,
934 .disable = alchemy_clk_csrc_dis,
935 .is_enabled = alchemy_clk_csrc_isen,
938 static const char * const alchemy_clk_csrc_parents[] = {
939 /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
940 ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
941 ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
945 static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */
946 static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */
948 static int __init alchemy_clk_setup_imux(int ctype)
950 struct alchemy_fgcs_clk *a;
951 const char * const *names;
952 struct clk_init_data id;
957 id.ops = &alchemy_clkops_csrc;
958 id.parent_names = alchemy_clk_csrc_parents;
960 id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
962 dt = alchemy_csrc_dt1;
964 case ALCHEMY_CPU_AU1000:
965 names = alchemy_au1000_intclknames;
967 case ALCHEMY_CPU_AU1500:
968 names = alchemy_au1500_intclknames;
970 case ALCHEMY_CPU_AU1100:
971 names = alchemy_au1100_intclknames;
973 case ALCHEMY_CPU_AU1550:
974 names = alchemy_au1550_intclknames;
976 case ALCHEMY_CPU_AU1200:
977 names = alchemy_au1200_intclknames;
979 case ALCHEMY_CPU_AU1300:
980 dt = alchemy_csrc_dt2;
981 names = alchemy_au1300_intclknames;
987 a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
991 spin_lock_init(&alchemy_clk_csrc_lock);
994 for (i = 0; i < 6; i++) {
1000 a->reg = AU1000_SYS_CLKSRC;
1001 a->reglock = &alchemy_clk_csrc_lock;
1004 /* default to first parent clock if mux is initially
1005 * set to disabled state.
1007 v = alchemy_rdsys(a->reg);
1008 a->parent = ((v >> a->shift) >> 2) & 7;
1016 c = clk_register(NULL, &a->hw);
1020 clk_register_clkdev(c, id.name, NULL);
1029 /**********************************************************************/
1038 static int __init alchemy_clk_init(void)
1040 int ctype = alchemy_get_cputype(), ret, i;
1041 struct clk_aliastable *t = alchemy_clk_aliases;
1044 /* Root of the Alchemy clock tree: external 12MHz crystal osc */
1045 c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
1047 ALCHEMY_ROOTCLK_RATE);
1050 /* CPU core clock */
1051 c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
1054 /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
1055 i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
1056 c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
1057 i, AU1000_SYS_AUXPLL);
1060 if (ctype == ALCHEMY_CPU_AU1300) {
1061 c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
1062 ALCHEMY_AUXPLL2_CLK, i,
1063 AU1300_SYS_AUXPLL2);
1067 /* sysbus clock: cpu core clock divided by 2, 3 or 4 */
1068 c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
1071 /* peripheral clock: runs at half rate of sysbus clk */
1072 c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
1075 /* SDR/DDR memory clock */
1076 c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
1079 /* L/RCLK: external static bus clock for synchronous mode */
1080 c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
1083 /* Frequency dividers 0-5 */
1084 ret = alchemy_clk_init_fgens(ctype);
1090 /* diving muxes for internal sources */
1091 ret = alchemy_clk_setup_imux(ctype);
1097 /* set up aliases drivers might look for */
1099 if (t->cputype == ctype)
1100 clk_add_alias(t->alias, NULL, t->base, NULL);
1104 pr_info("Alchemy clocktree installed\n");
1110 postcore_initcall(alchemy_clk_init);