2 * Set up the interrupt priorities
4 * Copyright 2004-2009 Analog Devices Inc.
5 * 2003 Bas Vermeulen <bas@buyways.nl>
6 * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
7 * 2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
8 * 1999 D. Jeff Dionne <jeff@uclinux.org>
11 * Licensed under the GPL-2
14 #include <linux/module.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/seq_file.h>
17 #include <linux/irq.h>
18 #include <linux/sched.h>
19 #include <linux/syscore_ops.h>
20 #include <asm/delay.h>
22 #include <linux/ipipe.h>
24 #include <asm/traps.h>
25 #include <asm/blackfin.h>
27 #include <asm/irq_handler.h>
31 # define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
33 # define SIC_SYSIRQ(irq) ((irq) - IVG15)
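/*
 * SIC_SYSIRQ() translates a Linux IRQ number into the peripheral
 * interrupt index used by the SIC/SEC registers, so that the first
 * peripheral interrupt maps to index 0.  For example, on parts where
 * IRQ_CORETMR is the last core interrupt, SIC_SYSIRQ(IRQ_CORETMR + 1)
 * evaluates to 0.
 */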
38 * - we have separated the physical hardware interrupts from the
39 * levels that the Linux kernel sees (see the description in irq.h)
44 /* Initialize this to an actual value to force it into the .data
45 * section so that we know it is properly initialized at entry into
46 * the kernel but before bss is initialized to zero (which is where
47 * it would live otherwise). The 0x1f magic represents the IRQs we
48 * cannot actually mask out in hardware.
50 unsigned long bfin_irq_flags = 0x1f;
51 EXPORT_SYMBOL(bfin_irq_flags);
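/*
 * Presumably the five low IMASK bits correspond to the events that
 * hardware keeps permanently enabled (emulation, reset, NMI,
 * exceptions and the reserved IVG4 level), hence the 0x1f default
 * above.
 */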
55 unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
61 /* irq number for request_irq, available in mach-bf5xx/irq.h */
63 /* corresponding bit in the SIC_ISR register */
65 } ivg_table[NR_PERI_INTS];
67 static struct ivg_slice {
68 /* position of first irq in ivg_table for given ivg */
71 } ivg7_13[IVG13 - IVG7 + 1];
75 * Search SIC_IAR and fill tables with the irqvalues
76 * and their positions in the SIC_ISR register.
78 static void __init search_IAR(void)
80 unsigned ivg, irq_pos = 0;
81 for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
84 ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
86 for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
89 bfin_read32((unsigned long *)SIC_IAR0 +
90 #if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
91 defined(CONFIG_BF538) || defined(CONFIG_BF539)
92 ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
97 for (irqn = irqN; irqn < irqN + 4; ++irqn) {
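/*
 * Each SIC_IARx register packs eight 4-bit IVG assignments, one
 * nibble per peripheral interrupt; iar_shift below picks out the
 * nibble belonging to this irqn, and its low 4 bits are compared
 * against the IVG level currently being collected.
 */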
98 int iar_shift = (irqn & 7) * 4;
99 if (ivg == (0xf & (iar >> iar_shift))) {
100 ivg_table[irq_pos].irqno = IVG7 + irqn;
101 ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
102 ivg7_13[ivg].istop++;
112 * This is for core internal IRQs
114 void bfin_ack_noop(struct irq_data *d)
116 /* Dummy function. */
119 static void bfin_core_mask_irq(struct irq_data *d)
121 bfin_irq_flags &= ~(1 << d->irq);
122 if (!hard_irqs_disabled())
123 hard_local_irq_enable();
126 static void bfin_core_unmask_irq(struct irq_data *d)
128 bfin_irq_flags |= 1 << d->irq;
130 * If interrupts are enabled, IMASK must contain the same value
131 * as bfin_irq_flags. Make sure that invariant holds. If interrupts
132 * are currently disabled we need not do anything; one of the
133 * callers will take care of setting IMASK to the proper value
134 * when reenabling interrupts.
135 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
138 if (!hard_irqs_disabled())
139 hard_local_irq_enable();
143 void bfin_internal_mask_irq(unsigned int irq)
145 unsigned long flags = hard_local_irq_save();
148 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
149 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
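/*
 * Each SIC_IMASKx register covers 32 peripheral interrupts, so e.g.
 * a system IRQ index of 37 selects bank 1, bit 5.
 */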
150 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
152 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
153 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
157 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
158 ~(1 << SIC_SYSIRQ(irq)));
159 #endif /* end of SIC_IMASK0 */
161 hard_local_irq_restore(flags);
164 static void bfin_internal_mask_irq_chip(struct irq_data *d)
166 bfin_internal_mask_irq(d->irq);
170 void bfin_internal_unmask_irq_affinity(unsigned int irq,
171 const struct cpumask *affinity)
173 void bfin_internal_unmask_irq(unsigned int irq)
176 unsigned long flags = hard_local_irq_save();
180 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
181 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
183 if (cpumask_test_cpu(0, affinity))
185 bfin_write_SIC_IMASK(mask_bank,
186 bfin_read_SIC_IMASK(mask_bank) |
189 if (cpumask_test_cpu(1, affinity))
190 bfin_write_SICB_IMASK(mask_bank,
191 bfin_read_SICB_IMASK(mask_bank) |
195 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
196 (1 << SIC_SYSIRQ(irq)));
199 hard_local_irq_restore(flags);
203 static void bfin_sec_preflow_handler(struct irq_data *d)
205 unsigned long flags = hard_local_irq_save();
206 unsigned int sid = SIC_SYSIRQ(d->irq);
208 bfin_write_SEC_SCI(0, SEC_CSID, sid);
210 hard_local_irq_restore(flags);
213 static void bfin_sec_mask_ack_irq(struct irq_data *d)
215 unsigned long flags = hard_local_irq_save();
216 unsigned int sid = SIC_SYSIRQ(d->irq);
218 bfin_write_SEC_SCI(0, SEC_CSID, sid);
220 hard_local_irq_restore(flags);
223 static void bfin_sec_unmask_irq(struct irq_data *d)
225 unsigned long flags = hard_local_irq_save();
226 unsigned int sid = SIC_SYSIRQ(d->irq);
228 bfin_write32(SEC_END, sid);
230 hard_local_irq_restore(flags);
233 static void bfin_sec_enable_ssi(unsigned int sid)
235 unsigned long flags = hard_local_irq_save();
236 uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
238 reg_sctl |= SEC_SCTL_SRC_EN;
239 bfin_write_SEC_SCTL(sid, reg_sctl);
241 hard_local_irq_restore(flags);
244 static void bfin_sec_disable_ssi(unsigned int sid)
246 unsigned long flags = hard_local_irq_save();
247 uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
249 reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
250 bfin_write_SEC_SCTL(sid, reg_sctl);
252 hard_local_irq_restore(flags);
255 static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
257 unsigned long flags = hard_local_irq_save();
258 uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
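/*
 * Re-program the core target group (CTG) field of SCTL, which selects
 * which core's SCI receives this source; coreid is shifted into the
 * field masked by SEC_SCTL_CTG below.
 */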
260 reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
261 bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
263 hard_local_irq_restore(flags);
266 static void bfin_sec_enable_sci(unsigned int sid)
268 unsigned long flags = hard_local_irq_save();
269 uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
271 if (sid == SIC_SYSIRQ(IRQ_WATCH0))
272 reg_sctl |= SEC_SCTL_FAULT_EN;
274 reg_sctl |= SEC_SCTL_INT_EN;
275 bfin_write_SEC_SCTL(sid, reg_sctl);
277 hard_local_irq_restore(flags);
280 static void bfin_sec_disable_sci(unsigned int sid)
282 unsigned long flags = hard_local_irq_save();
283 uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
285 reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
286 bfin_write_SEC_SCTL(sid, reg_sctl);
288 hard_local_irq_restore(flags);
291 static void bfin_sec_enable(struct irq_data *d)
293 unsigned long flags = hard_local_irq_save();
294 unsigned int sid = SIC_SYSIRQ(d->irq);
296 bfin_sec_enable_sci(sid);
297 bfin_sec_enable_ssi(sid);
299 hard_local_irq_restore(flags);
302 static void bfin_sec_disable(struct irq_data *d)
304 unsigned long flags = hard_local_irq_save();
305 unsigned int sid = SIC_SYSIRQ(d->irq);
307 bfin_sec_disable_sci(sid);
308 bfin_sec_disable_ssi(sid);
310 hard_local_irq_restore(flags);
313 static void bfin_sec_raise_irq(unsigned int sid)
315 unsigned long flags = hard_local_irq_save();
317 bfin_write32(SEC_RAISE, sid);
319 hard_local_irq_restore(flags);
322 static void init_software_driven_irq(void)
324 bfin_sec_set_ssi_coreid(34, 0);
325 bfin_sec_set_ssi_coreid(35, 1);
326 bfin_sec_set_ssi_coreid(36, 0);
327 bfin_sec_set_ssi_coreid(37, 1);
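/*
 * SIDs 34-37 appear to be the software-driven (SEC_RAISE-able)
 * interrupt sources used for core-to-core signalling; above they are
 * targeted alternately at core 0 and core 1.
 */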
330 void bfin_sec_resume(void)
332 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
334 bfin_write_SEC_GCTL(SEC_GCTL_EN);
335 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
338 void handle_sec_sfi_fault(uint32_t gstat)
343 void handle_sec_sci_fault(uint32_t gstat)
348 core_id = gstat & SEC_GSTAT_SCI;
349 cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
350 if (cstat & SEC_CSTAT_ERR) {
351 switch (cstat & SEC_CSTAT_ERRC) {
352 case SEC_CSTAT_ACKERR:
353 printk(KERN_DEBUG "sec ack err\n");
356 printk(KERN_DEBUG "sec sci unknown err\n");
362 void handle_sec_ssi_fault(uint32_t gstat)
367 sid = gstat & SEC_GSTAT_SID;
368 sstat = bfin_read_SEC_SSTAT(sid);
372 void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
376 raw_spin_lock(&desc->lock);
378 sec_gstat = bfin_read32(SEC_GSTAT);
379 if (sec_gstat & SEC_GSTAT_ERR) {
381 switch (sec_gstat & SEC_GSTAT_ERRC) {
383 handle_sec_sfi_fault(sec_gstat);
385 case SEC_GSTAT_SCIERR:
386 handle_sec_sci_fault(sec_gstat);
388 case SEC_GSTAT_SSIERR:
389 handle_sec_ssi_fault(sec_gstat);
396 raw_spin_unlock(&desc->lock);
399 static int sec_suspend(void)
404 static void sec_resume(void)
406 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
408 bfin_write_SEC_GCTL(SEC_GCTL_EN);
409 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
412 static struct syscore_ops sec_pm_syscore_ops = {
413 .suspend = sec_suspend,
414 .resume = sec_resume,
420 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
422 bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
425 static int bfin_internal_set_affinity(struct irq_data *d,
426 const struct cpumask *mask, bool force)
428 bfin_internal_mask_irq(d->irq);
429 bfin_internal_unmask_irq_affinity(d->irq, mask);
434 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
436 bfin_internal_unmask_irq(d->irq);
441 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
443 u32 bank, bit, wakeup = 0;
445 bank = SIC_SYSIRQ(irq) / 32;
446 bit = SIC_SYSIRQ(irq) % 32;
478 flags = hard_local_irq_save();
481 bfin_sic_iwr[bank] |= (1 << bit);
485 bfin_sic_iwr[bank] &= ~(1 << bit);
486 vr_wakeup &= ~wakeup;
489 hard_local_irq_restore(flags);
494 static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
496 return bfin_internal_set_wake(d->irq, state);
499 # define bfin_internal_set_wake_chip NULL
502 static struct irq_chip bfin_core_irqchip = {
504 .irq_mask = bfin_core_mask_irq,
505 .irq_unmask = bfin_core_unmask_irq,
508 static struct irq_chip bfin_internal_irqchip = {
510 .irq_mask = bfin_internal_mask_irq_chip,
511 .irq_unmask = bfin_internal_unmask_irq_chip,
512 .irq_disable = bfin_internal_mask_irq_chip,
513 .irq_enable = bfin_internal_unmask_irq_chip,
515 .irq_set_affinity = bfin_internal_set_affinity,
517 .irq_set_wake = bfin_internal_set_wake_chip,
521 static struct irq_chip bfin_sec_irqchip = {
523 .irq_mask_ack = bfin_sec_mask_ack_irq,
524 .irq_mask = bfin_sec_mask_ack_irq,
525 .irq_unmask = bfin_sec_unmask_irq,
526 .irq_eoi = bfin_sec_unmask_irq,
527 .irq_disable = bfin_sec_disable,
528 .irq_enable = bfin_sec_enable,
532 void bfin_handle_irq(unsigned irq)
535 struct pt_regs regs; /* Contents not used. */
536 ipipe_trace_irq_entry(irq);
537 __ipipe_handle_irq(irq, &regs);
538 ipipe_trace_irq_exit(irq);
539 #else /* !CONFIG_IPIPE */
540 generic_handle_irq(irq);
541 #endif /* !CONFIG_IPIPE */
544 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
545 static int mac_stat_int_mask;
547 static void bfin_mac_status_ack_irq(unsigned int irq)
551 bfin_write_EMAC_MMC_TIRQS(
552 bfin_read_EMAC_MMC_TIRQE() &
553 bfin_read_EMAC_MMC_TIRQS());
554 bfin_write_EMAC_MMC_RIRQS(
555 bfin_read_EMAC_MMC_RIRQE() &
556 bfin_read_EMAC_MMC_RIRQS());
558 case IRQ_MAC_RXFSINT:
559 bfin_write_EMAC_RX_STKY(
560 bfin_read_EMAC_RX_IRQE() &
561 bfin_read_EMAC_RX_STKY());
563 case IRQ_MAC_TXFSINT:
564 bfin_write_EMAC_TX_STKY(
565 bfin_read_EMAC_TX_IRQE() &
566 bfin_read_EMAC_TX_STKY());
568 case IRQ_MAC_WAKEDET:
569 bfin_write_EMAC_WKUP_CTL(
570 bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
573 /* These bits are W1C */
574 bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
579 static void bfin_mac_status_mask_irq(struct irq_data *d)
581 unsigned int irq = d->irq;
583 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
587 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
593 if (!mac_stat_int_mask)
594 bfin_internal_mask_irq(IRQ_MAC_ERROR);
596 bfin_mac_status_ack_irq(irq);
599 static void bfin_mac_status_unmask_irq(struct irq_data *d)
601 unsigned int irq = d->irq;
606 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
612 if (!mac_stat_int_mask)
613 bfin_internal_unmask_irq(IRQ_MAC_ERROR);
615 mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
619 int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
622 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
624 return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
628 # define bfin_mac_status_set_wake NULL
631 static struct irq_chip bfin_mac_status_irqchip = {
633 .irq_mask = bfin_mac_status_mask_irq,
634 .irq_unmask = bfin_mac_status_unmask_irq,
635 .irq_set_wake = bfin_mac_status_set_wake,
638 void bfin_demux_mac_status_irq(unsigned int int_err_irq,
639 struct irq_desc *inta_desc)
642 u32 status = bfin_read_EMAC_SYSTAT();
644 for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
645 if (status & (1L << i)) {
646 irq = IRQ_MAC_PHYINT + i;
651 if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
652 bfin_handle_irq(irq);
654 bfin_mac_status_ack_irq(irq);
656 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
661 "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
662 " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
663 " (EMAC_SYSTAT=0x%X)\n",
664 __func__, __FILE__, __LINE__, status);
668 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
671 handle = handle_level_irq;
673 __irq_set_handler_locked(irq, handle);
676 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
677 extern void bfin_gpio_irq_prepare(unsigned gpio);
681 static void bfin_gpio_ack_irq(struct irq_data *d)
683 /* AFAIK ack_irq, in case mask_ack is provided,
684 * gets called only for edge-sense irqs
686 set_gpio_data(irq_to_gpio(d->irq), 0);
689 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
691 unsigned int irq = d->irq;
692 u32 gpionr = irq_to_gpio(irq);
694 if (!irqd_is_level_type(d))
695 set_gpio_data(gpionr, 0);
697 set_gpio_maska(gpionr, 0);
700 static void bfin_gpio_mask_irq(struct irq_data *d)
702 set_gpio_maska(irq_to_gpio(d->irq), 0);
705 static void bfin_gpio_unmask_irq(struct irq_data *d)
707 set_gpio_maska(irq_to_gpio(d->irq), 1);
710 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
712 u32 gpionr = irq_to_gpio(d->irq);
714 if (__test_and_set_bit(gpionr, gpio_enabled))
715 bfin_gpio_irq_prepare(gpionr);
717 bfin_gpio_unmask_irq(d);
722 static void bfin_gpio_irq_shutdown(struct irq_data *d)
724 u32 gpionr = irq_to_gpio(d->irq);
726 bfin_gpio_mask_irq(d);
727 __clear_bit(gpionr, gpio_enabled);
728 bfin_gpio_irq_free(gpionr);
731 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
733 unsigned int irq = d->irq;
736 u32 gpionr = irq_to_gpio(irq);
738 if (type == IRQ_TYPE_PROBE) {
739 /* only probe GPIO interrupt lines that are not already enabled */
740 if (test_bit(gpionr, gpio_enabled))
742 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
745 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
746 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
748 snprintf(buf, 16, "gpio-irq%d", irq);
749 ret = bfin_gpio_irq_request(gpionr, buf);
753 if (__test_and_set_bit(gpionr, gpio_enabled))
754 bfin_gpio_irq_prepare(gpionr);
757 __clear_bit(gpionr, gpio_enabled);
761 set_gpio_inen(gpionr, 0);
762 set_gpio_dir(gpionr, 0);
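/*
 * The per-pin trigger is composed from three PORTx controls below:
 * BOTH selects either-edge operation, POLAR selects falling/low (1)
 * versus rising/high (0), and EDGE selects edge versus level
 * sensitivity.
 */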
764 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
765 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
766 set_gpio_both(gpionr, 1);
768 set_gpio_both(gpionr, 0);
770 if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
771 set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */
773 set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */
775 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
776 set_gpio_edge(gpionr, 1);
777 set_gpio_inen(gpionr, 1);
778 set_gpio_data(gpionr, 0);
781 set_gpio_edge(gpionr, 0);
782 set_gpio_inen(gpionr, 1);
785 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
786 bfin_set_irq_handler(irq, handle_edge_irq);
788 bfin_set_irq_handler(irq, handle_level_irq);
794 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
796 return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
799 # define bfin_gpio_set_wake NULL
802 static void bfin_demux_gpio_block(unsigned int irq)
804 unsigned int gpio, mask;
806 gpio = irq_to_gpio(irq);
807 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
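/* Only pins that are both asserted and unmasked get demultiplexed
 * into their per-GPIO IRQs below.
 */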
811 bfin_handle_irq(irq);
817 void bfin_demux_gpio_irq(unsigned int inta_irq,
818 struct irq_desc *desc)
823 #if defined(BF537_FAMILY)
824 case IRQ_PF_INTA_PG_INTA:
825 bfin_demux_gpio_block(IRQ_PF0);
828 case IRQ_PH_INTA_MAC_RX:
831 #elif defined(BF533_FAMILY)
835 #elif defined(BF538_FAMILY)
839 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
849 #elif defined(CONFIG_BF561)
865 bfin_demux_gpio_block(irq);
870 # ifndef CONFIG_BF60x
871 #define NR_PINT_SYS_IRQS 4
874 #define NR_PINT_SYS_IRQS 6
878 #define NR_PINT_BITS 32
879 #define IRQ_NOT_AVAIL 0xFF
881 #define PINT_2_BANK(x) ((x) >> 5)
882 #define PINT_2_BIT(x) ((x) & 0x1F)
883 #define PINT_BIT(x) (1 << (PINT_2_BIT(x)))
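/*
 * Worked example: a LUT value of 0x27 (39) decodes to
 * PINT_2_BANK() == 1, PINT_2_BIT() == 7 and PINT_BIT() == 0x80.
 */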
885 static unsigned char irq2pint_lut[NR_PINTS];
886 static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
888 static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
889 (struct bfin_pint_regs *)PINT0_MASK_SET,
890 (struct bfin_pint_regs *)PINT1_MASK_SET,
891 (struct bfin_pint_regs *)PINT2_MASK_SET,
892 (struct bfin_pint_regs *)PINT3_MASK_SET,
894 (struct bfin_pint_regs *)PINT4_MASK_SET,
895 (struct bfin_pint_regs *)PINT5_MASK_SET,
900 inline unsigned int get_irq_base(u32 bank, u8 bmap)
902 unsigned int irq_base;
904 if (bank < 2) { /*PA-PB */
905 irq_base = IRQ_PA0 + bmap * 16;
907 irq_base = IRQ_PC0 + bmap * 16;
913 inline unsigned int get_irq_base(u32 bank, u8 bmap)
915 unsigned int irq_base;
917 irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
923 /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
924 void init_pint_lut(void)
926 u16 bank, bit, irq_base, bit_pos;
930 memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
932 for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
934 pint_assign = pint[bank]->assign;
936 for (bit = 0; bit < NR_PINT_BITS; bit++) {
938 bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
940 irq_base = get_irq_base(bank, bmap);
942 irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
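/*
 * Each byte of PINTx_ASSIGN maps one group of eight PINT bits to a
 * half-port; odd byte groups land in the upper eight IRQs of the
 * 16-pin port selected by get_irq_base().
 */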
943 bit_pos = bit + bank * NR_PINT_BITS;
945 pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
946 irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
951 static void bfin_gpio_ack_irq(struct irq_data *d)
953 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
954 u32 pintbit = PINT_BIT(pint_val);
955 u32 bank = PINT_2_BANK(pint_val);
957 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
958 if (pint[bank]->invert_set & pintbit)
959 pint[bank]->invert_clear = pintbit;
961 pint[bank]->invert_set = pintbit;
963 pint[bank]->request = pintbit;
967 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
969 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
970 u32 pintbit = PINT_BIT(pint_val);
971 u32 bank = PINT_2_BANK(pint_val);
973 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
974 if (pint[bank]->invert_set & pintbit)
975 pint[bank]->invert_clear = pintbit;
977 pint[bank]->invert_set = pintbit;
980 pint[bank]->request = pintbit;
981 pint[bank]->mask_clear = pintbit;
984 static void bfin_gpio_mask_irq(struct irq_data *d)
986 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
988 pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
991 static void bfin_gpio_unmask_irq(struct irq_data *d)
993 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
994 u32 pintbit = PINT_BIT(pint_val);
995 u32 bank = PINT_2_BANK(pint_val);
997 pint[bank]->mask_set = pintbit;
1000 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
1002 unsigned int irq = d->irq;
1003 u32 gpionr = irq_to_gpio(irq);
1004 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1006 if (pint_val == IRQ_NOT_AVAIL) {
1008 "GPIO IRQ %d: Not in PINT assign table. "
1009 "Reconfigure interrupt to port assignment\n", irq);
1013 if (__test_and_set_bit(gpionr, gpio_enabled))
1014 bfin_gpio_irq_prepare(gpionr);
1016 bfin_gpio_unmask_irq(d);
1021 static void bfin_gpio_irq_shutdown(struct irq_data *d)
1023 u32 gpionr = irq_to_gpio(d->irq);
1025 bfin_gpio_mask_irq(d);
1026 __clear_bit(gpionr, gpio_enabled);
1027 bfin_gpio_irq_free(gpionr);
1030 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
1032 unsigned int irq = d->irq;
1035 u32 gpionr = irq_to_gpio(irq);
1036 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1037 u32 pintbit = PINT_BIT(pint_val);
1038 u32 bank = PINT_2_BANK(pint_val);
1040 if (pint_val == IRQ_NOT_AVAIL)
1043 if (type == IRQ_TYPE_PROBE) {
1044 /* only probe GPIO interrupt lines that are not already enabled */
1045 if (test_bit(gpionr, gpio_enabled))
1047 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
1050 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
1051 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
1053 snprintf(buf, 16, "gpio-irq%d", irq);
1054 ret = bfin_gpio_irq_request(gpionr, buf);
1058 if (__test_and_set_bit(gpionr, gpio_enabled))
1059 bfin_gpio_irq_prepare(gpionr);
1062 __clear_bit(gpionr, gpio_enabled);
1066 if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
1067 pint[bank]->invert_set = pintbit; /* low or falling edge denoted by one */
1069 pint[bank]->invert_clear = pintbit; /* high or rising edge denoted by zero */
1071 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
1072 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1073 if (gpio_get_value(gpionr))
1074 pint[bank]->invert_set = pintbit;
1076 pint[bank]->invert_clear = pintbit;
1079 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1080 pint[bank]->edge_set = pintbit;
1081 bfin_set_irq_handler(irq, handle_edge_irq);
1083 pint[bank]->edge_clear = pintbit;
1084 bfin_set_irq_handler(irq, handle_level_irq);
1091 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1094 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1095 u32 bank = PINT_2_BANK(pint_val);
1099 pint_irq = IRQ_PINT0;
1102 pint_irq = IRQ_PINT2;
1105 pint_irq = IRQ_PINT3;
1108 pint_irq = IRQ_PINT1;
1112 pint_irq = IRQ_PINT4;
1115 pint_irq = IRQ_PINT5;
1122 bfin_internal_set_wake(pint_irq, state);
1127 # define bfin_gpio_set_wake NULL
1130 void bfin_demux_gpio_irq(unsigned int inta_irq,
1131 struct irq_desc *desc)
1137 struct irq_chip *chip = irq_desc_get_chip(desc);
1139 if (chip->irq_mask_ack) {
1140 chip->irq_mask_ack(&desc->irq_data);
1142 chip->irq_mask(&desc->irq_data);
1144 chip->irq_ack(&desc->irq_data);
1172 pint_val = bank * NR_PINT_BITS;
1174 request = pint[bank]->request;
1176 level_mask = pint[bank]->edge_set & request;
1180 irq = pint2irq_lut[pint_val] + SYS_IRQS;
1181 if (level_mask & PINT_BIT(pint_val)) {
1183 chip->irq_unmask(&desc->irq_data);
1185 bfin_handle_irq(irq);
1192 chip->irq_unmask(&desc->irq_data);
1196 static struct irq_chip bfin_gpio_irqchip = {
1198 .irq_ack = bfin_gpio_ack_irq,
1199 .irq_mask = bfin_gpio_mask_irq,
1200 .irq_mask_ack = bfin_gpio_mask_ack_irq,
1201 .irq_unmask = bfin_gpio_unmask_irq,
1202 .irq_disable = bfin_gpio_mask_irq,
1203 .irq_enable = bfin_gpio_unmask_irq,
1204 .irq_set_type = bfin_gpio_irq_type,
1205 .irq_startup = bfin_gpio_irq_startup,
1206 .irq_shutdown = bfin_gpio_irq_shutdown,
1207 .irq_set_wake = bfin_gpio_set_wake,
1210 void __cpuinit init_exception_vectors(void)
1212 /* cannot program in software:
1213 * evt0 - emulation (jtag)
1216 bfin_write_EVT2(evt_nmi);
1217 bfin_write_EVT3(trap);
1218 bfin_write_EVT5(evt_ivhw);
1219 bfin_write_EVT6(evt_timer);
1220 bfin_write_EVT7(evt_evt7);
1221 bfin_write_EVT8(evt_evt8);
1222 bfin_write_EVT9(evt_evt9);
1223 bfin_write_EVT10(evt_evt10);
1224 bfin_write_EVT11(evt_evt11);
1225 bfin_write_EVT12(evt_evt12);
1226 bfin_write_EVT13(evt_evt13);
1227 bfin_write_EVT14(evt_evt14);
1228 bfin_write_EVT15(evt_system_call);
1233 * This function should be called during kernel startup to initialize
1234 * the BFin IRQ handling routines.
1237 int __init init_arch_irq(void)
1240 unsigned long ilat = 0;
1242 #ifndef CONFIG_BF60x
1243 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
1245 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1246 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1248 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1250 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1251 bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
1252 bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
1255 bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
1257 #else /* CONFIG_BF60x */
1258 bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1261 local_irq_disable();
1264 # ifdef CONFIG_PINTx_REASSIGN
1265 pint[0]->assign = CONFIG_PINT0_ASSIGN;
1266 pint[1]->assign = CONFIG_PINT1_ASSIGN;
1267 pint[2]->assign = CONFIG_PINT2_ASSIGN;
1268 pint[3]->assign = CONFIG_PINT3_ASSIGN;
1269 # ifdef CONFIG_BF60x
1270 pint[4]->assign = CONFIG_PINT4_ASSIGN;
1271 pint[5]->assign = CONFIG_PINT5_ASSIGN;
1274 /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1278 for (irq = 0; irq <= SYS_IRQS; irq++) {
1279 if (irq <= IRQ_CORETMR)
1280 irq_set_chip(irq, &bfin_core_irqchip);
1282 irq_set_chip(irq, &bfin_internal_irqchip);
1285 #ifndef CONFIG_BF60x
1291 #elif defined(BF537_FAMILY)
1292 case IRQ_PH_INTA_MAC_RX:
1293 case IRQ_PF_INTA_PG_INTA:
1294 #elif defined(BF533_FAMILY)
1296 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1297 case IRQ_PORTF_INTA:
1298 case IRQ_PORTG_INTA:
1299 case IRQ_PORTH_INTA:
1300 #elif defined(CONFIG_BF561)
1301 case IRQ_PROG0_INTA:
1302 case IRQ_PROG1_INTA:
1303 case IRQ_PROG2_INTA:
1304 #elif defined(BF538_FAMILY)
1305 case IRQ_PORTF_INTA:
1307 irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1309 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1311 irq_set_chained_handler(irq,
1312 bfin_demux_mac_status_irq);
1315 #if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1318 irq_set_handler(irq, handle_percpu_irq);
1323 #ifdef CONFIG_TICKSOURCE_CORETMR
1326 irq_set_handler(irq, handle_percpu_irq);
1328 irq_set_handler(irq, handle_simple_irq);
1333 #ifdef CONFIG_TICKSOURCE_GPTMR0
1335 irq_set_handler(irq, handle_simple_irq);
1341 irq_set_handler(irq, handle_level_irq);
1343 irq_set_handler(irq, handle_simple_irq);
1351 #ifndef CONFIG_BF60x
1352 #if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) && !defined(CONFIG_BF60x)
1353 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1354 irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1357 /* if configured as edge, then will be changed to do_edge_IRQ */
1358 for (irq = GPIO_IRQ_BASE;
1359 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1360 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1363 for (irq = BFIN_IRQ(0); irq <= SYS_IRQS; irq++) {
1364 if (irq < CORE_IRQS) {
1365 irq_set_chip(irq, &bfin_sec_irqchip);
1366 __irq_set_handler(irq, handle_sec_fault, 0, NULL);
1367 } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
1368 irq_set_chip(irq, &bfin_sec_irqchip);
1369 irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1370 } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1371 irq_set_chip(irq, &bfin_sec_irqchip);
1372 irq_set_handler(irq, handle_percpu_irq);
1374 irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1375 handle_fasteoi_irq);
1376 __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1379 for (irq = GPIO_IRQ_BASE;
1380 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1381 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1384 bfin_write_IMASK(0);
1386 ilat = bfin_read_ILAT();
1388 bfin_write_ILAT(ilat);
1391 printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1392 /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
1393 * local_irq_enable()
1395 #ifndef CONFIG_BF60x
1397 /* Therefore it's better to set up the IARs before interrupts are enabled */
1400 /* Enable interrupts IVG7-15 */
1401 bfin_irq_flags |= IMASK_IVG15 |
1402 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1403 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1405 bfin_sti(bfin_irq_flags);
1407 /* This implicitly covers ANOMALY_05000171
1408 * Boot-ROM code modifies SICA_IWRx wakeup registers
1411 bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1413 /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1414 * will screw up the bootrom as it relies on MDMA0/1 waking it
1415 * up from IDLE instructions. See this report for more info:
1416 * http://blackfin.uclinux.org/gf/tracker/4323
1418 if (ANOMALY_05000435)
1419 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1421 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1424 bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1427 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1429 #else /* CONFIG_BF60x */
1430 /* Enable interrupts IVG7-15 */
1431 bfin_irq_flags |= IMASK_IVG15 |
1432 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1433 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1436 bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1437 bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
1438 bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
1439 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1441 bfin_write_SEC_GCTL(SEC_GCTL_EN);
1442 bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1443 init_software_driven_irq();
1444 register_syscore_ops(&sec_pm_syscore_ops);
1449 #ifdef CONFIG_DO_IRQ_L1
1450 __attribute__((l1_text))
1452 static int vec_to_irq(int vec)
1454 #ifndef CONFIG_BF60x
1455 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1456 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1457 unsigned long sic_status[3];
1459 if (likely(vec == EVT_IVTMR_P))
1461 #ifndef CONFIG_BF60x
1463 sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1465 if (smp_processor_id()) {
1467 /* This will be optimized out in UP mode. */
1468 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1469 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1472 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1473 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1477 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
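/*
 * Walk the ivg_table slice built by search_IAR() for this priority
 * level and return the first peripheral IRQ whose latched, unmasked
 * status bit is set.
 */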
1481 if (ivg >= ivg_stop)
1484 if (sic_status[0] & ivg->isrflag)
1486 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1491 /* for bf60x, read the active source ID directly from the SEC */
1492 return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1493 #endif /* end of CONFIG_BF60x */
1496 #ifdef CONFIG_DO_IRQ_L1
1497 __attribute__((l1_text))
1499 void do_irq(int vec, struct pt_regs *fp)
1501 int irq = vec_to_irq(vec);
1504 asm_do_IRQ(irq, fp);
1509 int __ipipe_get_irq_priority(unsigned irq)
1513 if (irq <= IRQ_CORETMR)
1516 for (ient = 0; ient < NR_PERI_INTS; ient++) {
1517 struct ivgx *ivg = ivg_table + ient;
1518 if (ivg->irqno == irq) {
1519 for (prio = 0; prio <= IVG13-IVG7; prio++) {
1520 if (ivg7_13[prio].ifirst <= ivg &&
1521 ivg7_13[prio].istop > ivg)
1530 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1531 #ifdef CONFIG_DO_IRQ_L1
1532 __attribute__((l1_text))
1534 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1536 struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1537 struct ipipe_domain *this_domain = __ipipe_current_domain;
1538 struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1539 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1542 irq = vec_to_irq(vec);
1546 if (irq == IRQ_SYSTMR) {
1547 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1548 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1550 /* This is basically what we need from the register frame. */
1551 __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1552 __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1553 if (this_domain != ipipe_root_domain)
1554 __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1556 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1560 * We don't want Linux interrupt handlers to run at the
1561 * current core priority level (i.e. < EVT15), since this
1562 * might delay other interrupts handled by a high priority
1563 * domain. Here is what we do instead:
1565 * - we raise the SYNCDEFER bit to prevent
1566 * __ipipe_handle_irq() to sync the pipeline for the root
1567 * stage for the incoming interrupt. Upon return, that IRQ is
1568 * pending in the interrupt log.
1570 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1571 * that _schedule_and_signal_from_int will eventually sync the
1572 * pipeline from EVT15.
1574 if (this_domain == ipipe_root_domain) {
1575 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1579 ipipe_trace_irq_entry(irq);
1580 __ipipe_handle_irq(irq, regs);
1581 ipipe_trace_irq_exit(irq);
1583 if (user_mode(regs) &&
1584 !ipipe_test_foreign_stack() &&
1585 (current->ipipe_flags & PF_EVTRET) != 0) {
1587 * Testing for user_mode() does NOT fully eliminate
1588 * foreign stack contexts, because of the forged
1589 * interrupt returns we do through
1590 * __ipipe_call_irqtail. In that case, we might have
1591 * preempted a foreign stack context in a high
1592 * priority domain, with a single interrupt level now
1593 * pending after the irqtail unwinding is done. In
1594 * which case user_mode() is now true, and the event
1595 * gets dispatched spuriously.
1597 current->ipipe_flags &= ~PF_EVTRET;
1598 __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1601 if (this_domain == ipipe_root_domain) {
1602 set_thread_flag(TIF_IRQ_SYNC);
1604 __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1605 return !test_bit(IPIPE_STALL_FLAG, &p->status);
1612 #endif /* CONFIG_IPIPE */