/*
 * Atheros AR71xx/AR724x/AR913x specific interrupt handling
 *
 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 *
 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/irqchip.h>
19 #include <linux/of_irq.h>
20 #include "../../../drivers/irqchip/irqchip.h"
22 #include <asm/irq_cpu.h>
23 #include <asm/mipsregs.h>
25 #include <asm/mach-ath79/ath79.h>
26 #include <asm/mach-ath79/ar71xx_regs.h>
28 #include "machtypes.h"
30 static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
32 void __iomem *base = ath79_reset_base;
35 pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
36 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
44 int bit = __ffs(pending);
46 generic_handle_irq(ATH79_MISC_IRQ(bit));
51 static void ar71xx_misc_irq_unmask(struct irq_data *d)
53 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
54 void __iomem *base = ath79_reset_base;
57 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
58 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
61 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
64 static void ar71xx_misc_irq_mask(struct irq_data *d)
66 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
67 void __iomem *base = ath79_reset_base;
70 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
71 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
74 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
77 static void ar724x_misc_irq_ack(struct irq_data *d)
79 unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
80 void __iomem *base = ath79_reset_base;
83 t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
84 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
87 __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
90 static struct irq_chip ath79_misc_irq_chip = {
92 .irq_unmask = ar71xx_misc_irq_unmask,
93 .irq_mask = ar71xx_misc_irq_mask,
96 static void __init ath79_misc_irq_init(void)
98 void __iomem *base = ath79_reset_base;
101 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
102 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
104 if (soc_is_ar71xx() || soc_is_ar913x())
105 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
106 else if (soc_is_ar724x() ||
110 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
114 for (i = ATH79_MISC_IRQ_BASE;
115 i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
116 irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
120 irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
123 static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
127 disable_irq_nosync(irq);
129 status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
131 if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
132 ath79_ddr_wb_flush(3);
133 generic_handle_irq(ATH79_IP2_IRQ(0));
134 } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
135 ath79_ddr_wb_flush(4);
136 generic_handle_irq(ATH79_IP2_IRQ(1));
138 spurious_interrupt();
144 static void ar934x_ip2_irq_init(void)
148 for (i = ATH79_IP2_IRQ_BASE;
149 i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
150 irq_set_chip_and_handler(i, &dummy_irq_chip,
153 irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
156 static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
160 disable_irq_nosync(irq);
162 status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
163 status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
166 spurious_interrupt();
170 if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
171 /* TODO: flush DDR? */
172 generic_handle_irq(ATH79_IP2_IRQ(0));
175 if (status & QCA955X_EXT_INT_WMAC_ALL) {
176 /* TODO: flush DDR? */
177 generic_handle_irq(ATH79_IP2_IRQ(1));
184 static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
188 disable_irq_nosync(irq);
190 status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
191 status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
192 QCA955X_EXT_INT_USB1 |
193 QCA955X_EXT_INT_USB2;
196 spurious_interrupt();
200 if (status & QCA955X_EXT_INT_USB1) {
201 /* TODO: flush DDR? */
202 generic_handle_irq(ATH79_IP3_IRQ(0));
205 if (status & QCA955X_EXT_INT_USB2) {
206 /* TODO: flush DDR? */
207 generic_handle_irq(ATH79_IP3_IRQ(1));
210 if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
211 /* TODO: flush DDR? */
212 generic_handle_irq(ATH79_IP3_IRQ(2));
219 static void qca955x_irq_init(void)
223 for (i = ATH79_IP2_IRQ_BASE;
224 i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
225 irq_set_chip_and_handler(i, &dummy_irq_chip,
228 irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
230 for (i = ATH79_IP3_IRQ_BASE;
231 i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
232 irq_set_chip_and_handler(i, &dummy_irq_chip,
235 irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
/*
 * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
 * these devices typically allocate coherent DMA memory, however the
 * DMA controller may still have some unsynchronized data in the FIFO.
 * Issue a flush in the handlers to ensure that the driver sees
 * the update.
 *
 * This array maps the interrupt lines to the DDR write buffer channels.
 * A value of -1 (all-ones when stored as unsigned) means "no flush needed".
 */
static unsigned irq_wb_chan[8] = {
	-1, -1, -1, -1, -1, -1, -1, -1,
};
252 asmlinkage void plat_irq_dispatch(void)
254 unsigned long pending;
257 pending = read_c0_status() & read_c0_cause() & ST0_IM;
260 spurious_interrupt();
264 pending >>= CAUSEB_IP;
266 irq = fls(pending) - 1;
267 if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
268 ath79_ddr_wb_flush(irq_wb_chan[irq]);
269 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
270 pending &= ~BIT(irq);
274 #ifdef CONFIG_IRQCHIP
275 static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
277 irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
281 static const struct irq_domain_ops misc_irq_domain_ops = {
282 .xlate = irq_domain_xlate_onecell,
286 static int __init ath79_misc_intc_of_init(
287 struct device_node *node, struct device_node *parent)
289 void __iomem *base = ath79_reset_base;
290 struct irq_domain *domain;
293 irq = irq_of_parse_and_map(node, 0);
295 panic("Failed to get MISC IRQ");
297 domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
298 ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, NULL);
300 panic("Failed to add MISC irqdomain");
302 /* Disable and clear all interrupts */
303 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
304 __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
307 irq_set_chained_handler(irq, ath79_misc_irq_handler);
311 IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
312 ath79_misc_intc_of_init);
314 static int __init ar79_cpu_intc_of_init(
315 struct device_node *node, struct device_node *parent)
319 /* Fill the irq_wb_chan table */
320 count = of_count_phandle_with_args(
321 node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
323 for (i = 0; i < count; i++) {
324 struct of_phandle_args args;
327 of_property_read_u32_index(
328 node, "qca,ddr-wb-channel-interrupts", i, &irq);
329 if (irq >= ARRAY_SIZE(irq_wb_chan))
332 err = of_parse_phandle_with_args(
333 node, "qca,ddr-wb-channels",
334 "#qca,ddr-wb-channel-cells",
339 irq_wb_chan[irq] = args.args[0];
340 pr_info("IRQ: Set flush channel of IRQ%d to %d\n",
344 return mips_cpu_irq_of_init(node, parent);
346 IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
347 ar79_cpu_intc_of_init);
351 void __init arch_init_irq(void)
353 if (mips_machtype == ATH79_MACH_GENERIC_OF) {
358 if (soc_is_ar71xx() || soc_is_ar724x() ||
359 soc_is_ar913x() || soc_is_ar933x()) {
362 } else if (soc_is_ar934x()) {
367 ath79_misc_irq_init();
370 ar934x_ip2_irq_init();
371 else if (soc_is_qca955x())