/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

extern int sn_force_interrupt_flag;
extern int sn_ioif_inited;
static struct list_head **sn_irq_lh;
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */

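/*
 * Interrupt resources on SN live in the PROM; the kernel allocates and
 * frees them through the SN_SAL_IOIF_INTERRUPT SAL call.  The two inline
 * wrappers below simply marshal the SAL_INTR_ALLOC/SAL_INTR_FREE
 * sub-function arguments.
 */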
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
				     u64 sn_irq_info,
				     int req_irq, nasid_t req_nasid,
				     int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);
	return ret_stuff.status;
}

static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
				struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}

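/*
 * hw_interrupt_type callbacks for the SN hub.  startup/shutdown and
 * enable/disable have nothing to do on this platform, so they are
 * stubs; the real work happens in sn_ack_irq() and sn_end_irq().
 */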
static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

static void sn_disable_irq(unsigned int irq)
{
}

static void sn_enable_irq(unsigned int irq)
{
}

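/*
 * Acknowledge by clearing the hub's SH_EVENT_OCCURRED bits (via the
 * write-to-clear ALIAS register) and mark the vector as in-service in
 * the per-node data area so sn_check_intr() can tell it is being handled.
 */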
static void sn_ack_irq(unsigned int irq)
{
	uint64_t event_occurred, mask = 0;
	int nasid;

	irq = irq & 0xff;
	nasid = get_nasid();
	event_occurred =
	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
	      mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
}

static void sn_end_irq(unsigned int irq)
{
	int nasid;
	int ivec;
	uint64_t event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		nasid = get_nasid();
		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
				       (nasid, SH_EVENT_OCCURRED));
		/* If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To
		 * make sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}

static void sn_irq_info_free(struct rcu_head *head);

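/*
 * Retargeting an IRQ to a new CPU cannot be done in place: each
 * sn_irq_info is registered with the PROM.  For every entry on the
 * IRQ's list we therefore copy it, free the old PROM registration,
 * allocate a new one aimed at the target CPU's nasid/slice, and swap
 * the copy into the list under RCU so concurrent readers stay safe.
 */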
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	int cpuid, cpuphys;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list) {
		uint64_t bridge;
		int local_widget, status;
		nasid_t local_nasid;
		struct sn_irq_info *new_irq_info;

		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
		if (new_irq_info == NULL)
			break;
		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

		bridge = (uint64_t) new_irq_info->irq_bridge;
		if (!bridge) {
			kfree(new_irq_info);
			break; /* irq is not a device interrupt */
		}

		local_nasid = NASID_GET(bridge);

		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM new_irq_info structure */
		sn_intr_free(local_nasid, local_widget, new_irq_info);
		/* Update the kernel's new_irq_info with the new target info */
		unregister_intr_pda(new_irq_info);

		/* allocate a new PROM new_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(new_irq_info), irq,
				       cpuid_to_nasid(cpuid),
				       cpuid_to_slice(cpuid));

		/* SAL call failed */
		if (status) {
			kfree(new_irq_info);
			break;
		}

		new_irq_info->irq_cpuid = cpuid;
		register_intr_pda(new_irq_info);

		if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
			pcibr_change_devices_irq(new_irq_info);

		spin_lock(&sn_irq_info_lock);
		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
		spin_unlock(&sn_irq_info_lock);
		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
	}

	set_irq_affinity_info((irq & 0xff), cpuphys, 0);
}

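/*
 * The SN hub interrupt type.  sn_irq_init() below points every IRQ
 * descriptor that has no handler yet (no_irq_type) at this structure.
 */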
struct hw_interrupt_type irq_type_sn = {
	.typename = "SN hub",
	.startup = sn_startup_irq,
	.shutdown = sn_shutdown_irq,
	.enable = sn_enable_irq,
	.disable = sn_disable_irq,
	.ack = sn_ack_irq,
	.end = sn_end_irq,
	.set_affinity = sn_set_affinity_irq
};

unsigned int sn_local_vector_to_irq(u8 vector)
{
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

void sn_irq_init(void)
{
	int i;
	irq_desc_t *base_desc = irq_desc;

	for (i = 0; i < NR_IRQS; i++) {
		if (base_desc[i].handler == &no_irq_type) {
			base_desc[i].handler = &irq_type_sn;
		}
	}
}

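/*
 * Track the lowest and highest IRQ registered on each CPU in its pda.
 * sn_lb_int_war_check() uses this [sn_first_irq, sn_last_irq] window to
 * limit its scan for lost interrupts.
 */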
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;

	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	}

	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
		pdacpu(cpu)->sn_first_irq = irq;
	}
}

static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	rcu_read_lock();
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1;
		     i && !foundmatch; i--) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}

	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1;
		     i < NR_IRQS && !foundmatch; i++) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
	rcu_read_unlock();
}

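/*
 * RCU callback: by the time this runs a grace period has elapsed, so
 * no reader traversing sn_irq_lh[] can still hold a reference.
 */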
static void sn_irq_info_free(struct rcu_head *head)
{
	struct sn_irq_info *sn_irq_info;

	sn_irq_info = container_of(head, struct sn_irq_info, rcu);
	kfree(sn_irq_info);
}

void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);

	pci_dev_get(pci_dev);
	sn_irq_info->irq_cpuid = cpu;
	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

	/* link it into the sn_irq[irq] list */
	spin_lock(&sn_irq_info_lock);
	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
	spin_unlock(&sn_irq_info_lock);

	register_intr_pda(sn_irq_info);
}

void sn_irq_unfixup(struct pci_dev *pci_dev)
{
	struct sn_irq_info *sn_irq_info;

	/* Only clean up IRQ state if this device has a host bus context */
	if (!SN_PCIDEV_BUSSOFT(pci_dev))
		return;

	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
	if (!sn_irq_info || !sn_irq_info->irq_irq)
		return;

	unregister_intr_pda(sn_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_del_rcu(&sn_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

	pci_dev_put(pci_dev);
}

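/*
 * Replay the interrupt at the bridge for every device sharing this IRQ.
 * Harmless if the interrupt was not actually lost; see the lost-interrupt
 * discussion above sn_check_intr() below.
 */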
static void force_interrupt(int irq)
{
	struct sn_irq_info *sn_irq_info;

	if (!sn_ioif_inited)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
		    (sn_irq_info->irq_bridge != NULL))
			pcibr_force_interrupt(sn_irq_info);
	}
	rcu_read_unlock();
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	uint64_t regval, irr_reg;
	int irr_reg_num, irr_bit;
	struct pcidev_info *pcidev_info;
	struct pcibus_info *pcibus_info;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
	    pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);

	irr_reg_num = irq_to_vector(irq) / 64;
	irr_bit = irq_to_vector(irq) % 64;
	switch (irr_reg_num) {
	case 0:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
		break;
	case 1:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
		break;
	case 2:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
		break;
	case 3:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
		break;
	}
	if (!test_bit(irr_bit, &irr_reg)) {
		if (!test_bit(irq, pda->sn_soft_irr)) {
			if (!test_bit(irq, pda->sn_in_service_ivecs)) {
				regval &= 0xff;
				if (sn_irq_info->irq_int_bit & regval &
				    sn_irq_info->irq_last_intr) {
					regval &= ~(sn_irq_info->
						    irq_int_bit & regval);
					pcibr_force_interrupt(sn_irq_info);
				}
			}
		}
	}
	sn_irq_info->irq_last_intr = regval;
}

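/*
 * Lost-interrupt workaround ("int war"): presumably invoked periodically
 * from the platform timer path, this walks every IRQ registered on the
 * current CPU and runs the sn_check_intr() test above.
 */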
void sn_lb_int_war_check(void)
{
	struct sn_irq_info *sn_irq_info;
	int i;

	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;

	rcu_read_lock();
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
			/*
			 * Only call for PCI bridges that are fully
			 * initialized.
			 */
			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
			    (sn_irq_info->irq_bridge != NULL))
				sn_check_intr(i, sn_irq_info);
		}
	}
	rcu_read_unlock();
}

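/*
 * Allocate the per-IRQ list heads.  Called once at boot; a failure here
 * is unrecoverable, hence the panic() rather than an error return.
 */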
void sn_irq_lh_init(void)
{
	int i;

	sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
	if (!sn_irq_lh)
		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

	for (i = 0; i < NR_IRQS; i++) {
		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!sn_irq_lh[i])
			panic("SN PCI INIT: Failed IRQ memory allocation\n");

		INIT_LIST_HEAD(sn_irq_lh[i]);
	}
}