e6f7551edfda49e8d22e9378a7d09a63ff009d0d
[firefly-linux-kernel-4.4.55.git] / arch / ia64 / sn / kernel / irq.c
1 /*
2  * Platform dependent support for SGI SN
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
9  */
10
11 #include <linux/irq.h>
12 #include <linux/spinlock.h>
13 #include <asm/sn/intr.h>
14 #include <asm/sn/addrs.h>
15 #include <asm/sn/arch.h>
16 #include "xtalk/xwidgetdev.h"
17 #include <asm/sn/pcibus_provider_defs.h>
18 #include <asm/sn/pcidev.h>
19 #include "pci/pcibr_provider.h"
20 #include <asm/sn/shub_mmr.h>
21 #include <asm/sn/sn_sal.h>
22
23 static void force_interrupt(int irq);
24 static void register_intr_pda(struct sn_irq_info *sn_irq_info);
25 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
26
27 extern int sn_force_interrupt_flag;
28 extern int sn_ioif_inited;
29 static struct list_head **sn_irq_lh;
30 static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
31
/*
 * Ask the PROM (via an uncached SAL call) to allocate an interrupt on
 * nasid/widget, targeted at req_nasid/req_slice.
 *
 * sn_irq_info is the physical address of a struct sn_irq_info that the
 * PROM fills in (callers pass __pa() of it).  Returns the SAL status
 * word; 0 means success.
 */
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
                                     u64 sn_irq_info,
                                     int req_irq, nasid_t req_nasid,
                                     int req_slice)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_ALLOC, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
                        (u64) req_nasid, (u64) req_slice);
        return ret_stuff.status;
}
47
/*
 * Release a PROM interrupt resource previously obtained with
 * sn_intr_alloc().  The PROM identifies the resource by the irq number
 * and cookie stored in sn_irq_info.  Any SAL error is ignored.
 */
static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
                                struct sn_irq_info *sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
                        (u64) SAL_INTR_FREE, (u64) local_nasid,
                        (u64) local_widget, (u64) sn_irq_info->irq_irq,
                        (u64) sn_irq_info->irq_cookie, 0, 0);
}
60
/*
 * No-op ->startup hook for irq_type_sn; SN hub interrupts need no
 * per-irq startup work.  Returns 0 (no pending interrupt).
 */
static unsigned int sn_startup_irq(unsigned int irq)
{
        return 0;
}
65
/* No-op ->shutdown hook for irq_type_sn. */
static void sn_shutdown_irq(unsigned int irq)
{
}
69
/* No-op ->disable hook for irq_type_sn. */
static void sn_disable_irq(unsigned int irq)
{
}
73
/* No-op ->enable hook for irq_type_sn. */
static void sn_enable_irq(unsigned int irq)
{
}
77
/*
 * Acknowledge an SN hub interrupt: write the currently-latched bits of
 * the shub SH_EVENT_OCCURRED register back through its ALIAS address,
 * then mark the vector in-service in the per-cpu pda bitmap (cleared
 * again in sn_end_irq()).
 */
static void sn_ack_irq(unsigned int irq)
{
        uint64_t event_occurred, mask = 0;
        int nasid;

        irq = irq & 0xff;       /* reduce to the hardware vector number */
        nasid = get_nasid();
        event_occurred =
            HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
        mask = event_occurred & SH_ALL_INT_MASK;
        /* NOTE(review): storing the mask to the ALIAS address presumably
         * clears those event bits (write-to-clear) — confirm against the
         * shub MMR documentation. */
        HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
                 mask);
        __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

        move_irq(irq);
}
94
/*
 * End-of-interrupt processing: for the UART vector, recheck the shub
 * event register and self-IPI if an interrupt may have been missed;
 * then clear the vector's in-service bit (set in sn_ack_irq()) and,
 * when sn_force_interrupt_flag is set, replay the irq.
 */
static void sn_end_irq(unsigned int irq)
{
        int nasid;
        int ivec;
        uint64_t event_occurred;

        ivec = irq & 0xff;      /* hardware vector number */
        if (ivec == SGI_UART_VECTOR) {
                nasid = get_nasid();
                event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
                                       (nasid, SH_EVENT_OCCURRED));
                /* If the UART bit is set here, we may have received an
                 * interrupt from the UART that the driver missed.  To
                 * make sure, we IPI ourselves to force us to look again.
                 */
                if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
                        platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
                                          IA64_IPI_DM_INT, 0);
                }
        }
        __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
        if (sn_force_interrupt_flag)
                force_interrupt(irq);
}
119
120 static void sn_irq_info_free(struct rcu_head *head);
121
/*
 * Retarget every device interrupt registered on 'irq' at the first cpu
 * in 'mask'.  For each sn_irq_info on the irq's list: clone it, free
 * and unregister the old PROM interrupt resource, allocate a new one
 * aimed at the chosen cpu, then publish the clone via RCU and free the
 * old entry after a grace period.
 *
 * NOTE(review): if sn_intr_alloc() fails, the old PROM resource has
 * already been freed and unregistered while the stale sn_irq_info is
 * still on the list — the interrupt then appears effectively lost.
 * Verify whether this failure path needs recovery.
 */
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
        int cpuid, cpuphys;

        cpuid = first_cpu(mask);
        cpuphys = cpu_physical_id(cpuid);

        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list) {
                uint64_t bridge;
                int local_widget, status;
                nasid_t local_nasid;
                struct sn_irq_info *new_irq_info;

                /* GFP_ATOMIC: may be called with interrupts disabled. */
                new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
                if (new_irq_info == NULL)
                        break;
                memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

                bridge = (uint64_t) new_irq_info->irq_bridge;
                if (!bridge) {
                        kfree(new_irq_info);
                        break; /* irq is not a device interrupt */
                }

                local_nasid = NASID_GET(bridge);

                /* Odd nasids use the TIO widget-number encoding. */
                if (local_nasid & 1)
                        local_widget = TIO_SWIN_WIDGETNUM(bridge);
                else
                        local_widget = SWIN_WIDGETNUM(bridge);

                /* Free the old PROM new_irq_info structure */
                sn_intr_free(local_nasid, local_widget, new_irq_info);
                /* Update kernels new_irq_info with new target info */
                unregister_intr_pda(new_irq_info);

                /* allocate a new PROM new_irq_info struct */
                status = sn_intr_alloc(local_nasid, local_widget,
                                       __pa(new_irq_info), irq,
                                       cpuid_to_nasid(cpuid),
                                       cpuid_to_slice(cpuid));

                /* SAL call failed */
                if (status) {
                        kfree(new_irq_info);
                        break;
                }

                new_irq_info->irq_cpuid = cpuid;
                register_intr_pda(new_irq_info);

                if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type))
                        pcibr_change_devices_irq(new_irq_info);

                /* Swap the clone in under the lock; readers walk via RCU. */
                spin_lock(&sn_irq_info_lock);
                list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
                spin_unlock(&sn_irq_info_lock);
                /* Old entry is freed after all RCU readers are done. */
                call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

#ifdef CONFIG_SMP
                set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
        }
}
188
/*
 * SN hub interrupt type; installed on every unclaimed irq descriptor
 * by sn_irq_init().  startup/shutdown/enable/disable are no-ops on
 * this platform.
 */
struct hw_interrupt_type irq_type_sn = {
        .typename       = "SN hub",
        .startup        = sn_startup_irq,
        .shutdown       = sn_shutdown_irq,
        .enable         = sn_enable_irq,
        .disable        = sn_disable_irq,
        .ack            = sn_ack_irq,
        .end            = sn_end_irq,
        .set_affinity   = sn_set_affinity_irq
};
199
200 unsigned int sn_local_vector_to_irq(u8 vector)
201 {
202         return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
203 }
204
205 void sn_irq_init(void)
206 {
207         int i;
208         irq_desc_t *base_desc = irq_desc;
209
210         for (i = 0; i < NR_IRQS; i++) {
211                 if (base_desc[i].handler == &no_irq_type) {
212                         base_desc[i].handler = &irq_type_sn;
213                 }
214         }
215 }
216
217 static void register_intr_pda(struct sn_irq_info *sn_irq_info)
218 {
219         int irq = sn_irq_info->irq_irq;
220         int cpu = sn_irq_info->irq_cpuid;
221
222         if (pdacpu(cpu)->sn_last_irq < irq) {
223                 pdacpu(cpu)->sn_last_irq = irq;
224         }
225
226         if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
227                 pdacpu(cpu)->sn_first_irq = irq;
228         }
229 }
230
231 static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
232 {
233         int irq = sn_irq_info->irq_irq;
234         int cpu = sn_irq_info->irq_cpuid;
235         struct sn_irq_info *tmp_irq_info;
236         int i, foundmatch;
237
238         rcu_read_lock();
239         if (pdacpu(cpu)->sn_last_irq == irq) {
240                 foundmatch = 0;
241                 for (i = pdacpu(cpu)->sn_last_irq - 1;
242                      i && !foundmatch; i--) {
243                         list_for_each_entry_rcu(tmp_irq_info,
244                                                 sn_irq_lh[i],
245                                                 list) {
246                                 if (tmp_irq_info->irq_cpuid == cpu) {
247                                         foundmatch = 1;
248                                         break;
249                                 }
250                         }
251                 }
252                 pdacpu(cpu)->sn_last_irq = i;
253         }
254
255         if (pdacpu(cpu)->sn_first_irq == irq) {
256                 foundmatch = 0;
257                 for (i = pdacpu(cpu)->sn_first_irq + 1;
258                      i < NR_IRQS && !foundmatch; i++) {
259                         list_for_each_entry_rcu(tmp_irq_info,
260                                                 sn_irq_lh[i],
261                                                 list) {
262                                 if (tmp_irq_info->irq_cpuid == cpu) {
263                                         foundmatch = 1;
264                                         break;
265                                 }
266                         }
267                 }
268                 pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
269         }
270         rcu_read_unlock();
271 }
272
273 static void sn_irq_info_free(struct rcu_head *head)
274 {
275         struct sn_irq_info *sn_irq_info;
276
277         sn_irq_info = container_of(head, struct sn_irq_info, rcu);
278         kfree(sn_irq_info);
279 }
280
281 void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
282 {
283         nasid_t nasid = sn_irq_info->irq_nasid;
284         int slice = sn_irq_info->irq_slice;
285         int cpu = nasid_slice_to_cpuid(nasid, slice);
286
287         pci_dev_get(pci_dev);
288
289         sn_irq_info->irq_cpuid = cpu;
290         sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
291
292         /* link it into the sn_irq[irq] list */
293         spin_lock(&sn_irq_info_lock);
294         list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
295         spin_unlock(&sn_irq_info_lock);
296
297         (void)register_intr_pda(sn_irq_info);
298 }
299
/*
 * Undo sn_irq_fixup() for 'pci_dev': remove its sn_irq_info from the
 * pda bookkeeping and the per-irq list, free the entry after an RCU
 * grace period, and drop the device reference taken at fixup time.
 *
 * NOTE(review): the early returns below skip pci_dev_put() even though
 * sn_irq_fixup() unconditionally took a reference — verify whether a
 * device can reach here with irq_irq == 0 and leak that reference.
 */
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
        struct sn_irq_info *sn_irq_info;

        /* Only cleanup IRQ stuff if this device has a host bus context */
        if (!SN_PCIDEV_BUSSOFT(pci_dev))
                return;

        sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
        if (!sn_irq_info || !sn_irq_info->irq_irq)
                return;

        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
        list_del_rcu(&sn_irq_info->list);
        spin_unlock(&sn_irq_info_lock);
        /* Free only after all RCU list walkers have finished. */
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

        pci_dev_put(pci_dev);
}
320
321 static void force_interrupt(int irq)
322 {
323         struct sn_irq_info *sn_irq_info;
324
325         if (!sn_ioif_inited)
326                 return;
327
328         rcu_read_lock();
329         list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) {
330                 if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
331                     (sn_irq_info->irq_bridge != NULL))
332                         pcibr_force_interrupt(sn_irq_info);
333         }
334         rcu_read_unlock();
335 }
336
337 /*
338  * Check for lost interrupts.  If the PIC int_status reg. says that
339  * an interrupt has been sent, but not handled, and the interrupt
340  * is not pending in either the cpu irr regs or in the soft irr regs,
341  * and the interrupt is not in service, then the interrupt may have
342  * been lost.  Force an interrupt on that pin.  It is possible that
343  * the interrupt is in flight, so we may generate a spurious interrupt,
344  * but we should never miss a real lost interrupt.
345  */
346 static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
347 {
348         uint64_t regval;
349         int irr_reg_num;
350         int irr_bit;
351         uint64_t irr_reg;
352         struct pcidev_info *pcidev_info;
353         struct pcibus_info *pcibus_info;
354
355         pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
356         if (!pcidev_info)
357                 return;
358
359         pcibus_info =
360             (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
361             pdi_pcibus_info;
362         regval = pcireg_intr_status_get(pcibus_info);
363
364         irr_reg_num = irq_to_vector(irq) / 64;
365         irr_bit = irq_to_vector(irq) % 64;
366         switch (irr_reg_num) {
367         case 0:
368                 irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
369                 break;
370         case 1:
371                 irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
372                 break;
373         case 2:
374                 irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
375                 break;
376         case 3:
377                 irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
378                 break;
379         }
380         if (!test_bit(irr_bit, &irr_reg)) {
381                 if (!test_bit(irq, pda->sn_soft_irr)) {
382                         if (!test_bit(irq, pda->sn_in_service_ivecs)) {
383                                 regval &= 0xff;
384                                 if (sn_irq_info->irq_int_bit & regval &
385                                     sn_irq_info->irq_last_intr) {
386                                         regval &=
387                                             ~(sn_irq_info->
388                                               irq_int_bit & regval);
389                                         pcibr_force_interrupt(sn_irq_info);
390                                 }
391                         }
392                 }
393         }
394         sn_irq_info->irq_last_intr = regval;
395 }
396
397 void sn_lb_int_war_check(void)
398 {
399         struct sn_irq_info *sn_irq_info;
400         int i;
401
402         if (!sn_ioif_inited || pda->sn_first_irq == 0)
403                 return;
404
405         rcu_read_lock();
406         for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
407                 list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
408                         /*
409                          * Only call for PCI bridges that are fully
410                          * initialized.
411                          */
412                         if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
413                             (sn_irq_info->irq_bridge != NULL))
414                                 sn_check_intr(i, sn_irq_info);
415                 }
416         }
417         rcu_read_unlock();
418 }
419
420 void sn_irq_lh_init(void)
421 {
422         int i;
423
424         sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
425         if (!sn_irq_lh)
426                 panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
427
428         for (i = 0; i < NR_IRQS; i++) {
429                 sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
430                 if (!sn_irq_lh[i])
431                         panic("SN PCI INIT: Failed IRQ memory allocation\n");
432
433                 INIT_LIST_HEAD(sn_irq_lh[i]);
434         }
435
436 }