gru: support instruction completion interrupts
authorJack Steiner <steiner@sgi.com>
Wed, 17 Jun 2009 23:28:25 +0000 (16:28 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 18 Jun 2009 20:04:00 +0000 (13:04 -0700)
Add support for interrupts generated by GRU instruction completion.
Previously, the only interrupts were for TLB misses.  The hardware also
supports interrupts on instruction completion.  This will be supported for
instructions issued by the kernel.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/misc/sgi-gru/grufault.c
drivers/misc/sgi-gru/grumain.c
drivers/misc/sgi-gru/grutables.h

index f15152165a99c91092e437cb5dbd4297315830fd..3220e95be6b5752faaee4ebe776f300947e85098 100644 (file)
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
  * the GRU, atomic operations must be used to clear bits.
  */
 static void get_clear_fault_map(struct gru_state *gru,
-                               struct gru_tlb_fault_map *map)
+                               struct gru_tlb_fault_map *imap,
+                               struct gru_tlb_fault_map *dmap)
 {
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
-               map->fault_bits[i] = k;
+               imap->fault_bits[i] = k;
+               k = tfm->done_bits[i];
+               if (k)
+                       k = xchg(&tfm->done_bits[i], 0UL);
+               dmap->fault_bits[i] = k;
        }
 
        /*
@@ -449,7 +454,7 @@ failactive:
 irqreturn_t gru_intr(int irq, void *dev_id)
 {
        struct gru_state *gru;
-       struct gru_tlb_fault_map map;
+       struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        int cbrnum, ctxnum;
@@ -462,11 +467,19 @@ irqreturn_t gru_intr(int irq, void *dev_id)
                        raw_smp_processor_id(), irq);
                return IRQ_NONE;
        }
-       get_clear_fault_map(gru, &map);
-       gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
-               map.fault_bits[0]);
+       get_clear_fault_map(gru, &imap, &dmap);
+       gru_dbg(grudev,
+               "irq %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+               irq, gru->gs_gid, imap.fault_bits[0], imap.fault_bits[1],
+               dmap.fault_bits[0], dmap.fault_bits[1]);
+
+       for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+               complete(gru->gs_blade->bs_async_wq);
+               gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+                       gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+       }
 
-       for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
+       for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh); /* Helps on hdw, required for emulator */
 
index afc4c473c7949329e42c73aaa1c236faf087e88a..e38a0f1775ffc2fcfbb5e72451323134a1c1fecd 100644 (file)
@@ -572,8 +572,12 @@ void gru_load_context(struct gru_thread_state *gts)
 
        if (is_kernel_context(gts)) {
                cch->unmap_enable = 1;
+               cch->tfm_done_bit_enable = 1;
+               cch->cb_int_enable = 1;
        } else {
                cch->unmap_enable = 0;
+               cch->tfm_done_bit_enable = 0;
+               cch->cb_int_enable = 0;
                asid = gru_load_mm_tracker(gru, gts);
                for (i = 0; i < 8; i++) {
                        cch->asid[i] = asid + i;
index 5f8f3bda2fa945105bba37292b24dbe97dc20ca5..ca81800146ffc048bd3b123c4d81f76f53910be4 100644 (file)
@@ -462,6 +462,11 @@ struct gru_blade_state {
        struct rw_semaphore     bs_kgts_sema;           /* lock for kgts */
        struct gru_thread_state *bs_kgts;               /* GTS for kernel use */
 
+       /* ---- the following are used for managing kernel async GRU CBRs --- */
+       int                     bs_async_dsr_bytes;     /* DSRs for async */
+       int                     bs_async_cbrs;          /* CBRs AU for async */
+       struct completion       *bs_async_wq;
+
        /* ---- the following are protected by the bs_lock spinlock ---- */
        spinlock_t              bs_lock;                /* lock used for
                                                           stealing contexts */