cxgb4: Detect DB FULL events and notify RDMA ULD
authorVipul Pandya <vipul@chelsio.com>
Fri, 18 May 2012 09:59:24 +0000 (15:29 +0530)
committerRoland Dreier <roland@purestorage.com>
Fri, 18 May 2012 20:22:25 +0000 (13:22 -0700)
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c

index 0fe18850c8388c0180196236637ef66938876ae2..f91b259f19be7d6f3047252e034d5b04ec63a1f4 100644 (file)
@@ -504,6 +504,8 @@ struct adapter {
        void **tid_release_head;
        spinlock_t tid_release_lock;
        struct work_struct tid_release_task;
+       struct work_struct db_full_task;
+       struct work_struct db_drop_task;
        bool tid_release_task_busy;
 
        struct dentry *debugfs_root;
@@ -719,4 +721,6 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+void t4_db_full(struct adapter *adapter);
+void t4_db_dropped(struct adapter *adapter);
 #endif /* __CXGB4_H__ */
index b126b98065a9835dd882326bafafa6e80cb836f9..c243f932099ed061b4c97e688fd9b375741a8d84 100644 (file)
@@ -2366,6 +2366,16 @@ unsigned int cxgb4_port_chan(const struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_port_chan);
 
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
+{
+       struct adapter *adap = netdev2adap(dev);
+       u32 v;
+
+       v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+       return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+}
+EXPORT_SYMBOL(cxgb4_dbfifo_count);
+
 /**
  *     cxgb4_port_viid - get the VI id of a port
  *     @dev: the net device for the port
@@ -2446,6 +2456,69 @@ static struct notifier_block cxgb4_netevent_nb = {
        .notifier_call = netevent_cb
 };
 
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+       mutex_lock(&uld_mutex);
+       if (adap->uld_handle[CXGB4_ULD_RDMA])
+               ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+                               cmd);
+       mutex_unlock(&uld_mutex);
+}
+
/*
 * Work handler for doorbell-FIFO-full interrupts: tell the RDMA ULD to
 * stop ringing doorbells, poll until both FIFOs drain, then tell it the
 * FIFO is empty again.
 */
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;
	/* Wait interval in microseconds between FIFO polls.  static: the
	 * value persists across invocations so repeated DB-full events
	 * escalate the delay (see below).  NOTE(review): it is never reset
	 * back to 1000, so once capped it stays at 200ms — confirm that is
	 * intended.
	 */
	static int delay = 1000;
	u32 v;

	adap = container_of(work, struct adapter, db_full_task);


	/* stop LLD queues */

	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	do {
		/* Sleep (not busy-wait) for "delay" usecs between reads of
		 * the FIFO status register.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		/* Done once both the low- and high-priority FIFOs are empty */
		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
			break;
	} while (1);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);


	/*
	 * The more we get db full interrupts, the more we'll delay
	 * in re-enabling db rings on queues, capped off at 200ms.
	 */
	delay = min(delay << 1, 200000);

	/* resume LLD queues */
}
+
+static void process_db_drop(struct work_struct *work)
+{
+       struct adapter *adap;
+       adap = container_of(work, struct adapter, db_drop_task);
+
+
+       /*
+        * sync the PIDX values in HW and SW for LLD queues.
+        */
+
+       notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+}
+
+void t4_db_full(struct adapter *adap)
+{
+       schedule_work(&adap->db_full_task);
+}
+
+void t4_db_dropped(struct adapter *adap)
+{
+       schedule_work(&adap->db_drop_task);
+}
+
 static void uld_attach(struct adapter *adap, unsigned int uld)
 {
        void *handle;
@@ -2649,6 +2722,8 @@ static void cxgb_down(struct adapter *adapter)
 {
        t4_intr_disable(adapter);
        cancel_work_sync(&adapter->tid_release_task);
+       cancel_work_sync(&adapter->db_full_task);
+       cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;
 
@@ -3601,6 +3676,8 @@ static int __devinit init_one(struct pci_dev *pdev,
        spin_lock_init(&adapter->tid_release_lock);
 
        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+       INIT_WORK(&adapter->db_full_task, process_db_full);
+       INIT_WORK(&adapter->db_drop_task, process_db_drop);
 
        err = t4_prep_adapter(adapter);
        if (err)
index b1d39b8d141af94d343a19689459d1914628d7f9..5cc2f27d60c7c7665087effa54fedd2bdc2e92ea 100644 (file)
@@ -163,6 +163,12 @@ enum cxgb4_state {
        CXGB4_STATE_DETACH
 };
 
/* Control commands passed to a ULD's ->control() callback. */
enum cxgb4_control {
	CXGB4_CONTROL_DB_FULL,	/* doorbell FIFO filling: stop ringing doorbells */
	CXGB4_CONTROL_DB_EMPTY,	/* doorbell FIFO drained: doorbells may resume */
	CXGB4_CONTROL_DB_DROP,	/* doorbell dropped: resync HW/SW PIDX values */
};
+
 struct pci_dev;
 struct l2t_data;
 struct net_device;
@@ -225,6 +231,7 @@ struct cxgb4_uld_info {
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
 unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
index 2dae7959f00082c46c9f00be8b26c80675ae4c22..234c157a4879a926fedf29d1358fe69d3746fd55 100644 (file)
@@ -2415,6 +2415,12 @@ void t4_sge_init(struct adapter *adap)
                         RXPKTCPLMODE |
                         (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
 
+       t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+                       V_HP_INT_THRESH(5) | V_LP_INT_THRESH(5),
+                       V_HP_INT_THRESH(5) | V_LP_INT_THRESH(5));
+       t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
+                       F_ENABLE_DROP);
+
        for (i = v = 0; i < 32; i += 4)
                v |= (PAGE_SHIFT - 10) << i;
        t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
index d1ec111aebd8117be5529fc945273e3daeb49f1b..13609bf056b018de1521099636aa49b69510cacf 100644 (file)
@@ -1013,6 +1013,8 @@ static void sge_intr_handler(struct adapter *adapter)
                { ERR_INVALID_CIDX_INC,
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+               { F_DBFIFO_LP_INT, NULL, -1, 0 },
+               { F_DBFIFO_HP_INT, NULL, -1, 0 },
                { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
                { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
@@ -1042,6 +1044,12 @@ static void sge_intr_handler(struct adapter *adapter)
                t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
        }
 
+       err = t4_read_reg(adapter, A_SGE_INT_CAUSE3);
+       if (err & (F_DBFIFO_HP_INT|F_DBFIFO_LP_INT))
+               t4_db_full(adapter);
+       if (err & F_ERR_DROPPED_DB)
+               t4_db_dropped(adapter);
+
        if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
            v != 0)
                t4_fatal_err(adapter);
@@ -1513,6 +1521,7 @@ void t4_intr_enable(struct adapter *adapter)
                     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
                     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
                     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+                    F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
                     EGRESS_SIZE_ERR);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);