IB/mlx4: Reset flow support for IB kernel ULPs
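During a device reset the HCA stops generating hardware completions, but kernel ULPs still call ib_poll_cq() and expect their outstanding work requests to complete. To support that, each CQ now tracks the QPs posting to it, and when the device is flagged MLX4_DEVICE_STATE_INTERNAL_ERROR, mlx4_ib_poll_cq() returns software-generated IB_WC_WR_FLUSH_ERR completions for every outstanding WQE instead of reading hardware CQEs.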
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035b2f9e106941e057d36258019b7c1..543ecdd8667bad824fa3313a5b45be9693a5fc69 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
+       INIT_LIST_HEAD(&cq->send_qp_list);
+       INIT_LIST_HEAD(&cq->recv_qp_list);
 
        if (context) {
                struct mlx4_ib_create_cq ucmd;
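The two list heads initialized above are new fields on the CQ, and each QP carries a matching list node for every CQ it posts to. The mlx4_ib.h side of the change is not shown in this diff; a minimal sketch, with field names inferred from the list usage below:

    /* Sketch of the assumed mlx4_ib.h additions (not part of this hunk);
     * struct list_head comes from <linux/list.h>.
     */
    struct mlx4_ib_cq {
            /* ... existing fields ... */
            struct list_head send_qp_list;  /* QPs sending on this CQ */
            struct list_head recv_qp_list;  /* QPs receiving on this CQ */
    };

    struct mlx4_ib_qp {
            /* ... existing fields ... */
            struct list_head cq_send_list;  /* node on send_cq->send_qp_list */
            struct list_head cq_recv_list;  /* node on recv_cq->recv_qp_list */
    };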
@@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
        return 0;
 }
 
+static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
+                              struct ib_wc *wc, int *npolled, int is_send)
+{
+       struct mlx4_ib_wq *wq;
+       unsigned cur;
+       int i;
+
+       wq = is_send ? &qp->sq : &qp->rq;
+       cur = wq->head - wq->tail;
+
+       if (cur == 0)
+               return;
+
+       for (i = 0; i < cur && *npolled < num_entries; i++) {
+               wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+               wc->status = IB_WC_WR_FLUSH_ERR;
+               wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
+               wc->qp = &qp->ibqp;
+               wq->tail++;
+               (*npolled)++;
+               wc++;
+       }
+}
+
+static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+                                struct ib_wc *wc, int *npolled)
+{
+       struct mlx4_ib_qp *qp;
+
+       *npolled = 0;
+       /* Find uncompleted WQEs belonging to that cq and return
+        * simulated FLUSH_ERR completions
+        */
+       list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+               mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+               if (*npolled >= num_entries)
+                       goto out;
+       }
+
+       list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
+               mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
+               if (*npolled >= num_entries)
+                       goto out;
+       }
+
+out:
+       return;
+}
+
 static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
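mlx4_ib_qp_sw_comp() above measures the number of outstanding WQEs as wq->head - wq->tail (unsigned arithmetic keeps the count correct across index wraparound) and emits one flushed completion per WQE; mlx4_ib_poll_sw_comp() then drains send queues before receive queues. From a ULP's perspective the polling contract is unchanged; a minimal sketch of the consumer side, where ulp_complete_wr() is a hypothetical cleanup helper, not a real API:

    /* Sketch: how a kernel ULP's drain path would observe the simulated
     * completions during reset. ib_poll_cq() is the standard verbs call.
     */
    #include <rdma/ib_verbs.h>

    static void ulp_drain_cq(struct ib_cq *cq)
    {
            struct ib_wc wc;

            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    if (wc.status == IB_WC_WR_FLUSH_ERR)
                            ulp_complete_wr(wc.wr_id, -EIO); /* hypothetical */
            }
    }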
@@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        unsigned long flags;
        int npolled;
        int err = 0;
+       struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
        spin_lock_irqsave(&cq->lock, flags);
+       if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
+               mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+               goto out;
+       }
 
        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
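The new check runs under cq->lock, so simulated completions can never interleave with hardware CQE processing on the same CQ. MLX4_DEVICE_STATE_INTERNAL_ERROR is a bit in the persistent device state introduced by the same reset-flow series; presumably along these lines in include/linux/mlx4/device.h (values illustrative):

    /* Assumed shape of the device-state bits */
    enum {
            MLX4_DEVICE_STATE_UP             = 1 << 0,
            MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
    };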
@@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 
        mlx4_cq_set_ci(&cq->mcq);
 
+out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
        if (err == 0 || err == -EAGAIN)
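For the CQ lists to have members, the companion qp.c change (not shown here) must register each QP against its CQs at creation and remove it at destruction. A sketch of the registration, assuming it runs with both CQ locks held so the list walks in poll_cq never race with insertion:

    /* Sketch (assumed): in create_qp_common(), under both CQ locks */
    mlx4_ib_lock_cqs(send_cq, recv_cq);
    list_add_tail(&qp->cq_send_list, &send_cq->send_qp_list);
    list_add_tail(&qp->cq_recv_list, &recv_cq->recv_qp_list);
    mlx4_ib_unlock_cqs(send_cq, recv_cq);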