IB/mthca: Add support for query QP and SRQ
author	Eli Cohen <eli@mellanox.co.il>	Tue, 14 Feb 2006 00:40:21 +0000 (16:40 -0800)
committer	Roland Dreier <rolandd@cisco.com>	Mon, 20 Mar 2006 18:08:15 +0000 (10:08 -0800)
Implement the query_qp and query_srq methods in mthca.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_cmd.h
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/mthca/mthca_srq.c
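
A minimal, hypothetical consumer sketch (not part of this patch): once mthca registers these methods, a kernel ULP can read QP and SRQ attributes back through the core ib_query_qp()/ib_query_srq() verbs. The helper name dump_qp_state() and the printed fields are illustrative only, and the QP/SRQ are assumed to have been created on an mthca device beforehand.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: read back QP and SRQ attributes via the core verbs. */
static void dump_qp_state(struct ib_qp *qp, struct ib_srq *srq)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr init_attr;
	struct ib_srq_attr srq_attr;

	/* mthca_query_qp() fills all attributes regardless of the mask. */
	if (!ib_query_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_PATH_MTU, &init_attr))
		printk(KERN_INFO "QP state %d, path MTU %d, max_send_wr %u\n",
		       qp_attr.qp_state, qp_attr.path_mtu,
		       qp_attr.cap.max_send_wr);

	/* A non-zero srq_limit is only reported on mem-free (Arbel) HCAs. */
	if (!ib_query_srq(srq, &srq_attr))
		printk(KERN_INFO "SRQ limit %u, max_wr %u, max_sge %u\n",
		       srq_attr.srq_limit, srq_attr.max_wr, srq_attr.max_sge);
}

Userspace reaches the same functionality through the IB_USER_VERBS_CMD_QUERY_QP and IB_USER_VERBS_CMD_QUERY_SRQ bits advertised in mthca_register_device() below.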

index acd00831ef08613bc5b3c5af299fd8e4c99a85ce..890c060ff4d1bad245a6d326fff514ba45cd2186 100644 (file)
@@ -1560,6 +1560,13 @@ int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                             CMD_TIME_CLASS_A, status);
 }
 
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                   struct mthca_mailbox *mailbox, u8 *status)
+{
+       return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+                            CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
 {
        return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
index 5156701ae52c099fb8ba16c89eb9d1a2f03b443a..e4ec35c40dd3c1cf892bf649ec2c4ca92ea5eed4 100644 (file)
@@ -305,6 +305,8 @@ int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int srq_num, u8 *status);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+                   struct mthca_mailbox *mailbox, u8 *status);
 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
                    enum ib_qp_state next, u32 num, int is_ee,
index d827558c27be19cdc38800d55b479e1bd6829c62..2f4500f85ae04eabb3e3cab2c0fd434cac83bbdd 100644 (file)
@@ -479,6 +479,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
 int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask);
+int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -489,6 +490,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type);
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                  struct ib_qp_init_attr *qp_init_attr);
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                          struct ib_send_wr **bad_wr);
index 1fa1b55ffffeaa61eb1a245062e6ef3a7407a6fa..084bea592df502b95a6a6a2e1c2344c92cb43045 100644 (file)
@@ -1264,12 +1264,14 @@ int mthca_register_device(struct mthca_dev *dev)
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
+               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
+               (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
        dev->ib_dev.node_type            = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
@@ -1291,7 +1293,8 @@ int mthca_register_device(struct mthca_dev *dev)
 
        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq           = mthca_create_srq;
-               dev->ib_dev.modify_srq           = mthca_modify_srq;
+               dev->ib_dev.modify_srq           = mthca_modify_srq;
+               dev->ib_dev.query_srq            = mthca_query_srq;
                dev->ib_dev.destroy_srq          = mthca_destroy_srq;
 
                if (mthca_is_memfree(dev))
@@ -1302,6 +1305,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
+       dev->ib_dev.query_qp             = mthca_query_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.resize_cq            = mthca_resize_cq;
index c2d3300dace9798984e1d44a95d6ee324aefc035..e99d735f5f36e5ce0027e5eedd27d7991c97e210 100644 (file)
@@ -348,6 +348,141 @@ static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
        return cpu_to_be32(hw_access_flags);
 }
 
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+       switch (mthca_state) {
+       case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
+       case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
+       case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
+       case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
+       case MTHCA_QP_STATE_DRAINING:
+       case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
+       case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
+       case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
+       default:                      return -1;
+       }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+       switch (mthca_mig_state) {
+       case 0:  return IB_MIG_ARMED;
+       case 1:  return IB_MIG_REARM;
+       case 3:  return IB_MIG_MIGRATED;
+       default: return -1;
+       }
+}
+
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+       int ib_flags = 0;
+
+       if (mthca_flags & MTHCA_QP_BIT_RRE)
+               ib_flags |= IB_ACCESS_REMOTE_READ;
+       if (mthca_flags & MTHCA_QP_BIT_RWE)
+               ib_flags |= IB_ACCESS_REMOTE_WRITE;
+       if (mthca_flags & MTHCA_QP_BIT_RAE)
+               ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+       return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+                               struct mthca_qp_path *path)
+{
+       memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+       ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+       ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
+       ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
+       ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+       ib_ah_attr->static_rate   = path->static_rate & 0x7;
+       ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+       if (ib_ah_attr->ah_flags) {
+               ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
+               ib_ah_attr->grh.hop_limit  = path->hop_limit;
+               ib_ah_attr->grh.traffic_class =
+                       (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
+               ib_ah_attr->grh.flow_label =
+                       be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
+               memcpy(ib_ah_attr->grh.dgid.raw,
+                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+       }
+}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                  struct ib_qp_init_attr *qp_init_attr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       int err;
+       struct mthca_mailbox *mailbox;
+       struct mthca_qp_param *qp_param;
+       struct mthca_qp_context *context;
+       int mthca_state;
+       u8 status;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
+               err = -EINVAL;
+               goto out;
+       }
+
+       qp_param    = mailbox->buf;
+       context     = &qp_param->context;
+       mthca_state = be32_to_cpu(context->flags) >> 28;
+
+       qp_attr->qp_state            = to_ib_qp_state(mthca_state);
+       qp_attr->cur_qp_state        = qp_attr->qp_state;
+       qp_attr->path_mtu            = context->mtu_msgmax >> 5;
+       qp_attr->path_mig_state      =
+               to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+       qp_attr->qkey                = be32_to_cpu(context->qkey);
+       qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+       qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
+       qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
+       qp_attr->qp_access_flags     =
+               to_ib_qp_access_flags(be32_to_cpu(context->params2));
+       qp_attr->cap.max_send_wr     = qp->sq.max;
+       qp_attr->cap.max_recv_wr     = qp->rq.max;
+       qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+       qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+       to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+       to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+
+       qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+       qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+
+       /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+       qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
+
+       qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+       qp_attr->max_dest_rd_atomic =
+               1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+       qp_attr->min_rnr_timer      =
+               (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+       qp_attr->port_num           = qp_attr->ah_attr.port_num;
+       qp_attr->timeout            = context->pri_path.ackto >> 3;
+       qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
+       qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
+       qp_attr->alt_port_num       = qp_attr->alt_ah_attr.port_num;
+       qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
+       qp_init_attr->cap           = qp_attr->cap;
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
+
 static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
 {
        path->g_mylmc     = ah->src_path_bits & 0x7f;
index f1a1da147d0be71f13310ea1f721734b92b2fc46..deb526ce013d641c2816a20353cf33945be9b369 100644 (file)
@@ -360,6 +360,38 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
        return 0;
 }
 
+int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+       struct mthca_dev *dev = to_mdev(ibsrq->device);
+       struct mthca_srq *srq = to_msrq(ibsrq);
+       struct mthca_mailbox *mailbox;
+       struct mthca_arbel_srq_context *arbel_ctx;
+       u8 status;
+       int err;
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+       if (err)
+               goto out;
+
+       if (mthca_is_memfree(dev)) {
+               arbel_ctx = mailbox->buf;
+               srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
+       } else
+               srq_attr->srq_limit = 0;
+
+       srq_attr->max_wr  = srq->max;
+       srq_attr->max_sge = srq->max_gs;
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+
+       return err;
+}
+
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
 {