1 /*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
38 /* not supported currently */
39 static int wq_signature;
42 MLX5_IB_ACK_REQ_FREQ = 8,
46 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
48 MLX5_IB_LINK_TYPE_IB = 0,
49 MLX5_IB_LINK_TYPE_ETH = 1
53 MLX5_IB_SQ_STRIDE = 6,
54 MLX5_IB_CACHE_LINE_SIZE = 64,
57 static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
76 unsigned int page_shift;
83 static int is_qp0(enum ib_qp_type qp_type)
85 return qp_type == IB_QPT_SMI;
88 static int is_qp1(enum ib_qp_type qp_type)
90 return qp_type == IB_QPT_GSI;
93 static int is_sqp(enum ib_qp_type qp_type)
95 return is_qp0(qp_type) || is_qp1(qp_type);
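/*
 * WQE accessors: both work queues live in the single QP buffer.  A WQE is
 * found by shifting its index by the queue's stride (rq.wqe_shift for the
 * RQ, the fixed 1 << MLX5_IB_SQ_STRIDE = 64-byte basic block for the SQ)
 * and adding the queue's offset within the buffer.
 */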
98 static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
100 return mlx5_buf_offset(&qp->buf, offset);
103 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
105 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
108 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
110 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
113 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
115 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
116 struct ib_event event;
118 if (type == MLX5_EVENT_TYPE_PATH_MIG)
119 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
121 if (ibqp->event_handler) {
122 event.device = ibqp->device;
123 event.element.qp = ibqp;
125 case MLX5_EVENT_TYPE_PATH_MIG:
126 event.event = IB_EVENT_PATH_MIG;
128 case MLX5_EVENT_TYPE_COMM_EST:
129 event.event = IB_EVENT_COMM_EST;
131 case MLX5_EVENT_TYPE_SQ_DRAINED:
132 event.event = IB_EVENT_SQ_DRAINED;
134 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
135 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
137 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
138 event.event = IB_EVENT_QP_FATAL;
140 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
141 event.event = IB_EVENT_PATH_MIG_ERR;
143 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
144 event.event = IB_EVENT_QP_REQ_ERR;
146 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
147 event.event = IB_EVENT_QP_ACCESS_ERR;
150 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
154 ibqp->event_handler(&event, ibqp->qp_context);
158 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
159 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
164 /* Sanity check RQ size before proceeding */
165 if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
171 qp->rq.wqe_shift = 0;
174 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
175 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
176 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
177 qp->rq.max_post = qp->rq.wqe_cnt;
179 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
180 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
181 wqe_size = roundup_pow_of_two(wqe_size);
182 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
183 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
184 qp->rq.wqe_cnt = wq_size / wqe_size;
185 if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
186 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
188 dev->mdev.caps.max_rq_desc_sz);
191 qp->rq.wqe_shift = ilog2(wqe_size);
192 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
193 qp->rq.max_post = qp->rq.wqe_cnt;
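/*
 * Worst-case fixed overhead, in bytes, placed in front of the data or
 * inline segments of a send WQE for the given transport type.
 */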
200 static int sq_overhead(enum ib_qp_type qp_type)
206 size += sizeof(struct mlx5_wqe_xrc_seg);
209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
210 sizeof(struct mlx5_wqe_atomic_seg) +
211 sizeof(struct mlx5_wqe_raddr_seg);
218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
219 sizeof(struct mlx5_wqe_raddr_seg) +
220 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
221 sizeof(struct mlx5_mkey_seg);
227 size += sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_datagram_seg);
231 case MLX5_IB_QPT_REG_UMR:
232 size += sizeof(struct mlx5_wqe_ctrl_seg) +
233 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
234 sizeof(struct mlx5_mkey_seg);
244 static int calc_send_wqe(struct ib_qp_init_attr *attr)
249 size = sq_overhead(attr->qp_type);
253 if (attr->cap.max_inline_data) {
254 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
255 attr->cap.max_inline_data;
258 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
259 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
260 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
261 return MLX5_SIG_WQE_SIZE;
263 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
266 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
267 struct mlx5_ib_qp *qp)
272 if (!attr->cap.max_send_wr)
275 wqe_size = calc_send_wqe(attr);
276 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
280 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
281 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
282 wqe_size, dev->mdev.caps.max_sq_desc_sz);
286 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
287 sizeof(struct mlx5_wqe_inline_seg);
288 attr->cap.max_inline_data = qp->max_inline_data;
290 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
291 qp->signature_en = true;
293 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
294 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
295 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
296 mlx5_ib_dbg(dev, "wqe count (%d) exceeds limit (%d)\n",
297 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
300 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
301 qp->sq.max_gs = attr->cap.max_send_sge;
302 qp->sq.max_post = wq_size / wqe_size;
303 attr->cap.max_send_wr = qp->sq.max_post;
308 static int set_user_buf_size(struct mlx5_ib_dev *dev,
309 struct mlx5_ib_qp *qp,
310 struct mlx5_ib_create_qp *ucmd)
312 int desc_sz = 1 << qp->sq.wqe_shift;
314 if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
315 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
316 desc_sz, dev->mdev.caps.max_sq_desc_sz);
320 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
321 mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
322 ucmd->sq_wqe_count);
326 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
328 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
329 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
330 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
334 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
335 (qp->sq.wqe_cnt << 6);
340 static int qp_has_rq(struct ib_qp_init_attr *attr)
342 if (attr->qp_type == IB_QPT_XRC_INI ||
343 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
344 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
345 !attr->cap.max_recv_wr)
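/*
 * UUAR (micro UAR) allocation helpers.  Index 0 serves the shared
 * low-latency-class allocations, indices from first_med_uuar() up to
 * first_hi_uuar() form the medium class, and the rest are handed out as
 * dedicated high-class registers; next_uuar() skips indices 2 and 3 within
 * each group of four, which appear to be reserved for fast-path doorbells.
 */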
351 static int first_med_uuar(void)
356 static int next_uuar(int n)
360 while (((n % 4) & 2))
366 static int num_med_uuar(struct mlx5_uuar_info *uuari)
370 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
371 uuari->num_low_latency_uuars - 1;
373 return n >= 0 ? n : 0;
376 static int max_uuari(struct mlx5_uuar_info *uuari)
378 return uuari->num_uars * 4;
381 static int first_hi_uuar(struct mlx5_uuar_info *uuari)
387 med = num_med_uuar(uuari);
388 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
397 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
401 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
402 if (!test_bit(i, uuari->bitmap)) {
403 set_bit(i, uuari->bitmap);
412 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
414 int minidx = first_med_uuar();
417 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
418 if (uuari->count[i] < uuari->count[minidx])
422 uuari->count[minidx]++;
426 static int alloc_uuar(struct mlx5_uuar_info *uuari,
427 enum mlx5_ib_latency_class lat)
431 mutex_lock(&uuari->lock);
433 case MLX5_IB_LATENCY_CLASS_LOW:
435 uuari->count[uuarn]++;
438 case MLX5_IB_LATENCY_CLASS_MEDIUM:
442 uuarn = alloc_med_class_uuar(uuari);
445 case MLX5_IB_LATENCY_CLASS_HIGH:
449 uuarn = alloc_high_class_uuar(uuari);
452 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
456 mutex_unlock(&uuari->lock);
461 static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
463 clear_bit(uuarn, uuari->bitmap);
464 --uuari->count[uuarn];
467 static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
469 clear_bit(uuarn, uuari->bitmap);
470 --uuari->count[uuarn];
473 static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
475 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
476 int high_uuar = nuuars - uuari->num_low_latency_uuars;
478 mutex_lock(&uuari->lock);
480 --uuari->count[uuarn];
484 if (uuarn < high_uuar) {
485 free_med_class_uuar(uuari, uuarn);
489 free_high_class_uuar(uuari, uuarn);
492 mutex_unlock(&uuari->lock);
495 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
498 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
499 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
500 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
501 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
502 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
503 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
504 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
509 static int to_mlx5_st(enum ib_qp_type type)
512 case IB_QPT_RC: return MLX5_QP_ST_RC;
513 case IB_QPT_UC: return MLX5_QP_ST_UC;
514 case IB_QPT_UD: return MLX5_QP_ST_UD;
515 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
517 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
518 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
519 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
520 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
521 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
522 case IB_QPT_RAW_PACKET:
524 default: return -EINVAL;
528 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
530 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
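/*
 * create_user_qp: build a QP whose work queues live in user memory.
 * Flow: copy the create command from userspace, pick a UUAR (falling back
 * high -> medium -> low latency class), pin the user buffer with
 * ib_umem_get(), fill the PAS page list of the firmware mailbox, and map
 * the user doorbell record.
 */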
533 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
534 struct mlx5_ib_qp *qp, struct ib_udata *udata,
535 struct mlx5_create_qp_mbox_in **in,
536 struct mlx5_ib_create_qp_resp *resp, int *inlen)
538 struct mlx5_ib_ucontext *context;
539 struct mlx5_ib_create_qp ucmd;
548 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
550 mlx5_ib_dbg(dev, "copy failed\n");
554 context = to_mucontext(pd->uobject->context);
556 * TBD: should come from the verbs when we have the API
558 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
560 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
561 mlx5_ib_dbg(dev, "reverting to medium latency\n");
562 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
564 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
565 mlx5_ib_dbg(dev, "reverting to high latency\n");
566 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
568 mlx5_ib_warn(dev, "uuar allocation failed\n");
574 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
575 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
577 err = set_user_buf_size(dev, qp, &ucmd);
581 if (ucmd.buf_addr && qp->buf_size) {
582 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
584 if (IS_ERR(qp->umem)) {
585 mlx5_ib_dbg(dev, "umem_get failed\n");
586 err = PTR_ERR(qp->umem);
594 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
596 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
598 mlx5_ib_warn(dev, "bad offset\n");
601 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
602 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
605 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
606 *in = mlx5_vzalloc(*inlen);
612 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
613 (*in)->ctx.log_pg_sz_remote_qpn =
614 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
615 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
617 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
618 resp->uuar_index = uuarn;
621 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
623 mlx5_ib_dbg(dev, "map failed\n");
627 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
629 mlx5_ib_dbg(dev, "copy failed\n");
632 qp->create_type = MLX5_QP_USER;
637 mlx5_ib_db_unmap_user(context, &qp->db);
644 ib_umem_release(qp->umem);
647 free_uuar(&context->uuari, uuarn);
651 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
653 struct mlx5_ib_ucontext *context;
655 context = to_mucontext(pd->uobject->context);
656 mlx5_ib_db_unmap_user(context, &qp->db);
658 ib_umem_release(qp->umem);
659 free_uuar(&context->uuari, qp->uuarn);
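/*
 * create_kernel_qp: kernel-owned counterpart of the above.  The WQ buffer,
 * doorbell record and the wrid/w_list/wqe_head bookkeeping arrays are
 * allocated here, and the UUAR comes from the device's own pool.
 */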
662 static int create_kernel_qp(struct mlx5_ib_dev *dev,
663 struct ib_qp_init_attr *init_attr,
664 struct mlx5_ib_qp *qp,
665 struct mlx5_create_qp_mbox_in **in, int *inlen)
667 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
668 struct mlx5_uuar_info *uuari;
673 uuari = &dev->mdev.priv.uuari;
674 if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
677 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
678 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
680 uuarn = alloc_uuar(uuari, lc);
682 mlx5_ib_dbg(dev, "failed to allocate uuar\n");
686 qp->bf = &uuari->bfs[uuarn];
687 uar_index = qp->bf->uar->index;
689 err = calc_sq_size(dev, init_attr, qp);
691 mlx5_ib_dbg(dev, "err %d\n", err);
696 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
697 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
699 err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
701 mlx5_ib_dbg(dev, "err %d\n", err);
705 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
706 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
707 *in = mlx5_vzalloc(*inlen);
712 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
713 (*in)->ctx.log_pg_sz_remote_qpn =
714 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
715 /* Set "fast registration enabled" for all kernel QPs */
716 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
717 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
719 mlx5_fill_page_array(&qp->buf, (*in)->pas);
721 err = mlx5_db_alloc(&dev->mdev, &qp->db);
723 mlx5_ib_dbg(dev, "err %d\n", err);
730 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
731 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
732 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
733 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
734 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
736 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
737 !qp->sq.w_list || !qp->sq.wqe_head) {
741 qp->create_type = MLX5_QP_KERNEL;
746 mlx5_db_free(&dev->mdev, &qp->db);
747 kfree(qp->sq.wqe_head);
748 kfree(qp->sq.w_list);
750 kfree(qp->sq.wr_data);
757 mlx5_buf_free(&dev->mdev, &qp->buf);
760 free_uuar(&dev->mdev.priv.uuari, uuarn);
764 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
766 mlx5_db_free(&dev->mdev, &qp->db);
767 kfree(qp->sq.wqe_head);
768 kfree(qp->sq.w_list);
770 kfree(qp->sq.wr_data);
772 mlx5_buf_free(&dev->mdev, &qp->buf);
773 free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
776 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
778 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
779 (attr->qp_type == IB_QPT_XRC_INI))
780 return cpu_to_be32(MLX5_SRQ_RQ);
781 else if (!qp->has_rq)
782 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
784 return cpu_to_be32(MLX5_NON_ZERO_RQ);
787 static int is_connected(enum ib_qp_type qp_type)
789 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
795 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
796 struct ib_qp_init_attr *init_attr,
797 struct ib_udata *udata, struct mlx5_ib_qp *qp)
799 struct mlx5_ib_resources *devr = &dev->devr;
800 struct mlx5_ib_create_qp_resp resp;
801 struct mlx5_create_qp_mbox_in *in;
802 struct mlx5_ib_create_qp ucmd;
803 int inlen = sizeof(*in);
806 mutex_init(&qp->mutex);
807 spin_lock_init(&qp->sq.lock);
808 spin_lock_init(&qp->rq.lock);
810 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
811 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
812 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
815 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
819 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
820 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
822 if (pd && pd->uobject) {
823 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
824 mlx5_ib_dbg(dev, "copy failed\n");
828 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
829 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
831 qp->wq_sig = !!wq_signature;
834 qp->has_rq = qp_has_rq(init_attr);
835 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
836 qp, (pd && pd->uobject) ? &ucmd : NULL);
838 mlx5_ib_dbg(dev, "err %d\n", err);
844 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
845 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
846 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
847 mlx5_ib_dbg(dev, "invalid rq params\n");
850 if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
851 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
852 ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
855 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
857 mlx5_ib_dbg(dev, "err %d\n", err);
859 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
861 mlx5_ib_dbg(dev, "err %d\n", err);
863 qp->pa_lkey = to_mpd(pd)->pa_lkey;
869 in = mlx5_vzalloc(sizeof(*in));
873 qp->create_type = MLX5_QP_EMPTY;
876 if (is_sqp(init_attr->qp_type))
877 qp->port = init_attr->port_num;
879 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
880 MLX5_QP_PM_MIGRATED << 11);
882 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
883 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
885 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
888 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
890 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
891 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
893 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
897 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
898 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
901 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
903 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
905 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
907 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
909 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
913 if (qp->rq.wqe_cnt) {
914 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
915 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
918 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
921 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
923 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
925 /* Set default resources */
926 switch (init_attr->qp_type) {
928 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
929 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
930 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
931 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
934 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
935 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
936 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
939 if (init_attr->srq) {
940 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
941 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
943 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
944 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
948 if (init_attr->send_cq)
949 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
951 if (init_attr->recv_cq)
952 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
954 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
956 err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
958 mlx5_ib_dbg(dev, "create qp failed\n");
963 /* Hardware wants QPN written in big-endian order (after
964 * shifting) for send doorbell. Precompute this value to save
965 * a little bit when posting sends.
967 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
969 qp->mqp.event = mlx5_ib_qp_event;
974 if (qp->create_type == MLX5_QP_USER)
975 destroy_qp_user(pd, qp);
976 else if (qp->create_type == MLX5_QP_KERNEL)
977 destroy_qp_kernel(dev, qp);
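/*
 * Lock/unlock a QP's send and receive CQs together.  When both exist they
 * are taken in ascending CQN order so concurrent callers cannot deadlock;
 * the __acquire()/__release() annotations keep sparse happy when both
 * pointers refer to the same CQ.
 */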
983 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
984 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
988 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
989 spin_lock_irq(&send_cq->lock);
990 spin_lock_nested(&recv_cq->lock,
991 SINGLE_DEPTH_NESTING);
992 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
993 spin_lock_irq(&send_cq->lock);
994 __acquire(&recv_cq->lock);
996 spin_lock_irq(&recv_cq->lock);
997 spin_lock_nested(&send_cq->lock,
998 SINGLE_DEPTH_NESTING);
1001 spin_lock_irq(&send_cq->lock);
1003 } else if (recv_cq) {
1004 spin_lock_irq(&recv_cq->lock);
1008 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
1009 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1013 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1014 spin_unlock(&recv_cq->lock);
1015 spin_unlock_irq(&send_cq->lock);
1016 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
1017 __release(&recv_cq->lock);
1018 spin_unlock_irq(&send_cq->lock);
1020 spin_unlock(&send_cq->lock);
1021 spin_unlock_irq(&recv_cq->lock);
1024 spin_unlock_irq(&send_cq->lock);
1026 } else if (recv_cq) {
1027 spin_unlock_irq(&recv_cq->lock);
1031 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1033 return to_mpd(qp->ibqp.pd);
1036 static void get_cqs(struct mlx5_ib_qp *qp,
1037 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1039 switch (qp->ibqp.qp_type) {
1040 case IB_QPT_XRC_TGT:
1044 case MLX5_IB_QPT_REG_UMR:
1045 case IB_QPT_XRC_INI:
1046 *send_cq = to_mcq(qp->ibqp.send_cq);
1055 case IB_QPT_RAW_IPV6:
1056 case IB_QPT_RAW_ETHERTYPE:
1057 *send_cq = to_mcq(qp->ibqp.send_cq);
1058 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1061 case IB_QPT_RAW_PACKET:
1070 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1072 struct mlx5_ib_cq *send_cq, *recv_cq;
1073 struct mlx5_modify_qp_mbox_in *in;
1076 in = kzalloc(sizeof(*in), GFP_KERNEL);
1079 if (qp->state != IB_QPS_RESET)
1080 if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
1081 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
1082 mlx5_ib_warn(dev, "modify QP %06x to RESET failed\n",
1085 get_cqs(qp, &send_cq, &recv_cq);
1087 if (qp->create_type == MLX5_QP_KERNEL) {
1088 mlx5_ib_lock_cqs(send_cq, recv_cq);
1089 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1090 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1091 if (send_cq != recv_cq)
1092 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1093 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1096 err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
1098 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1102 if (qp->create_type == MLX5_QP_KERNEL)
1103 destroy_qp_kernel(dev, qp);
1104 else if (qp->create_type == MLX5_QP_USER)
1105 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1108 static const char *ib_qp_type_str(enum ib_qp_type type)
1112 return "IB_QPT_SMI";
1114 return "IB_QPT_GSI";
1121 case IB_QPT_RAW_IPV6:
1122 return "IB_QPT_RAW_IPV6";
1123 case IB_QPT_RAW_ETHERTYPE:
1124 return "IB_QPT_RAW_ETHERTYPE";
1125 case IB_QPT_XRC_INI:
1126 return "IB_QPT_XRC_INI";
1127 case IB_QPT_XRC_TGT:
1128 return "IB_QPT_XRC_TGT";
1129 case IB_QPT_RAW_PACKET:
1130 return "IB_QPT_RAW_PACKET";
1131 case MLX5_IB_QPT_REG_UMR:
1132 return "MLX5_IB_QPT_REG_UMR";
1135 return "Invalid QP type";
1139 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1140 struct ib_qp_init_attr *init_attr,
1141 struct ib_udata *udata)
1143 struct mlx5_ib_dev *dev;
1144 struct mlx5_ib_qp *qp;
1149 dev = to_mdev(pd->device);
1151 /* being cautious here */
1152 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1153 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1154 pr_warn("%s: no PD for transport %s\n", __func__,
1155 ib_qp_type_str(init_attr->qp_type));
1156 return ERR_PTR(-EINVAL);
1158 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1161 switch (init_attr->qp_type) {
1162 case IB_QPT_XRC_TGT:
1163 case IB_QPT_XRC_INI:
1164 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
1165 mlx5_ib_dbg(dev, "XRC not supported\n");
1166 return ERR_PTR(-ENOSYS);
1168 init_attr->recv_cq = NULL;
1169 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1170 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1171 init_attr->send_cq = NULL;
1180 case MLX5_IB_QPT_REG_UMR:
1181 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1183 return ERR_PTR(-ENOMEM);
1185 err = create_qp_common(dev, pd, init_attr, udata, qp);
1187 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1189 return ERR_PTR(err);
1192 if (is_qp0(init_attr->qp_type))
1193 qp->ibqp.qp_num = 0;
1194 else if (is_qp1(init_attr->qp_type))
1195 qp->ibqp.qp_num = 1;
1197 qp->ibqp.qp_num = qp->mqp.qpn;
1199 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1200 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1201 to_mcq(init_attr->send_cq)->mcq.cqn);
1207 case IB_QPT_RAW_IPV6:
1208 case IB_QPT_RAW_ETHERTYPE:
1209 case IB_QPT_RAW_PACKET:
1212 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1213 init_attr->qp_type);
1214 /* Don't support raw QPs */
1215 return ERR_PTR(-EINVAL);
1221 int mlx5_ib_destroy_qp(struct ib_qp *qp)
1223 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1224 struct mlx5_ib_qp *mqp = to_mqp(qp);
1226 destroy_qp_common(dev, mqp);
1233 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1236 u32 hw_access_flags = 0;
1240 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1241 dest_rd_atomic = attr->max_dest_rd_atomic;
1243 dest_rd_atomic = qp->resp_depth;
1245 if (attr_mask & IB_QP_ACCESS_FLAGS)
1246 access_flags = attr->qp_access_flags;
1248 access_flags = qp->atomic_rd_en;
1250 if (!dest_rd_atomic)
1251 access_flags &= IB_ACCESS_REMOTE_WRITE;
1253 if (access_flags & IB_ACCESS_REMOTE_READ)
1254 hw_access_flags |= MLX5_QP_BIT_RRE;
1255 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1256 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1257 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1258 hw_access_flags |= MLX5_QP_BIT_RWE;
1260 return cpu_to_be32(hw_access_flags);
1264 MLX5_PATH_FLAG_FL = 1 << 0,
1265 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1266 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1269 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1271 if (rate == IB_RATE_PORT_CURRENT) {
1273 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1276 while (rate != IB_RATE_2_5_GBPS &&
1277 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1278 dev->mdev.caps.stat_rate_support))
1282 return rate + MLX5_STAT_RATE_OFFSET;
1285 static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1286 struct mlx5_qp_path *path, u8 port, int attr_mask,
1287 u32 path_flags, const struct ib_qp_attr *attr)
1291 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1292 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1294 if (attr_mask & IB_QP_PKEY_INDEX)
1295 path->pkey_index = attr->pkey_index;
1297 path->grh_mlid = ah->src_path_bits & 0x7f;
1298 path->rlid = cpu_to_be16(ah->dlid);
1300 if (ah->ah_flags & IB_AH_GRH) {
1301 path->grh_mlid |= 1 << 7;
1302 path->mgid_index = ah->grh.sgid_index;
1303 path->hop_limit = ah->grh.hop_limit;
1304 path->tclass_flowlabel =
1305 cpu_to_be32((ah->grh.traffic_class << 20) |
1306 (ah->grh.flow_label));
1307 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1310 err = ib_rate_to_mlx5(dev, ah->static_rate);
1313 path->static_rate = err;
1316 if (ah->ah_flags & IB_AH_GRH) {
1317 if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
1318 pr_err("sgid_index (%u) too large. max is %d\n",
1319 ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
1323 path->grh_mlid |= 1 << 7;
1324 path->mgid_index = ah->grh.sgid_index;
1325 path->hop_limit = ah->grh.hop_limit;
1326 path->tclass_flowlabel =
1327 cpu_to_be32((ah->grh.traffic_class << 20) |
1328 (ah->grh.flow_label));
1329 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1332 if (attr_mask & IB_QP_TIMEOUT)
1333 path->ackto_lt = attr->timeout << 3;
1335 path->sl = ah->sl & 0xf;
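/*
 * opt_mask[current state][next state][transport] lists the optional
 * parameters the firmware accepts for that transition; the bits derived
 * from the caller's attr_mask are ANDed against this table before being
 * written to the modify-QP mailbox.
 */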
1340 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1341 [MLX5_QP_STATE_INIT] = {
1342 [MLX5_QP_STATE_INIT] = {
1343 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1344 MLX5_QP_OPTPAR_RAE |
1345 MLX5_QP_OPTPAR_RWE |
1346 MLX5_QP_OPTPAR_PKEY_INDEX |
1347 MLX5_QP_OPTPAR_PRI_PORT,
1348 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1349 MLX5_QP_OPTPAR_PKEY_INDEX |
1350 MLX5_QP_OPTPAR_PRI_PORT,
1351 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1352 MLX5_QP_OPTPAR_Q_KEY |
1353 MLX5_QP_OPTPAR_PRI_PORT,
1355 [MLX5_QP_STATE_RTR] = {
1356 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1357 MLX5_QP_OPTPAR_RRE |
1358 MLX5_QP_OPTPAR_RAE |
1359 MLX5_QP_OPTPAR_RWE |
1360 MLX5_QP_OPTPAR_PKEY_INDEX,
1361 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1362 MLX5_QP_OPTPAR_RWE |
1363 MLX5_QP_OPTPAR_PKEY_INDEX,
1364 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1365 MLX5_QP_OPTPAR_Q_KEY,
1366 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1367 MLX5_QP_OPTPAR_Q_KEY,
1368 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1369 MLX5_QP_OPTPAR_RRE |
1370 MLX5_QP_OPTPAR_RAE |
1371 MLX5_QP_OPTPAR_RWE |
1372 MLX5_QP_OPTPAR_PKEY_INDEX,
1375 [MLX5_QP_STATE_RTR] = {
1376 [MLX5_QP_STATE_RTS] = {
1377 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1378 MLX5_QP_OPTPAR_RRE |
1379 MLX5_QP_OPTPAR_RAE |
1380 MLX5_QP_OPTPAR_RWE |
1381 MLX5_QP_OPTPAR_PM_STATE |
1382 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1383 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1384 MLX5_QP_OPTPAR_RWE |
1385 MLX5_QP_OPTPAR_PM_STATE,
1386 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1389 [MLX5_QP_STATE_RTS] = {
1390 [MLX5_QP_STATE_RTS] = {
1391 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1392 MLX5_QP_OPTPAR_RAE |
1393 MLX5_QP_OPTPAR_RWE |
1394 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1395 MLX5_QP_OPTPAR_PM_STATE |
1396 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1397 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1398 MLX5_QP_OPTPAR_PM_STATE |
1399 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1400 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1401 MLX5_QP_OPTPAR_SRQN |
1402 MLX5_QP_OPTPAR_CQN_RCV,
1405 [MLX5_QP_STATE_SQER] = {
1406 [MLX5_QP_STATE_RTS] = {
1407 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1408 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1409 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1410 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1411 MLX5_QP_OPTPAR_RWE |
1412 MLX5_QP_OPTPAR_RAE |
1418 static int ib_nr_to_mlx5_nr(int ib_mask)
1423 case IB_QP_CUR_STATE:
1425 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1427 case IB_QP_ACCESS_FLAGS:
1428 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1430 case IB_QP_PKEY_INDEX:
1431 return MLX5_QP_OPTPAR_PKEY_INDEX;
1433 return MLX5_QP_OPTPAR_PRI_PORT;
1435 return MLX5_QP_OPTPAR_Q_KEY;
1437 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1438 MLX5_QP_OPTPAR_PRI_PORT;
1439 case IB_QP_PATH_MTU:
1442 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1443 case IB_QP_RETRY_CNT:
1444 return MLX5_QP_OPTPAR_RETRY_COUNT;
1445 case IB_QP_RNR_RETRY:
1446 return MLX5_QP_OPTPAR_RNR_RETRY;
1449 case IB_QP_MAX_QP_RD_ATOMIC:
1450 return MLX5_QP_OPTPAR_SRA_MAX;
1451 case IB_QP_ALT_PATH:
1452 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1453 case IB_QP_MIN_RNR_TIMER:
1454 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1457 case IB_QP_MAX_DEST_RD_ATOMIC:
1458 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1459 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1460 case IB_QP_PATH_MIG_STATE:
1461 return MLX5_QP_OPTPAR_PM_STATE;
1464 case IB_QP_DEST_QPN:
1470 static int ib_mask_to_mlx5_opt(int ib_mask)
1475 for (i = 0; i < 8 * sizeof(int); i++) {
1476 if ((1 << i) & ib_mask)
1477 result |= ib_nr_to_mlx5_nr(1 << i);
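/*
 * Core of modify-QP: translate the requested IB state transition into an
 * mlx5 MODIFY_QP mailbox.  The new QP context is built from the attr fields
 * selected by attr_mask, the derived optional-parameter bits are masked
 * against opt_mask[] for this transition, and the firmware command is posted.
 */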
1483 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1484 const struct ib_qp_attr *attr, int attr_mask,
1485 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1487 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1488 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1489 struct mlx5_ib_cq *send_cq, *recv_cq;
1490 struct mlx5_qp_context *context;
1491 struct mlx5_modify_qp_mbox_in *in;
1492 struct mlx5_ib_pd *pd;
1493 enum mlx5_qp_state mlx5_cur, mlx5_new;
1494 enum mlx5_qp_optpar optpar;
1499 in = kzalloc(sizeof(*in), GFP_KERNEL);
1504 err = to_mlx5_st(ibqp->qp_type);
1508 context->flags = cpu_to_be32(err << 16);
1510 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1511 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1513 switch (attr->path_mig_state) {
1514 case IB_MIG_MIGRATED:
1515 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1518 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1521 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1526 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1527 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1528 } else if (ibqp->qp_type == IB_QPT_UD ||
1529 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1530 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1531 } else if (attr_mask & IB_QP_PATH_MTU) {
1532 if (attr->path_mtu < IB_MTU_256 ||
1533 attr->path_mtu > IB_MTU_4096) {
1534 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1538 context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
1541 if (attr_mask & IB_QP_DEST_QPN)
1542 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1544 if (attr_mask & IB_QP_PKEY_INDEX)
1545 context->pri_path.pkey_index = attr->pkey_index;
1547 /* TODO: implement counter_index functionality */
1549 if (is_sqp(ibqp->qp_type))
1550 context->pri_path.port = qp->port;
1552 if (attr_mask & IB_QP_PORT)
1553 context->pri_path.port = attr->port_num;
1555 if (attr_mask & IB_QP_AV) {
1556 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1557 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1558 attr_mask, 0, attr);
1563 if (attr_mask & IB_QP_TIMEOUT)
1564 context->pri_path.ackto_lt |= attr->timeout << 3;
1566 if (attr_mask & IB_QP_ALT_PATH) {
1567 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1568 attr->alt_port_num, attr_mask, 0, attr);
1574 get_cqs(qp, &send_cq, &recv_cq);
1576 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1577 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1578 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1579 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1581 if (attr_mask & IB_QP_RNR_RETRY)
1582 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1584 if (attr_mask & IB_QP_RETRY_CNT)
1585 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1587 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1588 if (attr->max_rd_atomic)
1590 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1593 if (attr_mask & IB_QP_SQ_PSN)
1594 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1596 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1597 if (attr->max_dest_rd_atomic)
1599 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1602 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1603 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1605 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1606 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1608 if (attr_mask & IB_QP_RQ_PSN)
1609 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1611 if (attr_mask & IB_QP_QKEY)
1612 context->qkey = cpu_to_be32(attr->qkey);
1614 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1615 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1617 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1618 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1623 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1624 context->sq_crq_size |= cpu_to_be16(1 << 4);
1627 mlx5_cur = to_mlx5_state(cur_state);
1628 mlx5_new = to_mlx5_state(new_state);
1629 mlx5_st = to_mlx5_st(ibqp->qp_type);
1633 optpar = ib_mask_to_mlx5_opt(attr_mask);
1634 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1635 in->optparam = cpu_to_be32(optpar);
1636 err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
1637 to_mlx5_state(new_state), in, sqd_event,
1642 qp->state = new_state;
1644 if (attr_mask & IB_QP_ACCESS_FLAGS)
1645 qp->atomic_rd_en = attr->qp_access_flags;
1646 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1647 qp->resp_depth = attr->max_dest_rd_atomic;
1648 if (attr_mask & IB_QP_PORT)
1649 qp->port = attr->port_num;
1650 if (attr_mask & IB_QP_ALT_PATH)
1651 qp->alt_port = attr->alt_port_num;
1654 * If we moved a kernel QP to RESET, clean up all old CQ
1655 * entries and reinitialize the QP.
1657 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1658 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1659 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1660 if (send_cq != recv_cq)
1661 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1667 qp->sq.cur_post = 0;
1668 qp->sq.last_poll = 0;
1669 qp->db.db[MLX5_RCV_DBR] = 0;
1670 qp->db.db[MLX5_SND_DBR] = 0;
1678 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1679 int attr_mask, struct ib_udata *udata)
1681 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1682 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1683 enum ib_qp_state cur_state, new_state;
1687 mutex_lock(&qp->mutex);
1689 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1690 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1692 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1693 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1694 IB_LINK_LAYER_UNSPECIFIED))
1697 if ((attr_mask & IB_QP_PORT) &&
1698 (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
1701 if (attr_mask & IB_QP_PKEY_INDEX) {
1702 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1703 if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
1707 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1708 attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
1711 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1712 attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
1715 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1720 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1723 mutex_unlock(&qp->mutex);
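/*
 * Check whether posting nreq more WQEs would overflow the work queue.  The
 * head/tail snapshot is taken lock-free first; only if the queue looks full
 * is it re-read under the CQ lock to synchronize with completion processing.
 */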
1727 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1729 struct mlx5_ib_cq *cq;
1732 cur = wq->head - wq->tail;
1733 if (likely(cur + nreq < wq->max_post))
1737 spin_lock(&cq->lock);
1738 cur = wq->head - wq->tail;
1739 spin_unlock(&cq->lock);
1741 return cur + nreq >= wq->max_post;
1744 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1745 u64 remote_addr, u32 rkey)
1747 rseg->raddr = cpu_to_be64(remote_addr);
1748 rseg->rkey = cpu_to_be32(rkey);
1752 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1753 struct ib_send_wr *wr)
1755 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1756 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1757 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1760 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1762 dseg->byte_count = cpu_to_be32(sg->length);
1763 dseg->lkey = cpu_to_be32(sg->lkey);
1764 dseg->addr = cpu_to_be64(sg->addr);
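/*
 * Size of a page/KLM list in 16-byte octowords: the entry count is padded
 * to a multiple of eight, and two 8-byte entries fit per octoword, which is
 * the granularity the UMR control segment expects.
 */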
1767 static __be16 get_klm_octo(int npages)
1769 return cpu_to_be16(ALIGN(npages, 8) / 2);
1772 static __be64 frwr_mkey_mask(void)
1776 result = MLX5_MKEY_MASK_LEN |
1777 MLX5_MKEY_MASK_PAGE_SIZE |
1778 MLX5_MKEY_MASK_START_ADDR |
1779 MLX5_MKEY_MASK_EN_RINVAL |
1780 MLX5_MKEY_MASK_KEY |
1786 MLX5_MKEY_MASK_SMALL_FENCE |
1787 MLX5_MKEY_MASK_FREE;
1789 return cpu_to_be64(result);
1792 static __be64 sig_mkey_mask(void)
1796 result = MLX5_MKEY_MASK_LEN |
1797 MLX5_MKEY_MASK_PAGE_SIZE |
1798 MLX5_MKEY_MASK_START_ADDR |
1799 MLX5_MKEY_MASK_EN_SIGERR |
1800 MLX5_MKEY_MASK_EN_RINVAL |
1801 MLX5_MKEY_MASK_KEY |
1806 MLX5_MKEY_MASK_SMALL_FENCE |
1807 MLX5_MKEY_MASK_FREE |
1808 MLX5_MKEY_MASK_BSF_EN;
1810 return cpu_to_be64(result);
1813 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1814 struct ib_send_wr *wr, int li)
1816 memset(umr, 0, sizeof(*umr));
1819 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1820 umr->flags = 1 << 7;
1824 umr->flags = (1 << 5); /* fail if not free */
1825 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1826 umr->mkey_mask = frwr_mkey_mask();
1829 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1830 struct ib_send_wr *wr)
1832 struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
1835 memset(umr, 0, sizeof(*umr));
1837 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1838 umr->flags = 1 << 5; /* fail if not free */
1839 umr->klm_octowords = get_klm_octo(umrwr->npages);
1840 mask = MLX5_MKEY_MASK_LEN |
1841 MLX5_MKEY_MASK_PAGE_SIZE |
1842 MLX5_MKEY_MASK_START_ADDR |
1846 MLX5_MKEY_MASK_KEY |
1850 MLX5_MKEY_MASK_FREE;
1851 umr->mkey_mask = cpu_to_be64(mask);
1853 umr->flags = 2 << 5; /* fail if free */
1854 mask = MLX5_MKEY_MASK_FREE;
1855 umr->mkey_mask = cpu_to_be64(mask);
1859 umr->flags |= (1 << 7); /* inline */
1862 static u8 get_umr_flags(int acc)
1864 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1865 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1866 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1867 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1868 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
1871 static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1874 memset(seg, 0, sizeof(*seg));
1876 seg->status = 1 << 6;
1880 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
1881 MLX5_ACCESS_MODE_MTT;
1882 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
1883 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
1884 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1885 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1886 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1887 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
1888 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1891 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1893 memset(seg, 0, sizeof(*seg));
1894 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
1895 seg->status = 1 << 6;
1899 seg->flags = convert_access(wr->wr.fast_reg.access_flags);
1900 seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
1901 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1902 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1903 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1904 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
1905 mlx5_mkey_variant(wr->wr.fast_reg.rkey));
1908 static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
1909 struct ib_send_wr *wr,
1910 struct mlx5_core_dev *mdev,
1911 struct mlx5_ib_pd *pd,
1914 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1915 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
1916 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
1919 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
1920 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
1921 dseg->addr = cpu_to_be64(mfrpl->map);
1922 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
1923 dseg->lkey = cpu_to_be32(pd->pa_lkey);
1926 static __be32 send_ieth(struct ib_send_wr *wr)
1928 switch (wr->opcode) {
1929 case IB_WR_SEND_WITH_IMM:
1930 case IB_WR_RDMA_WRITE_WITH_IMM:
1931 return wr->ex.imm_data;
1933 case IB_WR_SEND_WITH_INV:
1934 return cpu_to_be32(wr->ex.invalidate_rkey);
1941 static u8 calc_sig(void *wqe, int size)
1947 for (i = 0; i < size; i++)
1953 static u8 wq_sig(void *wqe)
1955 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
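/*
 * Copy the scatter list payload inline into the send WQE.  The copy wraps
 * back to the start of the SQ buffer on hitting qp->sq.qend, the total is
 * recorded with the MLX5_INLINE_SEG flag in the inline segment header, and
 * *sz returns the consumed size in 16-byte units.
 */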
1958 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
1961 struct mlx5_wqe_inline_seg *seg;
1962 void *qend = qp->sq.qend;
1970 wqe += sizeof(*seg);
1971 for (i = 0; i < wr->num_sge; i++) {
1972 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
1973 len = wr->sg_list[i].length;
1976 if (unlikely(inl > qp->max_inline_data))
1979 if (unlikely(wqe + len > qend)) {
1981 memcpy(wqe, addr, copy);
1984 wqe = mlx5_get_send_wqe(qp, 0);
1986 memcpy(wqe, addr, len);
1990 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
1992 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
1997 static u16 prot_field_size(enum ib_signature_type type)
2000 case IB_SIG_TYPE_T10_DIF:
2001 return MLX5_DIF_SIZE;
2007 static u8 bs_selector(int block_size)
2009 switch (block_size) {
2010 case 512: return 0x1;
2011 case 520: return 0x2;
2012 case 4096: return 0x3;
2013 case 4160: return 0x4;
2014 case 1073741824: return 0x5;
2019 static int format_selector(struct ib_sig_attrs *attr,
2020 struct ib_sig_domain *domain,
2024 #define FORMAT_DIF_NONE 0
2025 #define FORMAT_DIF_CRC_INC 8
2026 #define FORMAT_DIF_CRC_NO_INC 12
2027 #define FORMAT_DIF_CSUM_INC 13
2028 #define FORMAT_DIF_CSUM_NO_INC 14
2030 switch (domain->sig.dif.type) {
2031 case IB_T10DIF_NONE:
2033 *selector = FORMAT_DIF_NONE;
2035 case IB_T10DIF_TYPE1: /* Fall through */
2036 case IB_T10DIF_TYPE2:
2037 switch (domain->sig.dif.bg_type) {
2039 *selector = FORMAT_DIF_CRC_INC;
2041 case IB_T10DIF_CSUM:
2042 *selector = FORMAT_DIF_CSUM_INC;
2048 case IB_T10DIF_TYPE3:
2049 switch (domain->sig.dif.bg_type) {
2051 *selector = domain->sig.dif.type3_inc_reftag ?
2052 FORMAT_DIF_CRC_INC :
2053 FORMAT_DIF_CRC_NO_INC;
2055 case IB_T10DIF_CSUM:
2056 *selector = domain->sig.dif.type3_inc_reftag ?
2057 FORMAT_DIF_CSUM_INC :
2058 FORMAT_DIF_CSUM_NO_INC;
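/*
 * Fill the BSF descriptor that tells the HCA how to generate and check
 * T10-DIF protection information for the memory and wire domains of a
 * signature handover operation.
 */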
2071 static int mlx5_set_bsf(struct ib_mr *sig_mr,
2072 struct ib_sig_attrs *sig_attrs,
2073 struct mlx5_bsf *bsf, u32 data_size)
2075 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
2076 struct mlx5_bsf_basic *basic = &bsf->basic;
2077 struct ib_sig_domain *mem = &sig_attrs->mem;
2078 struct ib_sig_domain *wire = &sig_attrs->wire;
2081 switch (sig_attrs->mem.sig_type) {
2082 case IB_SIG_TYPE_T10_DIF:
2083 if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
2086 /* Input domain check byte mask */
2087 basic->check_byte_mask = sig_attrs->check_mask;
2088 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
2089 mem->sig.dif.type == wire->sig.dif.type) {
2090 /* Same block structure */
2091 basic->bsf_size_sbs = 1 << 4;
2092 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
2093 basic->wire.copy_byte_mask = 0xff;
2095 basic->wire.copy_byte_mask = 0x3f;
2097 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
2099 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
2100 basic->raw_data_size = cpu_to_be32(data_size);
2102 ret = format_selector(sig_attrs, mem, &selector);
2105 basic->m_bfs_psv = cpu_to_be32(selector << 24 |
2106 msig->psv_memory.psv_idx);
2108 ret = format_selector(sig_attrs, wire, &selector);
2111 basic->w_bfs_psv = cpu_to_be32(selector << 24 |
2112 msig->psv_wire.psv_idx);
2122 static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2123 void **seg, int *size)
2125 struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
2126 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2127 struct mlx5_bsf *bsf;
2128 u32 data_len = wr->sg_list->length;
2129 u32 data_key = wr->sg_list->lkey;
2130 u64 data_va = wr->sg_list->addr;
2134 if (!wr->wr.sig_handover.prot) {
2136 * Source domain doesn't contain signature information
2137 * So we need to construct:
2138 * ------------------
2139 * |     data_klm     |
2140 * ------------------
2141 * |       BSF        |
2142 * ------------------
2144 struct mlx5_klm *data_klm = *seg;
2146 data_klm->bcount = cpu_to_be32(data_len);
2147 data_klm->key = cpu_to_be32(data_key);
2148 data_klm->va = cpu_to_be64(data_va);
2149 wqe_size = ALIGN(sizeof(*data_klm), 64);
2152 * Source domain contains signature information
2153 * So we need to construct a strided block format:
2154 * ---------------------------
2155 * |    stride_block_ctrl    |
2156 * ---------------------------
2157 * |       data_sentry       |
2158 * ---------------------------
2159 * |       prot_sentry       |
2160 * ---------------------------
2161 * |           BSF           |
2162 * ---------------------------
2164 struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
2165 struct mlx5_stride_block_entry *data_sentry;
2166 struct mlx5_stride_block_entry *prot_sentry;
2167 u32 prot_key = wr->wr.sig_handover.prot->lkey;
2168 u64 prot_va = wr->wr.sig_handover.prot->addr;
2169 u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
2173 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
2174 prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
2176 prot_size = prot_field_size(sig_attrs->mem.sig_type);
2178 pr_err("Bad block size given: %u\n", block_size);
2181 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
2183 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
2184 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
2185 sblock_ctrl->num_entries = cpu_to_be16(2);
2187 data_sentry->bcount = cpu_to_be16(block_size);
2188 data_sentry->key = cpu_to_be32(data_key);
2189 data_sentry->va = cpu_to_be64(data_va);
2190 prot_sentry->bcount = cpu_to_be16(prot_size);
2191 prot_sentry->key = cpu_to_be32(prot_key);
2193 if (prot_key == data_key && prot_va == data_va) {
2195 * The data and protection are interleaved
2196 * in a single memory region
2198 prot_sentry->va = cpu_to_be64(data_va + block_size);
2199 prot_sentry->stride = cpu_to_be16(block_size + prot_size);
2200 data_sentry->stride = prot_sentry->stride;
2202 /* The data and protection are two different buffers */
2203 prot_sentry->va = cpu_to_be64(prot_va);
2204 data_sentry->stride = cpu_to_be16(block_size);
2205 prot_sentry->stride = cpu_to_be16(prot_size);
2207 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
2208 sizeof(*prot_sentry), 64);
2212 *size += wqe_size / 16;
2213 if (unlikely((*seg == qp->sq.qend)))
2214 *seg = mlx5_get_send_wqe(qp, 0);
2217 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
2221 *seg += sizeof(*bsf);
2222 *size += sizeof(*bsf) / 16;
2223 if (unlikely((*seg == qp->sq.qend)))
2224 *seg = mlx5_get_send_wqe(qp, 0);
2229 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
2230 struct ib_send_wr *wr, u32 nelements,
2231 u32 length, u32 pdn)
2233 struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
2234 u32 sig_key = sig_mr->rkey;
2235 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
2237 memset(seg, 0, sizeof(*seg));
2239 seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
2240 MLX5_ACCESS_MODE_KLM;
2241 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
2242 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
2243 MLX5_MKEY_BSF_EN | pdn);
2244 seg->len = cpu_to_be64(length);
2245 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
2246 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
2249 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
2250 struct ib_send_wr *wr, u32 nelements)
2252 memset(umr, 0, sizeof(*umr));
2254 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
2255 umr->klm_octowords = get_klm_octo(nelements);
2256 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
2257 umr->mkey_mask = sig_mkey_mask();
2261 static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
2262 void **seg, int *size)
2264 struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
2265 u32 pdn = get_pd(qp)->pdn;
2267 int region_len, ret;
2269 if (unlikely(wr->num_sge != 1) ||
2270 unlikely(wr->wr.sig_handover.access_flags &
2271 IB_ACCESS_REMOTE_ATOMIC) ||
2272 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
2273 unlikely(!sig_mr->sig->sig_status_checked))
2276 /* length of the protected region, data + protection */
2277 region_len = wr->sg_list->length;
2278 if (wr->wr.sig_handover.prot)
2279 region_len += wr->wr.sig_handover.prot->length;
2282 * KLM octoword size - if protection was provided
2283 * then we use strided block format (3 octowords),
2284 * else we use single KLM (1 octoword)
2286 klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
2288 set_sig_umr_segment(*seg, wr, klm_oct_size);
2289 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2290 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2291 if (unlikely((*seg == qp->sq.qend)))
2292 *seg = mlx5_get_send_wqe(qp, 0);
2294 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
2295 *seg += sizeof(struct mlx5_mkey_seg);
2296 *size += sizeof(struct mlx5_mkey_seg) / 16;
2297 if (unlikely((*seg == qp->sq.qend)))
2298 *seg = mlx5_get_send_wqe(qp, 0);
2300 ret = set_sig_data_segment(wr, qp, seg, size);
2304 sig_mr->sig->sig_status_checked = false;
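/*
 * Build a SET_PSV segment seeding the PSV (signature value context) of one
 * signature domain with its initial T10-DIF guard/app-tag/ref-tag values.
 */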
2308 static int set_psv_wr(struct ib_sig_domain *domain,
2309 u32 psv_idx, void **seg, int *size)
2311 struct mlx5_seg_set_psv *psv_seg = *seg;
2313 memset(psv_seg, 0, sizeof(*psv_seg));
2314 psv_seg->psv_num = cpu_to_be32(psv_idx);
2315 switch (domain->sig_type) {
2316 case IB_SIG_TYPE_T10_DIF:
2317 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
2318 domain->sig.dif.app_tag);
2319 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
2321 *seg += sizeof(*psv_seg);
2322 *size += sizeof(*psv_seg) / 16;
2326 pr_err("Bad signature type given.\n");
2333 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
2334 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
2339 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
2340 if (unlikely(wr->send_flags & IB_SEND_INLINE))
2343 set_frwr_umr_segment(*seg, wr, li);
2344 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2345 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2346 if (unlikely((*seg == qp->sq.qend)))
2347 *seg = mlx5_get_send_wqe(qp, 0);
2348 set_mkey_segment(*seg, wr, li, &writ);
2349 *seg += sizeof(struct mlx5_mkey_seg);
2350 *size += sizeof(struct mlx5_mkey_seg) / 16;
2351 if (unlikely((*seg == qp->sq.qend)))
2352 *seg = mlx5_get_send_wqe(qp, 0);
2354 if (unlikely(wr->wr.fast_reg.page_list_len >
2355 wr->wr.fast_reg.page_list->max_page_list_len))
2358 set_frwr_pages(*seg, wr, mdev, pd, writ);
2359 *seg += sizeof(struct mlx5_wqe_data_seg);
2360 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
2365 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
2371 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
2372 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
2373 if ((i & 0xf) == 0) {
2374 void *buf = mlx5_get_send_wqe(qp, tidx);
2375 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
2379 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
2380 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
2381 be32_to_cpu(p[j + 3]));
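/*
 * Copy a WQE to the BlueFlame register 64 bytes at a time (eight 8-byte
 * MMIO copies per iteration), wrapping the source pointer when it reaches
 * the end of the send queue buffer.
 */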
2385 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2386 unsigned bytecnt, struct mlx5_ib_qp *qp)
2388 while (bytecnt > 0) {
2389 __iowrite64_copy(dst++, src++, 8);
2390 __iowrite64_copy(dst++, src++, 8);
2391 __iowrite64_copy(dst++, src++, 8);
2392 __iowrite64_copy(dst++, src++, 8);
2393 __iowrite64_copy(dst++, src++, 8);
2394 __iowrite64_copy(dst++, src++, 8);
2395 __iowrite64_copy(dst++, src++, 8);
2396 __iowrite64_copy(dst++, src++, 8);
2398 if (unlikely(src == qp->sq.qend))
2399 src = mlx5_get_send_wqe(qp, 0);
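/*
 * Pick the fence mode for a WQE: fenced local invalidates get strong
 * ordering; otherwise IB_SEND_FENCE is applied relative to the fence
 * state cached from the previous WQE.
 */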
2403 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2405 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2406 wr->send_flags & IB_SEND_FENCE))
2407 return MLX5_FENCE_MODE_STRONG_ORDERING;
2409 if (unlikely(fence)) {
2410 if (wr->send_flags & IB_SEND_FENCE)
2411 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
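/*
 * Reserve the next send WQE slot: check for send queue overflow, locate
 * the WQE by the current producer index, and initialize its control
 * segment from the work request flags.
 */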
2420 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
2421 struct mlx5_wqe_ctrl_seg **ctrl,
2422 struct ib_send_wr *wr, int *idx,
2423 int *size, int nreq)
2427 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2432 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2433 *seg = mlx5_get_send_wqe(qp, *idx);
2435 *(uint32_t *)(*seg + 8) = 0;
2436 (*ctrl)->imm = send_ieth(wr);
2437 (*ctrl)->fm_ce_se = qp->sq_signal_bits |
2438 (wr->send_flags & IB_SEND_SIGNALED ?
2439 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2440 (wr->send_flags & IB_SEND_SOLICITED ?
2441 MLX5_WQE_CTRL_SOLICITED : 0);
2443 *seg += sizeof(**ctrl);
2444 *size = sizeof(**ctrl) / 16;
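/*
 * Finalize a WQE: fill in the opcode/index and size words of the control
 * segment, record bookkeeping used at completion time, and advance the
 * producer index by the number of basic blocks the WQE occupies.
 */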
2449 static void finish_wqe(struct mlx5_ib_qp *qp,
2450 struct mlx5_wqe_ctrl_seg *ctrl,
2451 u8 size, unsigned idx, u64 wr_id,
2452 int nreq, u8 fence, u8 next_fence,
2457 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2458 mlx5_opcode | ((u32)opmod << 24));
2459 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2460 ctrl->fm_ce_se |= fence;
2461 qp->fm_cache = next_fence;
2462 if (unlikely(qp->wq_sig))
2463 ctrl->signature = wq_sig(ctrl);
2465 qp->sq.wrid[idx] = wr_id;
2466 qp->sq.w_list[idx].opcode = mlx5_opcode;
2467 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2468 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2469 qp->sq.w_list[idx].next = qp->sq.cur_post;
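/*
 * Post a list of send work requests.  Each request is translated into a
 * WQE whose segments depend on the QP type and opcode; IB_WR_REG_SIG_MR
 * additionally emits UMR and SET_PSV WQEs.  The doorbell is rung once,
 * after all requests have been written.
 */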
2473 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2474 struct ib_send_wr **bad_wr)
2476 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2477 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2478 struct mlx5_core_dev *mdev = &dev->mdev;
2479 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2480 struct mlx5_ib_mr *mr;
2481 struct mlx5_wqe_data_seg *dpseg;
2482 struct mlx5_wqe_xrc_seg *xrc;
2483 struct mlx5_bf *bf = qp->bf;
2484 int uninitialized_var(size);
2485 void *qend = qp->sq.qend;
2486 unsigned long flags;
2497 spin_lock_irqsave(&qp->sq.lock, flags);
2499 for (nreq = 0; wr; nreq++, wr = wr->next) {
2500 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
2501 mlx5_ib_warn(dev, "invalid send opcode 0x%x\n", wr->opcode);
2507 fence = qp->fm_cache;
2508 num_sge = wr->num_sge;
2509 if (unlikely(num_sge > qp->sq.max_gs)) {
2510 mlx5_ib_warn(dev, "max gs exceeded %d (max = %d)\n", num_sge, qp->sq.max_gs);
2516 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
2518 mlx5_ib_warn(dev, "send queue overflow\n");
2524 switch (ibqp->qp_type) {
2525 case IB_QPT_XRC_INI:
2527 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2528 seg += sizeof(*xrc);
2529 size += sizeof(*xrc) / 16;
2532 switch (wr->opcode) {
2533 case IB_WR_RDMA_READ:
2534 case IB_WR_RDMA_WRITE:
2535 case IB_WR_RDMA_WRITE_WITH_IMM:
2536 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2538 seg += sizeof(struct mlx5_wqe_raddr_seg);
2539 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2542 case IB_WR_ATOMIC_CMP_AND_SWP:
2543 case IB_WR_ATOMIC_FETCH_AND_ADD:
2544 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2545 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2550 case IB_WR_LOCAL_INV:
2551 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2552 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2553 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2554 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2556 mlx5_ib_warn(dev, "failed to build LOCAL_INV WQE segments\n");
2563 case IB_WR_FAST_REG_MR:
2564 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2565 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2566 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2567 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2569 mlx5_ib_warn(dev, "failed to build FAST_REG_MR WQE segments\n");
2576 case IB_WR_REG_SIG_MR:
2577 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
2578 mr = to_mmr(wr->wr.sig_handover.sig_mr);
2580 ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
2581 err = set_sig_umr_wr(wr, qp, &seg, &size);
2583 mlx5_ib_warn(dev, "failed to build REG_SIG_MR UMR WQE segments\n");
2588 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2589 nreq, get_fence(fence, wr),
2590 next_fence, MLX5_OPCODE_UMR);
2592 /* SET_PSV WQEs are not signaled and solicited on error */
2595 wr->send_flags &= ~IB_SEND_SIGNALED;
2596 wr->send_flags |= IB_SEND_SOLICITED;
2597 err = begin_wqe(qp, &seg, &ctrl, wr,
2600 mlx5_ib_warn(dev, "send queue overflow\n");
2606 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
2607 mr->sig->psv_memory.psv_idx, &seg,
2610 mlx5_ib_warn(dev, "failed to build SET_PSV WQE for the memory domain\n");
2615 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2616 nreq, get_fence(fence, wr),
2617 next_fence, MLX5_OPCODE_SET_PSV);
2618 err = begin_wqe(qp, &seg, &ctrl, wr,
2621 mlx5_ib_warn(dev, "send queue overflow\n");
2627 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2628 err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
2629 mr->sig->psv_wire.psv_idx, &seg,
2632 mlx5_ib_warn(dev, "failed to build SET_PSV WQE for the wire domain\n");
2637 finish_wqe(qp, ctrl, size, idx, wr->wr_id,
2638 nreq, get_fence(fence, wr),
2639 next_fence, MLX5_OPCODE_SET_PSV);
2649 switch (wr->opcode) {
2650 case IB_WR_RDMA_WRITE:
2651 case IB_WR_RDMA_WRITE_WITH_IMM:
2652 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2654 seg += sizeof(struct mlx5_wqe_raddr_seg);
2655 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2666 set_datagram_seg(seg, wr);
2667 seg += sizeof(struct mlx5_wqe_datagram_seg);
2668 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2669 if (unlikely((seg == qend)))
2670 seg = mlx5_get_send_wqe(qp, 0);
2673 case MLX5_IB_QPT_REG_UMR:
2674 if (wr->opcode != MLX5_IB_WR_UMR) {
2676 mlx5_ib_warn(dev, "bad opcode\n");
2679 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2680 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2681 set_reg_umr_segment(seg, wr);
2682 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2683 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2684 if (unlikely((seg == qend)))
2685 seg = mlx5_get_send_wqe(qp, 0);
2686 set_reg_mkey_segment(seg, wr);
2687 seg += sizeof(struct mlx5_mkey_seg);
2688 size += sizeof(struct mlx5_mkey_seg) / 16;
2689 if (unlikely((seg == qend)))
2690 seg = mlx5_get_send_wqe(qp, 0);
2697 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2698 int uninitialized_var(sz);
2700 err = set_data_inl_seg(qp, wr, seg, &sz);
2701 if (unlikely(err)) {
2702 mlx5_ib_warn(dev, "failed to build inline data segment\n");
2710 for (i = 0; i < num_sge; i++) {
2711 if (unlikely(dpseg == qend)) {
2712 seg = mlx5_get_send_wqe(qp, 0);
2715 if (likely(wr->sg_list[i].length)) {
2716 set_data_ptr_seg(dpseg, wr->sg_list + i);
2717 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2723 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
2724 get_fence(fence, wr), next_fence,
2725 mlx5_ib_opcode[wr->opcode]);
2728 dump_wqe(qp, idx, size);
2733 qp->sq.head += nreq;
2735 /* Make sure that descriptors are written before
2736 * updating doorbell record and ringing the doorbell */
2740 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2742 /* Make sure doorbell record is visible to the HCA before
2743 * we hit doorbell */
2747 spin_lock(&bf->lock);
2750 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2751 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2754 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2755 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2756 /* Make sure doorbells don't leak out of SQ spinlock
2757 * and reach the HCA out of order. */
2761 bf->offset ^= bf->buf_size;
2763 spin_unlock(&bf->lock);
2766 spin_unlock_irqrestore(&qp->sq.lock, flags);
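/* Fill in the signature field of a receive WQE (wq_sig QPs only). */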
2771 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2773 sig->signature = calc_sig(sig, size);
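/*
 * Post a list of receive work requests, building one scatter list per
 * WQE and terminating short lists with an MLX5_INVALID_LKEY entry.
 */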
2776 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2777 struct ib_recv_wr **bad_wr)
2779 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2780 struct mlx5_wqe_data_seg *scat;
2781 struct mlx5_rwqe_sig *sig;
2782 unsigned long flags;
2788 spin_lock_irqsave(&qp->rq.lock, flags);
2790 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2792 for (nreq = 0; wr; nreq++, wr = wr->next) {
2793 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2799 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2805 scat = get_recv_wqe(qp, ind);
2809 for (i = 0; i < wr->num_sge; i++)
2810 set_data_ptr_seg(scat + i, wr->sg_list + i);
2812 if (i < qp->rq.max_gs) {
2813 scat[i].byte_count = 0;
2814 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2819 sig = (struct mlx5_rwqe_sig *)scat;
2820 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2823 qp->rq.wrid[ind] = wr->wr_id;
2825 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2830 qp->rq.head += nreq;
2832 /* Make sure that descriptors are written before
2837 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2840 spin_unlock_irqrestore(&qp->rq.lock, flags);
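/*
 * The helpers below translate hardware QP state, path migration state
 * and access flag encodings into their IB verbs equivalents, for use by
 * mlx5_ib_query_qp().
 */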
2845 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2847 switch (mlx5_state) {
2848 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2849 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2850 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2851 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2852 case MLX5_QP_STATE_SQ_DRAINING:
2853 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2854 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2855 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2860 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2862 switch (mlx5_mig_state) {
2863 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2864 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2865 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2870 static int to_ib_qp_access_flags(int mlx5_flags)
2874 if (mlx5_flags & MLX5_QP_BIT_RRE)
2875 ib_flags |= IB_ACCESS_REMOTE_READ;
2876 if (mlx5_flags & MLX5_QP_BIT_RWE)
2877 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2878 if (mlx5_flags & MLX5_QP_BIT_RAE)
2879 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
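/*
 * Convert a hardware path context into an ib_ah_attr, filling in the GRH
 * fields when the path carries a GRH.
 */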
2884 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2885 struct mlx5_qp_path *path)
2887 struct mlx5_core_dev *dev = &ibdev->mdev;
2889 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2890 ib_ah_attr->port_num = path->port;
2892 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
2895 ib_ah_attr->sl = path->sl & 0xf;
2897 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2898 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2899 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2900 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2901 if (ib_ah_attr->ah_flags) {
2902 ib_ah_attr->grh.sgid_index = path->mgid_index;
2903 ib_ah_attr->grh.hop_limit = path->hop_limit;
2904 ib_ah_attr->grh.traffic_class =
2905 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2906 ib_ah_attr->grh.flow_label =
2907 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2908 memcpy(ib_ah_attr->grh.dgid.raw,
2909 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
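/*
 * Query the QP context from firmware and translate it into ib_qp_attr /
 * ib_qp_init_attr form.  Send queue capabilities are reported only for
 * kernel QPs; userspace tracks its own send queue sizing.
 */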
2913 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2914 struct ib_qp_init_attr *qp_init_attr)
2916 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2917 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2918 struct mlx5_query_qp_mbox_out *outb;
2919 struct mlx5_qp_context *context;
2923 mutex_lock(&qp->mutex);
2924 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
2929 context = &outb->ctx;
2930 err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
2934 mlx5_state = be32_to_cpu(context->flags) >> 28;
2936 qp->state = to_ib_qp_state(mlx5_state);
2937 qp_attr->qp_state = qp->state;
2938 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2939 qp_attr->path_mig_state =
2940 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2941 qp_attr->qkey = be32_to_cpu(context->qkey);
2942 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2943 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2944 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2945 qp_attr->qp_access_flags =
2946 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2948 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2949 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2950 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2951 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2952 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2955 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2956 qp_attr->port_num = context->pri_path.port;
2958 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2959 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2961 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2963 qp_attr->max_dest_rd_atomic =
2964 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
2965 qp_attr->min_rnr_timer =
2966 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
2967 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
2968 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
2969 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
2970 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
2971 qp_attr->cur_qp_state = qp_attr->qp_state;
2972 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
2973 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
2975 if (!ibqp->uobject) {
2976 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
2977 qp_attr->cap.max_send_sge = qp->sq.max_gs;
2979 qp_attr->cap.max_send_wr = 0;
2980 qp_attr->cap.max_send_sge = 0;
2983 /* We don't support inline sends for kernel QPs (yet), and we
2984 * don't know what userspace's value should be. */
2986 qp_attr->cap.max_inline_data = 0;
2988 qp_init_attr->cap = qp_attr->cap;
2990 qp_init_attr->create_flags = 0;
2991 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2992 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
2994 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
2995 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
3001 mutex_unlock(&qp->mutex);
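/* Allocate an XRC domain; fails with -ENOSYS if the device lacks XRC support. */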
3005 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3006 struct ib_ucontext *context,
3007 struct ib_udata *udata)
3009 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3010 struct mlx5_ib_xrcd *xrcd;
3013 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
3014 return ERR_PTR(-ENOSYS);
3016 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
3018 return ERR_PTR(-ENOMEM);
3020 err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
3023 return ERR_PTR(-ENOMEM);
3026 return &xrcd->ibxrcd;
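/* Release an XRC domain previously allocated by mlx5_ib_alloc_xrcd(). */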
3029 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
3031 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
3032 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
3035 err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
3037 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);