/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"
/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
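
/*
 * Note: the send queue is addressed in basic blocks of
 * 1 << MLX5_IB_SQ_STRIDE = 64 bytes (the same quantity as
 * MLX5_SEND_WQE_BB); a WQE that advertises N "ds" units of
 * MLX5_WQE_DS_UNITS (16) bytes therefore spans
 * DIV_ROUND_UP(N * 16, 64) basic blocks.
 */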
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
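
/*
 * Example: with rq.wqe_shift == 6, receive WQE n lives at byte offset
 * rq.offset + n * 64 within qp->buf.  Send WQEs are always indexed in
 * 64-byte basic blocks (MLX5_IB_SQ_STRIDE), independent of how large
 * an individual send WQE actually is.
 */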
/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queue, it is the number of work queue
 *	       element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = qp->umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}
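
/*
 * A sketch of the intended use (based on the ODP paging code in this
 * driver; no new caller is added here): the page-fault handler reads
 * the faulting WQE out of user memory before parsing it, e.g.
 *
 *	ret = mlx5_ib_read_user_wqe(qp, 1, wqe_index, buf, sizeof(buf));
 *	if (ret < 0)
 *		return ret;
 *	ctrl = (struct mlx5_wqe_ctrl_seg *)buf;
 *
 * and only then walks the segments, because a WQE may wrap past the
 * end of the work queue and arrive via the two-copy path above.
 */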
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > gen->max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > gen->max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    gen->max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
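
/*
 * Worked example for the kernel (!ucmd) path above: max_recv_sge = 3
 * and wq_sig = 0 give wqe_size = 3 * 16 = 48, rounded up to 64.  With
 * max_recv_wr = 100, wq_size = 128 * 64, so wqe_cnt = 128,
 * wqe_shift = 6 and max_gs = 64 / 16 - 0 = 4 scatter entries per WQE.
 */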
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
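
/*
 * The result is always a multiple of MLX5_SEND_WQE_BB (64 bytes); for
 * signature-enabled QPs it is bumped to at least MLX5_SIG_WQE_SIZE so
 * that a single post always has room for the UMR, mkey and BSF
 * segments of a signature handover operation.
 */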
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > gen->max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, gen->max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int desc_sz = 1 << qp->sq.wqe_shift;

	gen = &dev->mdev->caps.gen;
	if (desc_sz > gen->max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, gen->max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, gen->max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << MLX5_IB_SQ_STRIDE);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
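
/*
 * UUAR layout, for reference (inferred from the allocators above):
 * each UAR page carries MLX5_BF_REGS_PER_PAGE (4) blue-flame
 * registers, of which only the first two of every group of four are
 * handed out here -- next_uuar() skips indices with (n % 4) & 2 set,
 * which this driver keeps for the fast path.  Index 0 is the shared
 * low-latency-class register; it is reference-counted but never set
 * in the bitmap.  Medium- and high-class allocations split the
 * remaining non-fast-path registers, with the high (low-latency)
 * range starting at first_hi_uuar().
 */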
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	default:			return -EINVAL;
	}
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
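
/*
 * Example: with MLX5_BF_REGS_PER_PAGE == 4, uuarn 5 maps to the UAR
 * of page 1 (uuari->uars[1].index); four consecutive uuarns share one
 * UAR page.
 */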
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
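
/*
 * Receive-side type selection, in short: QPs attached to an SRQ and
 * XRC QPs have no RQ of their own (MLX5_SRQ_RQ); QPs without an RQ at
 * all get MLX5_ZERO_LEN_RQ; everything else gets a real RQ
 * (MLX5_NON_ZERO_RQ).
 */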
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_general_caps *gen;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mlx5_ib_odp_create_qp(qp);

	gen = &dev->mdev->caps.gen;
	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > gen->max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, gen->max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell. Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	kvfree(in);
	return err;
}
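
/*
 * CQ locking below: when a QP is torn down its CQEs must be purged
 * from both CQs while nobody polls them.  Taking the two spinlocks in
 * ascending CQN order (and only once when send and receive share a
 * CQ) avoids an AB-BA deadlock against other QPs that use the same
 * pair of CQs in opposite roles; the __acquire/__release annotations
 * keep sparse happy about the branches that take only one real lock.
 */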
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET) {
		mlx5_ib_qp_disable_pagefaults(qp);
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, 0, &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);
	}

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_general_caps *gen;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}
	gen = &dev->mdev->caps.gen;

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->xrcdn = xrcdn;

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
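
/*
 * Note the interaction above: if the destination has no RDMA-read
 * resources (dest_rd_atomic == 0), everything except remote write is
 * masked out.  For example, attr->qp_access_flags =
 * IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE combined with
 * max_dest_rd_atomic = 0 programs only MLX5_QP_BIT_RWE.
 */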
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 gen->stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	struct mlx5_general_caps *gen;
	int err;

	gen = &dev->mdev->caps.gen;
	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
			return -EINVAL;
		}
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
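
/*
 * Example: attr_mask = IB_QP_STATE | IB_QP_AV expands to
 * MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX5_QP_OPTPAR_PRI_PORT.  The
 * caller then intersects the result with opt_mask[][][] for the
 * specific transition, so bits that are mandatory rather than
 * optional for that transition drop out.
 */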
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_general_caps *gen;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	gen = &dev->mdev->caps.gen;
	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);


	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	/* If moving to a reset or error state, we must disable page faults on
	 * this QP and flush all current page faults. Otherwise a stale page
	 * fault may attempt to work on this QP after it is reset and moved
	 * again to RTS, and may cause the driver and the device to get out of
	 * sync. */
	if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
	    (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
		mlx5_ib_qp_disable_pagefaults(qp);

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		mlx5_ib_qp_enable_pagefaults(qp);

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mlx5_general_caps *gen;
	int err = -EINVAL;
	int port;

	gen = &dev->mdev->caps.gen;
	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
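
/*
 * The second head - tail computation is done under the CQ lock
 * because wq->tail advances from the CQ polling context; re-reading
 * it with the lock held gives an up-to-date free-slot count before
 * declaring overflow, so the fast path above can stay lockless.
 */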
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
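
/*
 * Example: each page translation entry is 8 bytes and an octoword is
 * 16 bytes, so npages = 5 is padded to 8 entries and reported as
 * 8 * 8 / 16 = 4 octowords (64 bytes); translation lists are always
 * handed to the hardware in such 64-byte chunks.
 */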
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}

static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}

static __be64 get_umr_reg_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_PD		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_unreg_mr_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}

static __be64 get_umr_update_mtt_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	memset(umr, 0, sizeof(*umr));

	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
	else
		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
			umr->mkey_mask = get_umr_update_mtt_mask();
			umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
			umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
		} else {
			umr->mkey_mask = get_umr_reg_mr_mask();
		}
	} else {
		umr->mkey_mask = get_umr_unreg_mr_mask();
	}

	if (!wr->num_sge)
		umr->flags |= MLX5_UMR_INLINE;
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}

static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = MLX5_MKEY_STATUS_FREE;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
		     MLX5_ACCESS_MODE_MTT;
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;

	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = MLX5_MKEY_STATUS_FREE;
		return;
	}

	seg->flags = convert_access(umrwr->access_flags);
	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
		seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
	}
	seg->len = cpu_to_be64(umrwr->length);
	seg->log2_page_size = umrwr->page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(umrwr->mkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
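
/*
 * calc_sig() folds the WQE into a single byte by XOR and returns its
 * complement.  wq_sig() takes the WQE size from the low six bits of
 * byte 8 of the WQE, in 16-byte units ((x & 0x3f) << 4 bytes), so the
 * signature covers the whole descriptor as the hardware will read it.
 */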
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}

static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
				void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->sg_list->length;
	u32 data_key = wr->sg_list->lkey;
	u64 data_va = wr->sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->wr.sig_handover.prot ||
	    (data_key == wr->wr.sig_handover.prot->lkey &&
	     data_va == wr->wr.sig_handover.prot->addr &&
	     data_len == wr->wr.sig_handover.prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->wr.sig_handover.prot->lkey;
		u64 prot_va = wr->wr.sig_handover.prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_send_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
		     MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->num_sge != 1) ||
	    unlikely(wr->wr.sig_handover.access_flags &
		     IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->sg_list->length;
	if (wr->wr.sig_handover.prot &&
	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
		region_len += wr->wr.sig_handover.prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;

	set_sig_umr_segment(*seg, wr, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}
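/*
 * Illustrative sketch, not part of the driver: roughly how a consumer
 * might fill struct ib_sig_attrs for the IB_WR_REG_SIG_MR request handled
 * above. Only fields this file actually reads are set; the rest of the
 * signature verbs API is version dependent, so treat this as an
 * assumption rather than a reference.
 */
static inline void example_fill_sig_attrs(struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	/* memory domain carries T10-DIF: 8 bytes of PI every 512 bytes */
	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->mem.sig.dif.pi_interval = 512;
	sig_attrs->mem.sig.dif.app_tag = 0x5678;
	sig_attrs->mem.sig.dif.ref_tag = 0x12345678;

	/* wire domain unprotected, so protection stays memory-side only */
	sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
}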
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type given.\n");
		return 1;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}
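/*
 * Illustration only, not part of the driver: how set_psv_wr() packs the
 * transient signature word for T10-DIF. The guard (bg) occupies the high
 * 16 bits and the application tag the low 16 bits.
 */
static inline u32 example_transient_sig(u16 bg, u16 app_tag)
{
	/* e.g. bg = 0xabcd, app_tag = 0x1234 -> 0xabcd1234 */
	return (u32)bg << 16 | app_tag;
}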
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);

			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		__iowrite64_copy(dst++, src++, 8);
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
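/*
 * Illustration only, not part of the driver: each loop iteration above
 * issues eight 8-byte MMIO copies, i.e. one 64-byte chunk - the same 64
 * bytes as a send WQE basic block - and bytecnt is assumed to be a
 * multiple of 64 (the caller rounds it up with ALIGN()).
 */
static inline unsigned example_bf_chunks(unsigned bytecnt)
{
	return bytecnt / 64;	/* iterations mlx5_bf_copy() will run */
}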
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, unsigned *idx,
		     int *size, int nreq)
{
	int err = 0;

	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
		err = -ENOMEM;
		return err;
	}

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return err;
}
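/*
 * Illustration only, not part of the driver: the index math in begin_wqe()
 * relies on sq.wqe_cnt being a power of two, so masking with (wqe_cnt - 1)
 * is a cheap modulo that wraps the producer counter onto the ring.
 */
static inline unsigned example_sq_index(u32 cur_post, int wqe_cnt)
{
	/* e.g. cur_post = 260, wqe_cnt = 256 -> idx = 4 */
	return cur_post & (wqe_cnt - 1);
}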
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}
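/*
 * Illustration only, not part of the driver: cur_post above advances in
 * units of 64-byte basic blocks (MLX5_SEND_WQE_BB), while "size" counts
 * 16-byte octowords, so a 12-octoword WQE (192 bytes) consumes three
 * basic blocks.
 */
static inline u32 example_cur_post_advance(u8 size)
{
	return DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);	/* 12 -> 3 */
}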
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;
	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;
			case IB_WR_REG_SIG_MR:
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(wr->wr.sig_handover.sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;
		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely((seg == qend)))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}
		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell */
		wmb();

		if (bf->need_lock)
			spin_lock(&bf->lock);
		else
			__acquire(&bf->lock);

		/* TBD enable WC */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
		else
			__release(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}
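/*
 * Illustrative sketch, not part of the driver: what the equivalent call
 * looks like from a userspace consumer. It goes through libibverbs, not
 * through mlx5_ib_post_send() directly; the my_* name is made up.
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <infiniband/verbs.h>

static int my_post_one_send(struct ibv_qp *qp, uint64_t addr,
			    uint32_t len, uint32_t lkey)
{
	struct ibv_sge sge = { .addr = addr, .length = len, .lkey = lkey };
	struct ibv_send_wr wr = {
		.wr_id = 1,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IBV_WR_SEND,
		.send_flags = IBV_SEND_SIGNALED,	/* ask for a CQE */
	};
	struct ibv_send_wr *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}
#endif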
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}
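/*
 * Illustrative sketch, not part of the driver: the matching userspace call
 * for the receive path above, again via libibverbs; the my_* name is made
 * up.
 */
#if 0	/* illustrative userspace code, never compiled here */
#include <infiniband/verbs.h>

static int my_post_one_recv(struct ibv_qp *qp, uint64_t addr,
			    uint32_t len, uint32_t lkey)
{
	struct ibv_sge sge = { .addr = addr, .length = len, .lkey = lkey };
	struct ibv_recv_wr wr = {
		.wr_id = 2,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_recv(qp, &wr, &bad_wr);
}
#endif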
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
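/*
 * Illustration only, not part of the driver: tclass_flowlabel packs two
 * GRH fields into one big-endian word - the traffic class in bits 27:20
 * and the 20-bit flow label in bits 19:0 - which is what the shifts in
 * to_ib_ah_attr() above unpack.
 */
static inline void example_unpack_tclass_flowlabel(__be32 tclass_flowlabel,
						   u8 *tclass, u32 *flow_label)
{
	u32 v = be32_to_cpu(tclass_flowlabel);

	*tclass = (v >> 20) & 0xff;
	*flow_label = v & 0xfffff;
}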
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * Wait for any outstanding page faults, in case the user frees memory
	 * based upon this query's result.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
#endif

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
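/*
 * Illustration only, not part of the driver: the rd_atomic limits live as
 * log2 values in 3-bit fields of params1/params2, which is why
 * mlx5_ib_query_qp() reconstructs them with "1 << (...)". For example,
 * params1 = 0x00600000 has bits 23:21 equal to 3, so max_rd_atomic = 8.
 */
static inline u8 example_max_rd_atomic(__be32 params1)
{
	return 1 << ((be32_to_cpu(params1) >> 21) & 0x7);
}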
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_general_caps *gen;
	struct mlx5_ib_xrcd *xrcd;
	int err;

	gen = &dev->mdev->caps.gen;
	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}