/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
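
/*
 * For example, OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST.  The
 * IB_OPCODE_* names are the generic IB opcode values (presumably pulled in
 * via ipath_verbs.h in this tree); the macro only trims the
 * "IB_OPCODE_RC_" prefix so the case labels below stay readable.
 */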

/**
 * ipath_init_restart - initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue entry to initialize the QP's SGE from
 *
 * The QP s_lock should be held.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;
	u32 len;

	/* Skip over the payload that was already sent before the restart PSN. */
	len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
		ib_mtu_enum_to_int(qp->path_mtu);
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	ipath_skip_sge(&qp->s_sge, len);
	qp->s_len = wqe->length - len;
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}
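
/*
 * A worked example of the restart arithmetic above (illustrative only):
 * with a 2048-byte path MTU, a WQE whose first PSN is 0x10, and a restart
 * PSN of 0x13, ((0x13 - 0x10) & IPS_PSN_MASK) * 2048 == 6144, so the SGE
 * is advanced past the 6144 bytes that were already sent.  PSNs are 24
 * bits wide, hence the mask on the difference.
 */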

/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return bth0 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
u32 ipath_make_rc_ack(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu)
{
	struct ipath_sge_state *ss;
	u32 hwords;
	u32 len;
	u32 bth0;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	/*
	 * Send a response.  Note that we are in the responder's
	 * side of the QP context.
	 */
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_REQUEST):
		ss = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu) {
			len = pmtu;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
		} else
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ss = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		/*
		 * We have to prevent new requests from changing
		 * the r_sge state while an ipath_verbs_send()
		 * is in progress.
		 * Changing r_state allows the receiver
		 * to continue processing new packets.
		 * We do it here instead of above so that we are
		 * sure the packet was sent before changing the state.
		 */
		qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
		qp->s_ack_state = OP(ACKNOWLEDGE);
		return 0;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		ss = NULL;
		len = 0;
		qp->r_state = OP(SEND_LAST);
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
		ohdr->u.at.aeth = ipath_compute_aeth(qp);
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
		hwords += sizeof(ohdr->u.at) / 4;
		break;

	default:
		/* Send a regular ACK. */
		ss = NULL;
		len = 0;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = qp->s_ack_state << 24;
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;

	return bth0;
}
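
/*
 * A note on the bth0 values built above (illustrative): the opcode lives
 * in the top byte of the first BTH word, so an ACK is encoded as
 * OP(ACKNOWLEDGE) << 24 and the caller ORs in the low-order BTH fields
 * (e.g. the pkey in send_rc_ack() below) before byte-swapping the word
 * with cpu_to_be32().
 */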

/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 * @bth0p: pointer to the BTH opcode word
 * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
int ipath_make_rc_req(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	char newreq;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout)
		goto done;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto done;
			qp->s_psn = wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
				qp->s_state = OP(COMPARE_SWAP);
			else
				qp->s_state = OP(FETCH_ADD);
			ohdr->u.atomic_eth.vaddr = cpu_to_be64(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			ohdr->u.atomic_eth.swap_data = cpu_to_be64(
				wqe->wr.wr.atomic.swap);
			ohdr->u.atomic_eth.compare_data = cpu_to_be64(
				wqe->wr.wr.atomic.compare_add);
			hwords += sizeof(struct ib_atomic_eth) / 4;
			if (newreq) {
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			ss = NULL;
			len = 0;
			break;

		default:
			goto done;
		}
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;

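	/*
	 * Credit check sketch (editorial note): s_lsn holds the limit
	 * sequence number granted by the responder, so a WQE may start
	 * only while ipath_cmp24(wqe->ssn, qp->s_lsn + 1) <= 0; a value
	 * of (u32) -1 disables the check entirely, which is why the
	 * credit tests above are guarded by it.
	 */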
	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.  See
		 * ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/*
			 * Request an ACK every 1/2 MB to avoid retransmit
			 * timeouts.
			 */
			if (((wqe->length - len) % (512 * 1024)) == 0)
				bth2 |= 1 << 31;
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if an RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/*
			 * Request an ACK every 1/2 MB to avoid retransmit
			 * timeouts.
			 */
			if (((wqe->length - len) % (512 * 1024)) == 0)
				bth2 |= 1 << 31;
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if an RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / 4;
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * We shouldn't start anything new until this request is
		 * finished.  The ACK will handle rescheduling us.  XXX The
		 * number of outstanding ones is negotiated at connection
		 * setup time (see pg. 258,289)?  XXX Also, if we support
		 * multiple outstanding requests, we need to check the WQE
		 * IB_SEND_FENCE flag and not send a new request if an RDMA
		 * read or atomic is pending.
		 */
		goto done;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	*bth0p = bth0 | (qp->s_state << 24);
	*bth2p = bth2;
	return 1;

done:
	return 0;
}
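
/*
 * Usage sketch (illustrative; the real caller lives elsewhere in the
 * driver): the send tasklet is expected to call this under the s_lock
 * and install the returned BTH words, roughly:
 *
 *	u32 bth0, bth2;
 *	if (ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2)) {
 *		ohdr->bth[0] = cpu_to_be32(bth0);
 *		ohdr->bth[2] = cpu_to_be32(bth2);
 *		... hand the packet to ipath_verbs_send() ...
 *	}
 */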

/**
 * send_rc_ack - construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads are handled in the send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPS_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPS_LRH_GRH;
	}
	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->u.aeth = ipath_compute_aeth(qp);
	if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
		bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
		hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
	} else
		bth0 |= OP(ACKNOWLEDGE) << 24;
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
		qp->s_ack_state = OP(ACKNOWLEDGE);
		dev->n_unicast_xmit++;
	}
}
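
/*
 * Header size sketch (illustrative): an ACK without a GRH is LRH (8) +
 * BTH (12) + AETH (4) = 24 bytes = 6 32-bit words, which is where
 * hwords = 6 comes from; the 8-byte atomic_ack_eth adds 2 more words and
 * a GRH adds 40 bytes (10 words).
 */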

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since only one
		 * outstanding RDMA read or atomic is allowed.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}
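
/*
 * Design note (editorial): the RDMA_READ_RESPONSE_* values chosen above
 * are never generated as request opcodes, so ipath_make_rc_req() can use
 * them as sentinel states meaning "restart via ipath_init_restart()" for
 * sends, RDMA writes, and RDMA reads respectively; see the matching cases
 * there.
 */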

/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	/*
	 * If there are no requests pending, we are done.
	 */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
	    qp->s_last == qp->s_tail)
		goto bail;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp_num = qp->ibqp.qp_num;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
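
/*
 * Accounting sketch (illustrative): restarting at PSN 0x20 when s_psn has
 * already advanced to 0x24 counts 4 resent packets, hence
 * "n_rc_resends += s_psn - psn" above; a restarted RDMA read is counted
 * once because its data packets are generated by the responder.
 */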

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	wqe = get_swqe_ptr(qp, qp->s_last);

	/* Nothing is pending to ACK/NAK. */
	if (qp->s_last == qp->s_tail)
		goto bail;

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
		/* If we are ACKing a WQE, the MSN should be >= the SSN. */
		if (ipath_cmp24(aeth, wqe->ssn) < 0)
			break;
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only an RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
		      ipath_cmp24(wqe->psn, psn) != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			qp->s_last_psn = wqe->psn - 1;
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		/* Post a send completion queue entry if requested. */
		if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.qp_num = qp->ibqp.qp_num;
			wc.src_qp = qp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (qp->s_last == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:		/* ACK */
		dev->n_rc_acks++;
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		qp->s_last_psn = psn;
		ret = 1;
		goto bail;

	case 1:		/* RNR NAK */
		dev->n_rnr_naks++;
		if (qp->s_rnr_retry == 0) {
			if (qp->s_last == qp->s_tail)
				goto bail;

			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;
		if (qp->s_last == qp->s_tail)
			goto bail;

		/* The last valid PSN is the previous PSN. */
		qp->s_last_psn = psn - 1;

		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

		reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
					   IPS_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;

	case 3:		/* NAK */
		/* The last valid PSN seen is the previous request's. */
		if (qp->s_last != qp->s_tail)
			qp->s_last_psn = wqe->psn - 1;
		switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
			IPS_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			dev->n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.  XXX
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			if (qp->s_last == qp->s_tail)
				break;

			if (ipath_cmp24(psn, wqe->psn) < 0)
				break;

			/* Retry the request. */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
			dev->n_other_naks++;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = qp->ibqp.qp_num;
			wc.src_qp = qp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes. */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}
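
/*
 * AETH decoding sketch (illustrative; matches the switch above): the top
 * three bits select the class and the low field carries the
 * credit/syndrome value:
 *
 *	switch (aeth >> 29) {
 *	case 0:	ACK (low bits carry flow-control credits)
 *	case 1:	RNR NAK (low bits index ib_ipath_rnr_table)
 *	case 3:	NAK (low bits carry the NAK error code)
 *	}
 *
 * where the low field is (aeth >> IPS_AETH_CREDIT_SHIFT) &
 * IPS_AETH_CREDIT_MASK.
 */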

/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			*(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
		if (!do_rc_ack(qp, aeth, psn, opcode) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		/*
		 * do_rc_ack() has already checked the PSN so skip
		 * the sequence check.
		 */
		goto rdma_read;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
	rdma_read:
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_done;
		if (unlikely(pmtu >= qp->s_len))
			goto ack_done;
		/* We got a response so update the timeout. */
		if (unlikely(qp->s_last == qp->s_tail ||
			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
			     IB_WR_RDMA_READ))
			goto ack_done;
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 * XXX Yet another place that affects relaxed RDMA order
		 * since we don't want s_sge modified.
		 */
		qp->s_len -= pmtu;
		qp->s_last_psn = psn;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_sge, data, pmtu);
		goto bail;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_ONLY):
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/*
		 * Get the number of bytes the message was padded by.
		 */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8))) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_len)) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_sge, data, tlen);
		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
			/*
			 * Change the state so we continue
			 * processing new requests and wake up the
			 * tasklet if there are posted sends.
			 */
			qp->s_state = OP(SEND_LAST);
			if (qp->s_tail != qp->s_head)
				tasklet_hi_schedule(&qp->s_task);
		}
		break;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
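
/*
 * Pad arithmetic sketch (illustrative): the 2-bit PadCnt field sits at
 * bits 21:20 of the first BTH word, so (bth[0] >> 20) & 3 recovers the
 * number of pad bytes after the payload.  A 5-byte read response payload,
 * for instance, arrives as 8 bytes with pad == 3, and the code above
 * subtracts hdrsize + pad + 8 (AETH + ICRC) from tlen to get the true
 * length.
 */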

/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 so the
 * caller schedules a response to be sent (the s_lock is left held).
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ib_reth *reth;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if an RDMA read, atomic, or
		 * NAK is pending though.
		 */
		spin_lock(&qp->s_lock);
		if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
		     qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
		    qp->s_nak_state != 0) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_nak_state = IB_NAK_PSN_ERROR;
		/* Use the expected PSN. */
		qp->s_ack_psn = qp->r_psn;
		goto resched;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 */
	spin_lock(&qp->s_lock);
	if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
	    ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
		if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
			qp->s_ack_psn = psn;
		spin_unlock(&qp->s_lock);
		goto done;
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST):
		/*
		 * We have to be careful to not change s_rdma_sge
		 * while ipath_do_rc_send() is using it and not
		 * holding the s_lock.
		 */
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
			spin_unlock(&qp->s_lock);
			dev->n_rdma_dup_busy++;
			goto done;
		}
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/*
			 * Address range must be a subset of the original
			 * request and start on pmtu boundaries.
			 */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto done;
		} else {
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		break;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Check for the PSN of the last atomic operation
		 * performed and resend the result if found.
		 */
		if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_ack_atomic = qp->r_atomic_data;
		break;
	}
	qp->s_ack_state = opcode;
	qp->s_nak_state = 0;
	qp->s_ack_psn = psn;
resched:
	return 0;

done:
	return 1;
}
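
/*
 * Coalescing sketch (illustrative): if duplicates with PSNs 5, 6, and 7
 * arrive while an ACK for PSN 5 is still queued, the
 * "ipath_cmp24(psn, qp->s_ack_psn) >= 0" test above leaves the queued ACK
 * at the earliest PSN, so a restarted RDMA read resumes where the
 * requester expects it rather than at the latest duplicate.
 */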

/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	unsigned long flags;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) are in the data buffer.
		 */
		header_in_data =
			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto bail;
	}

	spin_lock_irqsave(&qp->r_rq.lock, flags);

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto resched;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		/*
		 * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
		 * NAK if an RDMA read, atomic, or NAK is pending though.
		 */
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
		    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		/* XXX Flush WQEs */
		qp->state = IB_QPS_ERR;
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_nak_state = IB_NAK_INVALID_REQUEST;
		qp->s_ack_psn = qp->r_psn;
		goto resched;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Drop all new requests until a response has been sent.  A
		 * new request then ACKs the RDMA response we sent.  Relaxed
		 * ordering would allow new requests to be processed but we
		 * would need to keep a queue of rwqe's for all that are in
		 * progress.  Note that we can't RNR NAK this request since
		 * the RDMA READ or atomic response is already queued to be
		 * sent (unless we implement a response send queue).
		 */
		goto done;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * An RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if an RDMA read or atomic
			 * is pending though.
			 */
			spin_lock(&qp->s_lock);
			if (qp->s_ack_state >=
			    OP(RDMA_READ_REQUEST) &&
			    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
				spin_unlock(&qp->s_lock);
				goto done;
			}
			qp->s_ack_state = OP(SEND_ONLY);
			qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
			qp->s_ack_psn = qp->r_psn;
			goto resched;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		atomic_inc(&qp->msn);
		if (opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_ONLY))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok)) {
			nack_acc:
				/*
				 * A NAK will ACK earlier sends and RDMA
				 * writes.  Don't queue the NAK if an RDMA
				 * read, atomic, or NAK is pending though.
				 */
				spin_lock(&qp->s_lock);
				if (qp->s_ack_state >=
				    OP(RDMA_READ_REQUEST) &&
				    qp->s_ack_state !=
				    IB_OPCODE_ACKNOWLEDGE) {
					spin_unlock(&qp->s_lock);
					goto done;
				}
				/* XXX Flush WQEs */
				qp->state = IB_QPS_ERR;
				qp->s_ack_state = OP(RDMA_WRITE_ONLY);
				qp->s_nak_state =
					IB_NAK_REMOTE_ACCESS_ERROR;
				qp->s_ack_psn = qp->r_psn;
				goto resched;
			}
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(RDMA_READ_REQUEST):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				spin_unlock(&qp->s_lock);
				goto nack_acc;
			}
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (qp->s_rdma_len > pmtu)
				qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
		} else {
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		atomic_inc(&qp->msn);
		qp->s_ack_state = opcode;
		qp->s_nak_state = 0;
		qp->s_ack_psn = psn;
		qp->r_psn++;
		qp->r_state = opcode;
		goto rdmadone;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		u64 vaddr;
		u64 sdata;
		u32 rkey;

		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else {
			ateth = (struct ib_atomic_eth *)data;
			data += sizeof(*ateth);
		}
		vaddr = be64_to_cpu(ateth->vaddr);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
		sdata = be64_to_cpu(ateth->swap_data);
		spin_lock(&dev->pending_lock);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (opcode == OP(FETCH_ADD))
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data ==
			 be64_to_cpu(ateth->compare_data))
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock(&dev->pending_lock);
		atomic_inc(&qp->msn);
		qp->r_atomic_psn = psn & IPS_PSN_MASK;
		psn |= 1 << 31;
		break;
	}
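
	/*
	 * Semantics sketch (restating the atomic code above): with old
	 * taken from the target address,
	 *
	 *	old = *vaddr;
	 *	if (FETCH_ADD)       *vaddr = old + sdata;
	 *	else if (old == cmp) *vaddr = sdata;
	 *	reply with old;
	 *
	 * i.e. the requester always gets the pre-operation value back in
	 * the atomic ACK.
	 */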

	default:
		/* Drop packet for unknown opcodes. */
		goto done;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31)) {
		/*
		 * Coalesce ACKs unless there is an RDMA READ or
		 * ATOMIC pending.
		 */
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
		    qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
			qp->s_ack_state = opcode;
			qp->s_nak_state = 0;
			qp->s_ack_psn = psn;
			qp->s_ack_atomic = qp->r_atomic_data;
			goto resched;
		}
		spin_unlock(&qp->s_lock);
	}
done:
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	goto bail;

resched:
	/*
	 * Try to send ACK right away but not if ipath_do_rc_send() is
	 * active.
	 */
	if (qp->s_hdrwords == 0 &&
	    (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
	     qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
		send_rc_ack(qp);

rdmadone:
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
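
/*
 * BTH2 bit sketch (illustrative, matching the senders above): bit 31 of
 * the PSN word is the AckReq flag, which is why requesters set
 * "bth2 |= 1 << 31" and this receive path tests "psn & (1 << 31)" to
 * decide whether an ACK is owed; the low 24 bits (IPS_PSN_MASK) are the
 * PSN proper.
 */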