2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <linux/skbuff.h>
36 #include <linux/timer.h>
37 #include <linux/notifier.h>
38 #include <linux/inetdevice.h>
40 #include <linux/tcp.h>
41 #include <linux/if_vlan.h>
43 #include <net/neighbour.h>
44 #include <net/netevent.h>
45 #include <net/route.h>
50 static char *states[] = {
67 module_param(nocong, int, 0644);
68 MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
70 static int enable_ecn;
71 module_param(enable_ecn, int, 0644);
72 MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
74 static int dack_mode = 1;
75 module_param(dack_mode, int, 0644);
76 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
78 int c4iw_max_read_depth = 8;
79 module_param(c4iw_max_read_depth, int, 0644);
80 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
82 static int enable_tcp_timestamps;
83 module_param(enable_tcp_timestamps, int, 0644);
84 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
86 static int enable_tcp_sack;
87 module_param(enable_tcp_sack, int, 0644);
88 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
90 static int enable_tcp_window_scaling = 1;
91 module_param(enable_tcp_window_scaling, int, 0644);
92 MODULE_PARM_DESC(enable_tcp_window_scaling,
93 "Enable tcp window scaling (default=1)");
96 module_param(c4iw_debug, int, 0644);
97 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
100 module_param(peer2peer, int, 0644);
101 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
103 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
104 module_param(p2p_type, int, 0644);
105 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
106 "1=RDMA_READ 0=RDMA_WRITE (default 1)");
108 static int ep_timeout_secs = 60;
109 module_param(ep_timeout_secs, int, 0644);
110 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
111 "in seconds (default=60)");
113 static int mpa_rev = 1;
114 module_param(mpa_rev, int, 0644);
115 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
116 "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
117 " compliant (default=1)");
119 static int markers_enabled;
120 module_param(markers_enabled, int, 0644);
121 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
123 static int crc_enabled = 1;
124 module_param(crc_enabled, int, 0644);
125 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
127 static int rcv_win = 256 * 1024;
128 module_param(rcv_win, int, 0644);
129 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
131 static int snd_win = 128 * 1024;
132 module_param(snd_win, int, 0644);
133 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
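/*
 * Illustrative (hypothetical) example of overriding these parameters at
 * module load time; the values shown are examples, not recommendations:
 *
 *   modprobe iw_cxgb4 mpa_rev=2 peer2peer=1 rcv_win=262144 snd_win=131072
 */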
135 static struct workqueue_struct *workq;
137 static struct sk_buff_head rxq;
139 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
140 static void ep_timeout(unsigned long arg);
141 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
143 static LIST_HEAD(timeout_list);
144 static spinlock_t timeout_lock;
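/*
 * ref_qp()/deref_qp() take and drop a reference on the endpoint's QP and
 * track it with the QP_REFERENCED flag, so the QP stays around for as long
 * as the endpoint still references it.
 */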
146 static void deref_qp(struct c4iw_ep *ep)
148 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
149 clear_bit(QP_REFERENCED, &ep->com.flags);
152 static void ref_qp(struct c4iw_ep *ep)
154 set_bit(QP_REFERENCED, &ep->com.flags);
155 c4iw_qp_add_ref(&ep->com.qp->ibqp);
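/*
 * Arm the per-endpoint timer for ep_timeout_secs and take an extra
 * reference on the endpoint; stop_ep_timer() drops that reference once the
 * timer is cancelled (unless ep_timeout already ran and consumed it).
 */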
158 static void start_ep_timer(struct c4iw_ep *ep)
160 PDBG("%s ep %p\n", __func__, ep);
161 if (timer_pending(&ep->timer)) {
162 pr_err("%s timer already started! ep %p\n",
166 clear_bit(TIMEOUT, &ep->com.flags);
167 c4iw_get_ep(&ep->com);
168 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
169 ep->timer.data = (unsigned long)ep;
170 ep->timer.function = ep_timeout;
171 add_timer(&ep->timer);
174 static void stop_ep_timer(struct c4iw_ep *ep)
176 PDBG("%s ep %p stopping\n", __func__, ep);
177 del_timer_sync(&ep->timer);
178 if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
179 c4iw_put_ep(&ep->com);
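/*
 * Send an skb via the given L2T entry, dropping it if the device is in a
 * fatal error state. Returns 0 on success or a negative errno.
 */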
182 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
183 struct l2t_entry *l2e)
187 if (c4iw_fatal_error(rdev)) {
189 PDBG("%s - device in error state - dropping\n", __func__);
192 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
195 return error < 0 ? error : 0;
198 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
202 if (c4iw_fatal_error(rdev)) {
204 PDBG("%s - device in error state - dropping\n", __func__);
207 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
210 return error < 0 ? error : 0;
213 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
215 struct cpl_tid_release *req;
217 skb = get_skb(skb, sizeof *req, GFP_KERNEL);
220 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
221 INIT_TP_WR(req, hwtid);
222 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
223 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
224 c4iw_ofld_send(rdev, skb);
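/*
 * Derive the effective MSS from the negotiated MTU index, subtracting the
 * 40 bytes of IP/TCP header (plus additional option overhead when TCP
 * timestamps were negotiated).
 */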
228 static void set_emss(struct c4iw_ep *ep, u16 opt)
230 ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
232 if (GET_TCPOPT_TSTAMP(opt))
236 PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
240 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
242 enum c4iw_ep_state state;
244 mutex_lock(&epc->mutex);
246 mutex_unlock(&epc->mutex);
250 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
255 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
257 mutex_lock(&epc->mutex);
258 PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
259 __state_set(epc, new);
260 mutex_unlock(&epc->mutex);
264 static void *alloc_ep(int size, gfp_t gfp)
266 struct c4iw_ep_common *epc;
268 epc = kzalloc(size, gfp);
270 kref_init(&epc->kref);
271 mutex_init(&epc->mutex);
272 c4iw_init_wr_wait(&epc->wr_wait);
274 PDBG("%s alloc ep %p\n", __func__, epc);
278 void _c4iw_free_ep(struct kref *kref)
282 ep = container_of(kref, struct c4iw_ep, com.kref);
283 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
284 if (test_bit(QP_REFERENCED, &ep->com.flags))
286 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
287 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
288 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
289 dst_release(ep->dst);
290 cxgb4_l2t_release(ep->l2t);
295 static void release_ep_resources(struct c4iw_ep *ep)
297 set_bit(RELEASE_RESOURCES, &ep->com.flags);
298 c4iw_put_ep(&ep->com);
301 static int status2errno(int status)
306 case CPL_ERR_CONN_RESET:
308 case CPL_ERR_ARP_MISS:
309 return -EHOSTUNREACH;
310 case CPL_ERR_CONN_TIMEDOUT:
312 case CPL_ERR_TCAM_FULL:
314 case CPL_ERR_CONN_EXIST:
322 * Try and reuse skbs already allocated...
324 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
326 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
329 skb_reset_transport_header(skb);
331 skb = alloc_skb(len, gfp);
336 static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
337 __be32 peer_ip, __be16 local_port,
338 __be16 peer_port, u8 tos)
343 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
344 peer_port, local_port, IPPROTO_TCP,
351 static void arp_failure_discard(void *handle, struct sk_buff *skb)
353 PDBG("%s c4iw_dev %p\n", __func__, handle);
358 * Handle an ARP failure for an active open.
360 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
362 printk(KERN_ERR MOD "ARP failure duing connect\n");
367 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
370 static void abort_arp_failure(void *handle, struct sk_buff *skb)
372 struct c4iw_rdev *rdev = handle;
373 struct cpl_abort_req *req = cplhdr(skb);
375 PDBG("%s rdev %p\n", __func__, rdev);
376 req->cmd = CPL_ABORT_NO_RST;
377 c4iw_ofld_send(rdev, skb);
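/*
 * Post a FW_FLOWC_WR so the firmware knows this flow's PF/VF, channel,
 * port, ingress queue, initial sequence numbers, send buffer size and MSS
 * before any offloaded payload is transmitted.
 */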
380 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
382 unsigned int flowclen = 80;
383 struct fw_flowc_wr *flowc;
386 skb = get_skb(skb, flowclen, GFP_KERNEL);
387 flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
389 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
390 FW_FLOWC_WR_NPARAMS(8));
391 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
392 16)) | FW_WR_FLOWID(ep->hwtid));
394 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
395 flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
396 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
397 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
398 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
399 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
400 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
401 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
402 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
403 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
404 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
405 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
406 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
407 flowc->mnemval[6].val = cpu_to_be32(snd_win);
408 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
409 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
410 /* Pad WR to 16 byte boundary */
411 flowc->mnemval[8].mnemonic = 0;
412 flowc->mnemval[8].val = 0;
413 for (i = 0; i < 9; i++) {
414 flowc->mnemval[i].r4[0] = 0;
415 flowc->mnemval[i].r4[1] = 0;
416 flowc->mnemval[i].r4[2] = 0;
419 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
420 c4iw_ofld_send(&ep->com.dev->rdev, skb);
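/*
 * Begin a graceful half-close by sending a CPL_CLOSE_CON_REQ for this tid.
 */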
423 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
425 struct cpl_close_con_req *req;
427 int wrlen = roundup(sizeof *req, 16);
429 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
430 skb = get_skb(NULL, wrlen, gfp);
432 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
435 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
436 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
437 req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
438 memset(req, 0, wrlen);
439 INIT_TP_WR(req, ep->hwtid);
440 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
442 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
445 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
447 struct cpl_abort_req *req;
448 int wrlen = roundup(sizeof *req, 16);
450 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
451 skb = get_skb(skb, wrlen, gfp);
453 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
457 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
458 t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
459 req = (struct cpl_abort_req *) skb_put(skb, wrlen);
460 memset(req, 0, wrlen);
461 INIT_TP_WR(req, ep->hwtid);
462 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
463 req->cmd = CPL_ABORT_SEND_RST;
464 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
467 #define VLAN_NONE 0xfff
468 #define FILTER_SEL_VLAN_NONE 0xffff
469 #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
470 #define FILTER_SEL_WIDTH_VIN_P_FC \
471 (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
472 #define FILTER_SEL_WIDTH_TAG_P_FC \
473 (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
474 #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
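/*
 * Build the TP "ntuple" used for the hardware lookup/filter entry.  Which
 * fields (VLAN, VIID, port, protocol) are packed in, and at which offsets,
 * depends on the adapter's configured filter mode.
 */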
476 static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
477 struct l2t_entry *l2t)
479 unsigned int ntuple = 0;
482 switch (dev->rdev.lldi.filt_mode) {
484 /* default filter mode */
485 case HW_TPL_FR_MT_PR_IV_P_FC:
486 if (l2t->vlan == VLAN_NONE)
487 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
489 ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
490 ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
492 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
493 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
495 case HW_TPL_FR_MT_PR_OV_P_FC: {
496 viid = cxgb4_port_viid(l2t->neigh->dev);
498 ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
499 ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
500 ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
501 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
502 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
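/*
 * Build and send a CPL_ACT_OPEN_REQ carrying the 4-tuple, the opt0/opt2
 * TCP options and the filter ntuple to initiate an active open for this
 * endpoint.
 */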
511 static int send_connect(struct c4iw_ep *ep)
513 struct cpl_act_open_req *req;
517 unsigned int mtu_idx;
519 int wrlen = roundup(sizeof *req, 16);
521 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
523 skb = get_skb(NULL, wrlen, GFP_KERNEL);
525 printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
529 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
531 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
532 wscale = compute_wscale(rcv_win);
533 opt0 = (nocong ? NO_CONG(1) : 0) |
538 L2T_IDX(ep->l2t->idx) |
539 TX_CHAN(ep->tx_chan) |
540 SMAC_SEL(ep->smac_idx) |
542 ULP_MODE(ULP_MODE_TCPDDP) |
543 RCV_BUFSIZ(rcv_win>>10);
544 opt2 = RX_CHANNEL(0) |
545 CCTRL_ECN(enable_ecn) |
546 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
547 if (enable_tcp_timestamps)
548 opt2 |= TSTAMPS_EN(1);
551 if (wscale && enable_tcp_window_scaling)
552 opt2 |= WND_SCALE_EN(1);
553 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
555 req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
557 OPCODE_TID(req) = cpu_to_be32(
558 MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
559 req->local_port = ep->com.local_addr.sin_port;
560 req->peer_port = ep->com.remote_addr.sin_port;
561 req->local_ip = ep->com.local_addr.sin_addr.s_addr;
562 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
563 req->opt0 = cpu_to_be64(opt0);
564 req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
565 req->opt2 = cpu_to_be32(opt2);
566 set_bit(ACT_OPEN_REQ, &ep->com.history);
567 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
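/*
 * Stream the MPA start request (key, flags, revision, optional MPA v2
 * IRD/ORD parameters and any private data) over the offloaded connection
 * using a FW_OFLD_TX_DATA_WR, then move the endpoint to MPA_REQ_SENT.
 */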
570 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
574 struct fw_ofld_tx_data_wr *req;
575 struct mpa_message *mpa;
576 struct mpa_v2_conn_params mpa_v2_params;
578 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
580 BUG_ON(skb_cloned(skb));
582 mpalen = sizeof(*mpa) + ep->plen;
583 if (mpa_rev_to_use == 2)
584 mpalen += sizeof(struct mpa_v2_conn_params);
585 wrlen = roundup(mpalen + sizeof *req, 16);
586 skb = get_skb(skb, wrlen, GFP_KERNEL);
588 connect_reply_upcall(ep, -ENOMEM);
591 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
593 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
594 memset(req, 0, wrlen);
595 req->op_to_immdlen = cpu_to_be32(
596 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
598 FW_WR_IMMDLEN(mpalen));
599 req->flowid_len16 = cpu_to_be32(
600 FW_WR_FLOWID(ep->hwtid) |
601 FW_WR_LEN16(wrlen >> 4));
602 req->plen = cpu_to_be32(mpalen);
603 req->tunnel_to_proxy = cpu_to_be32(
604 FW_OFLD_TX_DATA_WR_FLUSH(1) |
605 FW_OFLD_TX_DATA_WR_SHOVE(1));
607 mpa = (struct mpa_message *)(req + 1);
608 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
609 mpa->flags = (crc_enabled ? MPA_CRC : 0) |
610 (markers_enabled ? MPA_MARKERS : 0) |
611 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
612 mpa->private_data_size = htons(ep->plen);
613 mpa->revision = mpa_rev_to_use;
614 if (mpa_rev_to_use == 1) {
615 ep->tried_with_mpa_v1 = 1;
616 ep->retry_with_mpa_v1 = 0;
619 if (mpa_rev_to_use == 2) {
620 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
621 sizeof (struct mpa_v2_conn_params));
622 mpa_v2_params.ird = htons((u16)ep->ird);
623 mpa_v2_params.ord = htons((u16)ep->ord);
626 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
627 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
629 htons(MPA_V2_RDMA_WRITE_RTR);
630 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
632 htons(MPA_V2_RDMA_READ_RTR);
634 memcpy(mpa->private_data, &mpa_v2_params,
635 sizeof(struct mpa_v2_conn_params));
638 memcpy(mpa->private_data +
639 sizeof(struct mpa_v2_conn_params),
640 ep->mpa_pkt + sizeof(*mpa), ep->plen);
643 memcpy(mpa->private_data,
644 ep->mpa_pkt + sizeof(*mpa), ep->plen);
647 * Reference the mpa skb. This ensures the data area
648 * will remain in memory until the hw acks the tx.
649 * Function fw4_ack() will deref it.
652 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
655 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
657 state_set(&ep->com, MPA_REQ_SENT);
658 ep->mpa_attr.initiator = 1;
662 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
665 struct fw_ofld_tx_data_wr *req;
666 struct mpa_message *mpa;
668 struct mpa_v2_conn_params mpa_v2_params;
670 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
672 mpalen = sizeof(*mpa) + plen;
673 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
674 mpalen += sizeof(struct mpa_v2_conn_params);
675 wrlen = roundup(mpalen + sizeof *req, 16);
677 skb = get_skb(NULL, wrlen, GFP_KERNEL);
679 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
682 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
684 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
685 memset(req, 0, wrlen);
686 req->op_to_immdlen = cpu_to_be32(
687 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
689 FW_WR_IMMDLEN(mpalen));
690 req->flowid_len16 = cpu_to_be32(
691 FW_WR_FLOWID(ep->hwtid) |
692 FW_WR_LEN16(wrlen >> 4));
693 req->plen = cpu_to_be32(mpalen);
694 req->tunnel_to_proxy = cpu_to_be32(
695 FW_OFLD_TX_DATA_WR_FLUSH(1) |
696 FW_OFLD_TX_DATA_WR_SHOVE(1));
698 mpa = (struct mpa_message *)(req + 1);
699 memset(mpa, 0, sizeof(*mpa));
700 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
701 mpa->flags = MPA_REJECT;
702 mpa->revision = ep->mpa_attr.version;
703 mpa->private_data_size = htons(plen);
705 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
706 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
707 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
708 sizeof (struct mpa_v2_conn_params));
709 mpa_v2_params.ird = htons(((u16)ep->ird) |
710 (peer2peer ? MPA_V2_PEER2PEER_MODEL :
712 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
714 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
715 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
716 FW_RI_INIT_P2PTYPE_READ_REQ ?
717 MPA_V2_RDMA_READ_RTR : 0) : 0));
718 memcpy(mpa->private_data, &mpa_v2_params,
719 sizeof(struct mpa_v2_conn_params));
722 memcpy(mpa->private_data +
723 sizeof(struct mpa_v2_conn_params), pdata, plen);
726 memcpy(mpa->private_data, pdata, plen);
729 * Reference the mpa skb again. This ensures the data area
730 * will remain in memory until the hw acks the tx.
731 * Function fw4_ack() will deref it.
734 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
735 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
738 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
741 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
744 struct fw_ofld_tx_data_wr *req;
745 struct mpa_message *mpa;
747 struct mpa_v2_conn_params mpa_v2_params;
749 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
751 mpalen = sizeof(*mpa) + plen;
752 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
753 mpalen += sizeof(struct mpa_v2_conn_params);
754 wrlen = roundup(mpalen + sizeof *req, 16);
756 skb = get_skb(NULL, wrlen, GFP_KERNEL);
758 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
761 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
763 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
764 memset(req, 0, wrlen);
765 req->op_to_immdlen = cpu_to_be32(
766 FW_WR_OP(FW_OFLD_TX_DATA_WR) |
768 FW_WR_IMMDLEN(mpalen));
769 req->flowid_len16 = cpu_to_be32(
770 FW_WR_FLOWID(ep->hwtid) |
771 FW_WR_LEN16(wrlen >> 4));
772 req->plen = cpu_to_be32(mpalen);
773 req->tunnel_to_proxy = cpu_to_be32(
774 FW_OFLD_TX_DATA_WR_FLUSH(1) |
775 FW_OFLD_TX_DATA_WR_SHOVE(1));
777 mpa = (struct mpa_message *)(req + 1);
778 memset(mpa, 0, sizeof(*mpa));
779 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
780 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
781 (markers_enabled ? MPA_MARKERS : 0);
782 mpa->revision = ep->mpa_attr.version;
783 mpa->private_data_size = htons(plen);
785 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
786 mpa->flags |= MPA_ENHANCED_RDMA_CONN;
787 mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
788 sizeof (struct mpa_v2_conn_params));
789 mpa_v2_params.ird = htons((u16)ep->ird);
790 mpa_v2_params.ord = htons((u16)ep->ord);
791 if (peer2peer && (ep->mpa_attr.p2p_type !=
792 FW_RI_INIT_P2PTYPE_DISABLED)) {
793 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
795 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
797 htons(MPA_V2_RDMA_WRITE_RTR);
798 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
800 htons(MPA_V2_RDMA_READ_RTR);
803 memcpy(mpa->private_data, &mpa_v2_params,
804 sizeof(struct mpa_v2_conn_params));
807 memcpy(mpa->private_data +
808 sizeof(struct mpa_v2_conn_params), pdata, plen);
811 memcpy(mpa->private_data, pdata, plen);
814 * Reference the mpa skb. This ensures the data area
815 * will remain in memory until the hw acks the tx.
816 * Function fw4_ack() will deref it.
819 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
821 state_set(&ep->com, MPA_REP_SENT);
822 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
825 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
828 struct cpl_act_establish *req = cplhdr(skb);
829 unsigned int tid = GET_TID(req);
830 unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
831 struct tid_info *t = dev->rdev.lldi.tids;
833 ep = lookup_atid(t, atid);
835 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
836 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
838 dst_confirm(ep->dst);
840 /* setup the hwtid for this connection */
842 cxgb4_insert_tid(t, ep, tid);
843 insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
845 ep->snd_seq = be32_to_cpu(req->snd_isn);
846 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
848 set_emss(ep, ntohs(req->tcp_opt));
850 /* dealloc the atid */
851 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
852 cxgb4_free_atid(t, atid);
853 set_bit(ACT_ESTAB, &ep->com.history);
855 /* start MPA negotiation */
856 send_flowc(ep, NULL);
857 if (ep->retry_with_mpa_v1)
858 send_mpa_req(ep, skb, 1);
860 send_mpa_req(ep, skb, mpa_rev);
865 static void close_complete_upcall(struct c4iw_ep *ep)
867 struct iw_cm_event event;
869 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
870 memset(&event, 0, sizeof(event));
871 event.event = IW_CM_EVENT_CLOSE;
873 PDBG("close complete delivered ep %p cm_id %p tid %u\n",
874 ep, ep->com.cm_id, ep->hwtid);
875 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
876 ep->com.cm_id->rem_ref(ep->com.cm_id);
877 ep->com.cm_id = NULL;
878 set_bit(CLOSE_UPCALL, &ep->com.history);
882 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
884 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
885 close_complete_upcall(ep);
886 state_set(&ep->com, ABORTING);
887 set_bit(ABORT_CONN, &ep->com.history);
888 return send_abort(ep, skb, gfp);
891 static void peer_close_upcall(struct c4iw_ep *ep)
893 struct iw_cm_event event;
895 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
896 memset(&event, 0, sizeof(event));
897 event.event = IW_CM_EVENT_DISCONNECT;
899 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
900 ep, ep->com.cm_id, ep->hwtid);
901 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
902 set_bit(DISCONN_UPCALL, &ep->com.history);
906 static void peer_abort_upcall(struct c4iw_ep *ep)
908 struct iw_cm_event event;
910 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
911 memset(&event, 0, sizeof(event));
912 event.event = IW_CM_EVENT_CLOSE;
913 event.status = -ECONNRESET;
915 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
916 ep->com.cm_id, ep->hwtid);
917 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
918 ep->com.cm_id->rem_ref(ep->com.cm_id);
919 ep->com.cm_id = NULL;
920 set_bit(ABORT_UPCALL, &ep->com.history);
924 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
926 struct iw_cm_event event;
928 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
929 memset(&event, 0, sizeof(event));
930 event.event = IW_CM_EVENT_CONNECT_REPLY;
931 event.status = status;
932 event.local_addr = ep->com.local_addr;
933 event.remote_addr = ep->com.remote_addr;
935 if ((status == 0) || (status == -ECONNREFUSED)) {
936 if (!ep->tried_with_mpa_v1) {
937 /* this means MPA_v2 is used */
938 event.private_data_len = ep->plen -
939 sizeof(struct mpa_v2_conn_params);
940 event.private_data = ep->mpa_pkt +
941 sizeof(struct mpa_message) +
942 sizeof(struct mpa_v2_conn_params);
944 /* this means MPA_v1 is used */
945 event.private_data_len = ep->plen;
946 event.private_data = ep->mpa_pkt +
947 sizeof(struct mpa_message);
951 PDBG("%s ep %p tid %u status %d\n", __func__, ep,
953 set_bit(CONN_RPL_UPCALL, &ep->com.history);
954 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
957 ep->com.cm_id->rem_ref(ep->com.cm_id);
958 ep->com.cm_id = NULL;
962 static void connect_request_upcall(struct c4iw_ep *ep)
964 struct iw_cm_event event;
966 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
967 memset(&event, 0, sizeof(event));
968 event.event = IW_CM_EVENT_CONNECT_REQUEST;
969 event.local_addr = ep->com.local_addr;
970 event.remote_addr = ep->com.remote_addr;
971 event.provider_data = ep;
972 if (!ep->tried_with_mpa_v1) {
973 /* this means MPA_v2 is used */
976 event.private_data_len = ep->plen -
977 sizeof(struct mpa_v2_conn_params);
978 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
979 sizeof(struct mpa_v2_conn_params);
981 /* this means MPA_v1 is used. Send max supported */
982 event.ord = c4iw_max_read_depth;
983 event.ird = c4iw_max_read_depth;
984 event.private_data_len = ep->plen;
985 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
987 if (state_read(&ep->parent_ep->com) != DEAD) {
988 c4iw_get_ep(&ep->com);
989 ep->parent_ep->com.cm_id->event_handler(
990 ep->parent_ep->com.cm_id,
993 set_bit(CONNREQ_UPCALL, &ep->com.history);
994 c4iw_put_ep(&ep->parent_ep->com);
995 ep->parent_ep = NULL;
998 static void established_upcall(struct c4iw_ep *ep)
1000 struct iw_cm_event event;
1002 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1003 memset(&event, 0, sizeof(event));
1004 event.event = IW_CM_EVENT_ESTABLISHED;
1005 event.ird = ep->ird;
1006 event.ord = ep->ord;
1007 if (ep->com.cm_id) {
1008 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1009 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1010 set_bit(ESTAB_UPCALL, &ep->com.history);
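/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK, forcing an ACK
 * and selecting the configured delayed-ack mode.
 */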
1014 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1016 struct cpl_rx_data_ack *req;
1017 struct sk_buff *skb;
1018 int wrlen = roundup(sizeof *req, 16);
1020 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1021 skb = get_skb(NULL, wrlen, GFP_KERNEL);
1023 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
1027 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
1028 memset(req, 0, wrlen);
1029 INIT_TP_WR(req, ep->hwtid);
1030 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
1032 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
1034 V_RX_DACK_MODE(dack_mode));
1035 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
1036 c4iw_ofld_send(&ep->com.dev->rdev, skb);
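/*
 * Accumulate the peer's MPA start reply in ep->mpa_pkt, validate the
 * header, negotiate the MPA v2 IRD/ORD and RTR settings, and move the QP
 * to RTS (or to TERMINATE/abort on mismatch or error).
 */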
1040 static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1042 struct mpa_message *mpa;
1043 struct mpa_v2_conn_params *mpa_v2_params;
1045 u16 resp_ird, resp_ord;
1046 u8 rtr_mismatch = 0, insuff_ird = 0;
1047 struct c4iw_qp_attributes attrs;
1048 enum c4iw_qp_attr_mask mask;
1051 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1054 * Stop mpa timer. If it expired, then the state has
1055 * changed and we bail since ep_timeout already aborted
1059 if (state_read(&ep->com) != MPA_REQ_SENT)
1063 * If we get more than the supported amount of private data
1064 * then we must fail this connection.
1066 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1072 * copy the new data into our accumulation buffer.
1074 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1076 ep->mpa_pkt_len += skb->len;
1079 * if we don't even have the mpa message, then bail.
1081 if (ep->mpa_pkt_len < sizeof(*mpa))
1083 mpa = (struct mpa_message *) ep->mpa_pkt;
1085 /* Validate MPA header. */
1086 if (mpa->revision > mpa_rev) {
1087 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1088 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1092 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1097 plen = ntohs(mpa->private_data_size);
1100 * Fail if there's too much private data.
1102 if (plen > MPA_MAX_PRIVATE_DATA) {
1108 * If plen does not account for pkt size
1110 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1115 ep->plen = (u8) plen;
1118 * If we don't have all the pdata yet, then bail.
1119 * We'll continue processing when more data arrives.
1121 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1124 if (mpa->flags & MPA_REJECT) {
1125 err = -ECONNREFUSED;
1130 * If we get here we have accumulated the entire mpa
1131 * start reply message including private data. And
1132 * the MPA header is valid.
1134 state_set(&ep->com, FPDU_MODE);
1135 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1136 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1137 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1138 ep->mpa_attr.version = mpa->revision;
1139 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1141 if (mpa->revision == 2) {
1142 ep->mpa_attr.enhanced_rdma_conn =
1143 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1144 if (ep->mpa_attr.enhanced_rdma_conn) {
1145 mpa_v2_params = (struct mpa_v2_conn_params *)
1146 (ep->mpa_pkt + sizeof(*mpa));
1147 resp_ird = ntohs(mpa_v2_params->ird) &
1148 MPA_V2_IRD_ORD_MASK;
1149 resp_ord = ntohs(mpa_v2_params->ord) &
1150 MPA_V2_IRD_ORD_MASK;
1153 * This is a double-check. Ideally, the checks below are
1154 * not required since the ird/ord negotiation has already
1155 * been taken care of in c4iw_accept_cr
1157 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
1164 if (ntohs(mpa_v2_params->ird) &
1165 MPA_V2_PEER2PEER_MODEL) {
1166 if (ntohs(mpa_v2_params->ord) &
1167 MPA_V2_RDMA_WRITE_RTR)
1168 ep->mpa_attr.p2p_type =
1169 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1170 else if (ntohs(mpa_v2_params->ord) &
1171 MPA_V2_RDMA_READ_RTR)
1172 ep->mpa_attr.p2p_type =
1173 FW_RI_INIT_P2PTYPE_READ_REQ;
1176 } else if (mpa->revision == 1)
1178 ep->mpa_attr.p2p_type = p2p_type;
1180 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1181 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
1182 "%d\n", __func__, ep->mpa_attr.crc_enabled,
1183 ep->mpa_attr.recv_marker_enabled,
1184 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1185 ep->mpa_attr.p2p_type, p2p_type);
1188 * If the responder's RTR does not match that of the initiator, assign
1189 * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no RTR is
1190 * generated when moving the QP to RTS state.
1191 * A TERM message will be sent after the QP has moved to RTS state
1193 if ((ep->mpa_attr.version == 2) && peer2peer &&
1194 (ep->mpa_attr.p2p_type != p2p_type)) {
1195 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1199 attrs.mpa_attr = ep->mpa_attr;
1200 attrs.max_ird = ep->ird;
1201 attrs.max_ord = ep->ord;
1202 attrs.llp_stream_handle = ep;
1203 attrs.next_state = C4IW_QP_STATE_RTS;
1205 mask = C4IW_QP_ATTR_NEXT_STATE |
1206 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
1207 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
1209 /* bind QP and TID with INIT_WR */
1210 err = c4iw_modify_qp(ep->com.qp->rhp,
1211 ep->com.qp, mask, &attrs, 1);
1216 * If the responder's RTR requirement did not match what the initiator
1217 * supports, generate a TERM message
1220 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
1221 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1222 attrs.ecode = MPA_NOMATCH_RTR;
1223 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1224 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1225 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1231 * Generate a TERM if the initiator's IRD is not sufficient for the
1232 * responder-provided ORD. Currently, we exhibit the same behaviour even
1233 * when the responder-provided IRD is also insufficient with regard to
1237 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
1239 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1240 attrs.ecode = MPA_INSUFF_IRD;
1241 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1242 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1243 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1249 state_set(&ep->com, ABORTING);
1250 send_abort(ep, skb, GFP_KERNEL);
1252 connect_reply_upcall(ep, err);
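/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and validate
 * the peer's MPA start request, record the negotiated attributes, and
 * deliver a connect-request upcall to the listening cm_id.
 */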
1256 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1258 struct mpa_message *mpa;
1259 struct mpa_v2_conn_params *mpa_v2_params;
1262 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1264 if (state_read(&ep->com) != MPA_REQ_WAIT)
1268 * If we get more than the supported amount of private data
1269 * then we must fail this connection.
1271 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1273 abort_connection(ep, skb, GFP_KERNEL);
1277 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1280 * Copy the new data into our accumulation buffer.
1282 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1284 ep->mpa_pkt_len += skb->len;
1287 * If we don't even have the mpa message, then bail.
1288 * We'll continue processing when more data arrives.
1290 if (ep->mpa_pkt_len < sizeof(*mpa))
1293 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1295 mpa = (struct mpa_message *) ep->mpa_pkt;
1298 * Validate MPA Header.
1300 if (mpa->revision > mpa_rev) {
1301 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1302 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1304 abort_connection(ep, skb, GFP_KERNEL);
1308 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1310 abort_connection(ep, skb, GFP_KERNEL);
1314 plen = ntohs(mpa->private_data_size);
1317 * Fail if there's too much private data.
1319 if (plen > MPA_MAX_PRIVATE_DATA) {
1321 abort_connection(ep, skb, GFP_KERNEL);
1326 * If plen does not account for pkt size
1328 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1330 abort_connection(ep, skb, GFP_KERNEL);
1333 ep->plen = (u8) plen;
1336 * If we don't have all the pdata yet, then bail.
1338 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1342 * If we get here we have accumulated the entire mpa
1343 * start reply message including private data.
1345 ep->mpa_attr.initiator = 0;
1346 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1347 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1348 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1349 ep->mpa_attr.version = mpa->revision;
1350 if (mpa->revision == 1)
1351 ep->tried_with_mpa_v1 = 1;
1352 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1354 if (mpa->revision == 2) {
1355 ep->mpa_attr.enhanced_rdma_conn =
1356 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1357 if (ep->mpa_attr.enhanced_rdma_conn) {
1358 mpa_v2_params = (struct mpa_v2_conn_params *)
1359 (ep->mpa_pkt + sizeof(*mpa));
1360 ep->ird = ntohs(mpa_v2_params->ird) &
1361 MPA_V2_IRD_ORD_MASK;
1362 ep->ord = ntohs(mpa_v2_params->ord) &
1363 MPA_V2_IRD_ORD_MASK;
1364 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1366 if (ntohs(mpa_v2_params->ord) &
1367 MPA_V2_RDMA_WRITE_RTR)
1368 ep->mpa_attr.p2p_type =
1369 FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1370 else if (ntohs(mpa_v2_params->ord) &
1371 MPA_V2_RDMA_READ_RTR)
1372 ep->mpa_attr.p2p_type =
1373 FW_RI_INIT_P2PTYPE_READ_REQ;
1376 } else if (mpa->revision == 1)
1378 ep->mpa_attr.p2p_type = p2p_type;
1380 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1381 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1382 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1383 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1384 ep->mpa_attr.p2p_type);
1386 state_set(&ep->com, MPA_REQ_RCVD);
1389 connect_request_upcall(ep);
1393 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1396 struct cpl_rx_data *hdr = cplhdr(skb);
1397 unsigned int dlen = ntohs(hdr->len);
1398 unsigned int tid = GET_TID(hdr);
1399 struct tid_info *t = dev->rdev.lldi.tids;
1400 __u8 status = hdr->status;
1402 ep = lookup_tid(t, tid);
1403 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1404 skb_pull(skb, sizeof(*hdr));
1405 skb_trim(skb, dlen);
1407 /* update RX credits */
1408 update_rx_credits(ep, dlen);
1410 switch (state_read(&ep->com)) {
1412 ep->rcv_seq += dlen;
1413 process_mpa_reply(ep, skb);
1416 ep->rcv_seq += dlen;
1417 process_mpa_request(ep, skb);
1420 struct c4iw_qp_attributes attrs;
1421 BUG_ON(!ep->com.qp);
1423 pr_err("%s Unexpected streaming data." \
1424 " qpid %u ep %p state %d tid %u status %d\n",
1425 __func__, ep->com.qp->wq.sq.qid, ep,
1426 state_read(&ep->com), ep->hwtid, status);
1427 attrs.next_state = C4IW_QP_STATE_ERROR;
1428 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1429 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1430 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
1439 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1442 struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1444 unsigned int tid = GET_TID(rpl);
1445 struct tid_info *t = dev->rdev.lldi.tids;
1447 ep = lookup_tid(t, tid);
1449 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
1452 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1453 mutex_lock(&ep->com.mutex);
1454 switch (ep->com.state) {
1456 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1457 __state_set(&ep->com, DEAD);
1461 printk(KERN_ERR "%s ep %p state %d\n",
1462 __func__, ep, ep->com.state);
1465 mutex_unlock(&ep->com.mutex);
1468 release_ep_resources(ep);
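/*
 * Retry an active open through the filter region with a
 * FW_OFLD_CONNECTION_WR when the normal CPL_ACT_OPEN_REQ path failed
 * (e.g. because the TCAM is full).
 */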
1472 static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1474 struct sk_buff *skb;
1475 struct fw_ofld_connection_wr *req;
1476 unsigned int mtu_idx;
1479 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1480 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1481 memset(req, 0, sizeof(*req));
1482 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1483 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1484 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
1486 req->le.lport = ep->com.local_addr.sin_port;
1487 req->le.pport = ep->com.remote_addr.sin_port;
1488 req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
1489 req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
1490 req->tcb.t_state_to_astid =
1491 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
1492 V_FW_OFLD_CONNECTION_WR_ASTID(atid));
1493 req->tcb.cplrxdataack_cplpassacceptrpl =
1494 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
1495 req->tcb.tx_max = (__force __be32) jiffies;
1496 req->tcb.rcv_adv = htons(1);
1497 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1498 wscale = compute_wscale(rcv_win);
1499 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
1500 (nocong ? NO_CONG(1) : 0) |
1505 L2T_IDX(ep->l2t->idx) |
1506 TX_CHAN(ep->tx_chan) |
1507 SMAC_SEL(ep->smac_idx) |
1509 ULP_MODE(ULP_MODE_TCPDDP) |
1510 RCV_BUFSIZ(rcv_win >> 10));
1511 req->tcb.opt2 = (__force __be32) (PACE(1) |
1512 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1514 CCTRL_ECN(enable_ecn) |
1515 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
1516 if (enable_tcp_timestamps)
1517 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
1518 if (enable_tcp_sack)
1519 req->tcb.opt2 |= (__force __be32) SACK_EN(1);
1520 if (wscale && enable_tcp_window_scaling)
1521 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
1522 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
1523 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
1524 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1525 set_bit(ACT_OFLD_CONN, &ep->com.history);
1526 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1530 * Return whether a failed active open has allocated a TID
1532 static inline int act_open_has_tid(int status)
1534 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1535 status != CPL_ERR_ARP_MISS;
1538 #define ACT_OPEN_RETRY_COUNT 2
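/*
 * Re-run the active-open sequence for an existing endpoint: allocate a new
 * atid, look up the route and neighbour, build a fresh L2T entry and queue
 * assignment, and send a new connect request.  On failure the upper layer
 * is notified via connect_reply_upcall().
 */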
1540 static int c4iw_reconnect(struct c4iw_ep *ep)
1544 struct port_info *pi;
1545 struct net_device *pdev;
1547 struct neighbour *neigh;
1549 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
1550 init_timer(&ep->timer);
1553 * Allocate an active TID to initiate a TCP connection.
1555 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
1556 if (ep->atid == -1) {
1557 pr_err("%s - cannot alloc atid.\n", __func__);
1561 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
1564 rt = find_route(ep->com.dev,
1565 ep->com.cm_id->local_addr.sin_addr.s_addr,
1566 ep->com.cm_id->remote_addr.sin_addr.s_addr,
1567 ep->com.cm_id->local_addr.sin_port,
1568 ep->com.cm_id->remote_addr.sin_port, 0);
1570 pr_err("%s - cannot find route.\n", __func__);
1571 err = -EHOSTUNREACH;
1576 neigh = dst_neigh_lookup(ep->dst,
1577 &ep->com.cm_id->remote_addr.sin_addr.s_addr);
1579 pr_err("%s - cannot alloc neigh.\n", __func__);
1584 /* get a l2t entry */
1585 if (neigh->dev->flags & IFF_LOOPBACK) {
1586 PDBG("%s LOOPBACK\n", __func__);
1587 pdev = ip_dev_find(&init_net,
1588 ep->com.cm_id->remote_addr.sin_addr.s_addr);
1589 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1591 pi = (struct port_info *)netdev_priv(pdev);
1592 ep->mtu = pdev->mtu;
1593 ep->tx_chan = cxgb4_port_chan(pdev);
1594 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1597 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1598 neigh, neigh->dev, 0);
1599 pi = (struct port_info *)netdev_priv(neigh->dev);
1600 ep->mtu = dst_mtu(ep->dst);
1601 ep->tx_chan = cxgb4_port_chan(neigh->dev);
1602 ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
1606 step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
1607 ep->txq_idx = pi->port_id * step;
1608 ep->ctrlq_idx = pi->port_id;
1609 step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
1610 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
1613 pr_err("%s - cannot alloc l2e.\n", __func__);
1618 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1619 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1622 state_set(&ep->com, CONNECTING);
1625 /* send connect request to rnic */
1626 err = send_connect(ep);
1630 cxgb4_l2t_release(ep->l2t);
1632 dst_release(ep->dst);
1634 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
1635 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
1638 * remember to send notification to upper layer.
1639 * We are in here because the upper layer is not aware that this is
1640 * a re-connect attempt and so is still waiting for the
1641 * response to the 1st connect request.
1643 connect_reply_upcall(ep, -ECONNRESET);
1644 c4iw_put_ep(&ep->com);
1649 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1652 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1653 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1654 ntohl(rpl->atid_status)));
1655 struct tid_info *t = dev->rdev.lldi.tids;
1656 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1658 ep = lookup_atid(t, atid);
1660 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1661 status, status2errno(status));
1663 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1664 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1669 set_bit(ACT_OPEN_RPL, &ep->com.history);
1672 * Log interesting failures.
1675 case CPL_ERR_CONN_RESET:
1676 case CPL_ERR_CONN_TIMEDOUT:
1678 case CPL_ERR_TCAM_FULL:
1679 if (dev->rdev.lldi.enable_fw_ofld_conn) {
1680 mutex_lock(&dev->rdev.stats.lock);
1681 dev->rdev.stats.tcam_full++;
1682 mutex_unlock(&dev->rdev.stats.lock);
1683 send_fw_act_open_req(ep,
1684 GET_TID_TID(GET_AOPEN_ATID(
1685 ntohl(rpl->atid_status))));
1689 case CPL_ERR_CONN_EXIST:
1690 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
1691 set_bit(ACT_RETRY_INUSE, &ep->com.history);
1692 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
1694 cxgb4_free_atid(t, atid);
1695 dst_release(ep->dst);
1696 cxgb4_l2t_release(ep->l2t);
1702 printk(KERN_INFO MOD "Active open failure - "
1703 "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
1704 atid, status, status2errno(status),
1705 &ep->com.local_addr.sin_addr.s_addr,
1706 ntohs(ep->com.local_addr.sin_port),
1707 &ep->com.remote_addr.sin_addr.s_addr,
1708 ntohs(ep->com.remote_addr.sin_port));
1712 connect_reply_upcall(ep, status2errno(status));
1713 state_set(&ep->com, DEAD);
1715 if (status && act_open_has_tid(status))
1716 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1718 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
1719 cxgb4_free_atid(t, atid);
1720 dst_release(ep->dst);
1721 cxgb4_l2t_release(ep->l2t);
1722 c4iw_put_ep(&ep->com);
1727 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1729 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1730 struct tid_info *t = dev->rdev.lldi.tids;
1731 unsigned int stid = GET_TID(rpl);
1732 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1735 PDBG("%s stid %d lookup failure!\n", __func__, stid);
1738 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1739 rpl->status, status2errno(rpl->status));
1740 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1746 static int listen_stop(struct c4iw_listen_ep *ep)
1748 struct sk_buff *skb;
1749 struct cpl_close_listsvr_req *req;
1751 PDBG("%s ep %p\n", __func__, ep);
1752 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1754 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1757 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1759 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1761 req->reply_ctrl = cpu_to_be16(
1762 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1763 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1764 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1767 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1769 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1770 struct tid_info *t = dev->rdev.lldi.tids;
1771 unsigned int stid = GET_TID(rpl);
1772 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1774 PDBG("%s ep %p\n", __func__, ep);
1775 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1779 static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1780 struct cpl_pass_accept_req *req)
1782 struct cpl_pass_accept_rpl *rpl;
1783 unsigned int mtu_idx;
1788 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1789 BUG_ON(skb_cloned(skb));
1790 skb_trim(skb, sizeof(*rpl));
1792 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1793 wscale = compute_wscale(rcv_win);
1794 opt0 = (nocong ? NO_CONG(1) : 0) |
1799 L2T_IDX(ep->l2t->idx) |
1800 TX_CHAN(ep->tx_chan) |
1801 SMAC_SEL(ep->smac_idx) |
1802 DSCP(ep->tos >> 2) |
1803 ULP_MODE(ULP_MODE_TCPDDP) |
1804 RCV_BUFSIZ(rcv_win>>10);
1805 opt2 = RX_CHANNEL(0) |
1806 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1808 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1809 opt2 |= TSTAMPS_EN(1);
1810 if (enable_tcp_sack && req->tcpopt.sack)
1812 if (wscale && enable_tcp_window_scaling)
1813 opt2 |= WND_SCALE_EN(1);
1815 const struct tcphdr *tcph;
1816 u32 hlen = ntohl(req->hdr_len);
1818 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
1820 if (tcph->ece && tcph->cwr)
1821 opt2 |= CCTRL_ECN(1);
1825 INIT_TP_WR(rpl, ep->hwtid);
1826 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1828 rpl->opt0 = cpu_to_be64(opt0);
1829 rpl->opt2 = cpu_to_be32(opt2);
1830 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
1831 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1836 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1837 struct sk_buff *skb)
1839 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1841 BUG_ON(skb_cloned(skb));
1842 skb_trim(skb, sizeof(struct cpl_tid_release));
1844 release_tid(&dev->rdev, hwtid, skb);
1848 static void get_4tuple(struct cpl_pass_accept_req *req,
1849 __be32 *local_ip, __be32 *peer_ip,
1850 __be16 *local_port, __be16 *peer_port)
1852 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1853 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1854 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1855 struct tcphdr *tcp = (struct tcphdr *)
1856 ((u8 *)(req + 1) + eth_len + ip_len);
1858 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1859 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1862 *peer_ip = ip->saddr;
1863 *local_ip = ip->daddr;
1864 *peer_port = tcp->source;
1865 *local_port = tcp->dest;
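/*
 * Resolve the neighbour for the peer IP and populate the endpoint's L2T
 * entry, MTU, TX channel, SMAC index and queue indices, handling the
 * loopback case via ip_dev_find().
 */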
1870 static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1871 struct c4iw_dev *cdev, bool clear_mpa_v1)
1873 struct neighbour *n;
1876 n = dst_neigh_lookup(dst, &peer_ip);
1882 if (n->dev->flags & IFF_LOOPBACK) {
1883 struct net_device *pdev;
1885 pdev = ip_dev_find(&init_net, peer_ip);
1890 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1894 ep->mtu = pdev->mtu;
1895 ep->tx_chan = cxgb4_port_chan(pdev);
1896 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1897 step = cdev->rdev.lldi.ntxq /
1898 cdev->rdev.lldi.nchan;
1899 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1900 step = cdev->rdev.lldi.nrxq /
1901 cdev->rdev.lldi.nchan;
1902 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1903 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1904 cxgb4_port_idx(pdev) * step];
1907 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1911 ep->mtu = dst_mtu(dst);
1912 ep->tx_chan = cxgb4_port_chan(n->dev);
1913 ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
1914 step = cdev->rdev.lldi.ntxq /
1915 cdev->rdev.lldi.nchan;
1916 ep->txq_idx = cxgb4_port_idx(n->dev) * step;
1917 ep->ctrlq_idx = cxgb4_port_idx(n->dev);
1918 step = cdev->rdev.lldi.nrxq /
1919 cdev->rdev.lldi.nchan;
1920 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1921 cxgb4_port_idx(n->dev) * step];
1924 ep->retry_with_mpa_v1 = 0;
1925 ep->tried_with_mpa_v1 = 0;
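/*
 * Handle CPL_PASS_ACCEPT_REQ on a listening endpoint: extract the 4-tuple,
 * find a route to the peer, allocate and import a child endpoint, insert
 * its hwtid, and send the accept reply (or reject the connection request
 * on failure).
 */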
1937 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1939 struct c4iw_ep *child_ep = NULL, *parent_ep;
1940 struct cpl_pass_accept_req *req = cplhdr(skb);
1941 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1942 struct tid_info *t = dev->rdev.lldi.tids;
1943 unsigned int hwtid = GET_TID(req);
1944 struct dst_entry *dst;
1946 __be32 local_ip, peer_ip = 0;
1947 __be16 local_port, peer_port;
1949 u16 peer_mss = ntohs(req->tcpopt.mss);
1951 parent_ep = lookup_stid(t, stid);
1953 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
1956 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1958 PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
1959 "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
1960 ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
1961 ntohs(peer_port), peer_mss);
1963 if (state_read(&parent_ep->com) != LISTEN) {
1964 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1969 /* Find output route */
1970 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1971 GET_POPEN_TOS(ntohl(req->tos_stid)));
1973 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1979 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1981 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1987 err = import_ep(child_ep, peer_ip, dst, dev, false);
1989 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1996 if (peer_mss && child_ep->mtu > (peer_mss + 40))
1997 child_ep->mtu = peer_mss + 40;
1999 state_set(&child_ep->com, CONNECTING);
2000 child_ep->com.dev = dev;
2001 child_ep->com.cm_id = NULL;
2002 child_ep->com.local_addr.sin_family = PF_INET;
2003 child_ep->com.local_addr.sin_port = local_port;
2004 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
2005 child_ep->com.remote_addr.sin_family = PF_INET;
2006 child_ep->com.remote_addr.sin_port = peer_port;
2007 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
2008 c4iw_get_ep(&parent_ep->com);
2009 child_ep->parent_ep = parent_ep;
2010 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
2011 child_ep->dst = dst;
2012 child_ep->hwtid = hwtid;
2014 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
2015 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2017 init_timer(&child_ep->timer);
2018 cxgb4_insert_tid(t, child_ep, hwtid);
2019 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
2020 accept_cr(child_ep, peer_ip, skb, req);
2021 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2024 reject_cr(dev, hwtid, peer_ip, skb);
2029 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2032 struct cpl_pass_establish *req = cplhdr(skb);
2033 struct tid_info *t = dev->rdev.lldi.tids;
2034 unsigned int tid = GET_TID(req);
2036 ep = lookup_tid(t, tid);
2037 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2038 ep->snd_seq = be32_to_cpu(req->snd_isn);
2039 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2041 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2042 ntohs(req->tcp_opt));
2044 set_emss(ep, ntohs(req->tcp_opt));
2046 dst_confirm(ep->dst);
2047 state_set(&ep->com, MPA_REQ_WAIT);
2049 send_flowc(ep, skb);
2050 set_bit(PASS_ESTAB, &ep->com.history);
2055 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2057 struct cpl_peer_close *hdr = cplhdr(skb);
2059 struct c4iw_qp_attributes attrs;
2062 struct tid_info *t = dev->rdev.lldi.tids;
2063 unsigned int tid = GET_TID(hdr);
2066 ep = lookup_tid(t, tid);
2067 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2068 dst_confirm(ep->dst);
2070 set_bit(PEER_CLOSE, &ep->com.history);
2071 mutex_lock(&ep->com.mutex);
2072 switch (ep->com.state) {
2074 __state_set(&ep->com, CLOSING);
2077 __state_set(&ep->com, CLOSING);
2078 connect_reply_upcall(ep, -ECONNRESET);
2083 * We're gonna mark this puppy DEAD, but keep
2084 * the reference on it until the ULP accepts or
2085 * rejects the CR. Also wake up anyone waiting
2086 * in rdma connection migration (see c4iw_accept_cr()).
2088 __state_set(&ep->com, CLOSING);
2089 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2090 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2093 __state_set(&ep->com, CLOSING);
2094 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
2095 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2099 __state_set(&ep->com, CLOSING);
2100 attrs.next_state = C4IW_QP_STATE_CLOSING;
2101 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2102 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2103 if (ret != -ECONNRESET) {
2104 peer_close_upcall(ep);
2112 __state_set(&ep->com, MORIBUND);
2117 if (ep->com.cm_id && ep->com.qp) {
2118 attrs.next_state = C4IW_QP_STATE_IDLE;
2119 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2120 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2122 close_complete_upcall(ep);
2123 __state_set(&ep->com, DEAD);
2133 mutex_unlock(&ep->com.mutex);
2135 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2137 release_ep_resources(ep);
2142 * Returns whether an ABORT_REQ_RSS message is negative advice.
2144 static int is_neg_adv_abort(unsigned int status)
2146 return status == CPL_ERR_RTX_NEG_ADVICE ||
2147 status == CPL_ERR_PERSIST_NEG_ADVICE;
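/*
 * Handle an incoming CPL_ABORT_REQ_RSS: wake any waiters, move the QP to
 * ERROR if needed, deliver the appropriate upcall, reply with a no-RST
 * CPL_ABORT_RPL, and release resources (or retry with MPA v1 when the
 * enhanced negotiation failed).
 */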
2150 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2152 struct cpl_abort_req_rss *req = cplhdr(skb);
2154 struct cpl_abort_rpl *rpl;
2155 struct sk_buff *rpl_skb;
2156 struct c4iw_qp_attributes attrs;
2159 struct tid_info *t = dev->rdev.lldi.tids;
2160 unsigned int tid = GET_TID(req);
2162 ep = lookup_tid(t, tid);
2163 if (is_neg_adv_abort(req->status)) {
2164 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2168 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2170 set_bit(PEER_ABORT, &ep->com.history);
2173 * Wake up any threads in rdma_init() or rdma_fini().
2174 * However, this is not needed if the com state is just MPA_REQ_SENT.
2177 if (ep->com.state != MPA_REQ_SENT)
2178 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2180 mutex_lock(&ep->com.mutex);
2181 switch (ep->com.state) {
2189 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2190 connect_reply_upcall(ep, -ECONNRESET);
2193 * we just don't send a notification upwards because we
2194 * want to retry with mpa_v1 without the upper layers even knowing it;
2197 * do some housekeeping so as to re-initiate the connection.
2200 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2202 ep->retry_with_mpa_v1 = 1;
2214 if (ep->com.cm_id && ep->com.qp) {
2215 attrs.next_state = C4IW_QP_STATE_ERROR;
2216 ret = c4iw_modify_qp(ep->com.qp->rhp,
2217 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2221 "%s - qp <- error failed!\n",
2224 peer_abort_upcall(ep);
2229 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2230 mutex_unlock(&ep->com.mutex);
2236 dst_confirm(ep->dst);
2237 if (ep->com.state != ABORTING) {
2238 __state_set(&ep->com, DEAD);
2239 /* we don't release if we want to retry with mpa_v1 */
2240 if (!ep->retry_with_mpa_v1)
2243 mutex_unlock(&ep->com.mutex);
2245 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2247 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2252 set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx); /* set the txq on the skb we actually send */
2253 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2254 INIT_TP_WR(rpl, ep->hwtid);
2255 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2256 rpl->cmd = CPL_ABORT_NO_RST;
2257 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2260 release_ep_resources(ep);
2261 else if (ep->retry_with_mpa_v1) {
2262 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
2263 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2264 dst_release(ep->dst);
2265 cxgb4_l2t_release(ep->l2t);
2272 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2275 struct c4iw_qp_attributes attrs;
2276 struct cpl_close_con_rpl *rpl = cplhdr(skb);
2278 struct tid_info *t = dev->rdev.lldi.tids;
2279 unsigned int tid = GET_TID(rpl);
2281 ep = lookup_tid(t, tid);
2283 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2286 /* The cm_id may be null if we failed to connect */
2287 mutex_lock(&ep->com.mutex);
2288 switch (ep->com.state) {
2290 __state_set(&ep->com, MORIBUND);
2294 if ((ep->com.cm_id) && (ep->com.qp)) {
2295 attrs.next_state = C4IW_QP_STATE_IDLE;
2296 c4iw_modify_qp(ep->com.qp->rhp,
2298 C4IW_QP_ATTR_NEXT_STATE,
2301 close_complete_upcall(ep);
2302 __state_set(&ep->com, DEAD);
2312 mutex_unlock(&ep->com.mutex);
2314 release_ep_resources(ep);
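/*
 * terminate() handles CPL_RDMA_TERMINATE by moving the associated QP
 * into the TERMINATE state; if no endpoint/QP is found it only logs.
 */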
2318 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2320 struct cpl_rdma_terminate *rpl = cplhdr(skb);
2321 struct tid_info *t = dev->rdev.lldi.tids;
2322 unsigned int tid = GET_TID(rpl);
2324 struct c4iw_qp_attributes attrs;
2326 ep = lookup_tid(t, tid);
2329 if (ep && ep->com.qp) {
2330 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2331 ep->com.qp->wq.sq.qid);
2332 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2333 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2334 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2336 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
2342 * Upcall from the adapter indicating data has been transmitted.
2343 * For us it's just the single MPA request or reply. We can now free
2344 * the skb holding the mpa message.
2346 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2349 struct cpl_fw4_ack *hdr = cplhdr(skb);
2350 u8 credits = hdr->credits;
2351 unsigned int tid = GET_TID(hdr);
2352 struct tid_info *t = dev->rdev.lldi.tids;
2355 ep = lookup_tid(t, tid);
2356 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2358 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2359 __func__, ep, ep->hwtid, state_read(&ep->com));
2363 dst_confirm(ep->dst);
2365 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2366 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2367 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2368 kfree_skb(ep->mpa_skb);
2374 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2377 struct c4iw_ep *ep = to_ep(cm_id);
2378 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2380 if (state_read(&ep->com) == DEAD) {
2381 c4iw_put_ep(&ep->com);
2384 set_bit(ULP_REJECT, &ep->com.history);
2385 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2387 abort_connection(ep, NULL, GFP_KERNEL);
2389 err = send_mpa_reject(ep, pdata, pdata_len);
2390 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2392 c4iw_put_ep(&ep->com);
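/*
 * c4iw_accept_cr() is the ULP accept upcall: validate the requested
 * ORD/IRD against c4iw_max_read_depth (and the values negotiated for
 * enhanced MPA v2 connections), bind the QP to the endpoint and move
 * it to RTS via c4iw_modify_qp(), then send the MPA reply.  On success
 * the endpoint enters FPDU_MODE.
 */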
2396 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2399 struct c4iw_qp_attributes attrs;
2400 enum c4iw_qp_attr_mask mask;
2401 struct c4iw_ep *ep = to_ep(cm_id);
2402 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2403 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2405 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2406 if (state_read(&ep->com) == DEAD) {
2411 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2414 set_bit(ULP_ACCEPT, &ep->com.history);
2415 if ((conn_param->ord > c4iw_max_read_depth) ||
2416 (conn_param->ird > c4iw_max_read_depth)) {
2417 abort_connection(ep, NULL, GFP_KERNEL);
2422 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2423 if (conn_param->ord > ep->ird) {
2424 ep->ird = conn_param->ird;
2425 ep->ord = conn_param->ord;
2426 send_mpa_reject(ep, conn_param->private_data,
2427 conn_param->private_data_len);
2428 abort_connection(ep, NULL, GFP_KERNEL);
2432 if (conn_param->ird > ep->ord) {
2434 conn_param->ird = 1;
2436 abort_connection(ep, NULL, GFP_KERNEL);
2443 ep->ird = conn_param->ird;
2444 ep->ord = conn_param->ord;
2446 if (ep->mpa_attr.version != 2)
2447 if (peer2peer && ep->ird == 0)
2450 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
2452 cm_id->add_ref(cm_id);
2453 ep->com.cm_id = cm_id;
2457 /* bind QP to EP and move to RTS */
2458 attrs.mpa_attr = ep->mpa_attr;
2459 attrs.max_ird = ep->ird;
2460 attrs.max_ord = ep->ord;
2461 attrs.llp_stream_handle = ep;
2462 attrs.next_state = C4IW_QP_STATE_RTS;
2464 /* bind QP and TID with INIT_WR */
2465 mask = C4IW_QP_ATTR_NEXT_STATE |
2466 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2467 C4IW_QP_ATTR_MPA_ATTR |
2468 C4IW_QP_ATTR_MAX_IRD |
2469 C4IW_QP_ATTR_MAX_ORD;
2471 err = c4iw_modify_qp(ep->com.qp->rhp,
2472 ep->com.qp, mask, &attrs, 1);
2475 err = send_mpa_reply(ep, conn_param->private_data,
2476 conn_param->private_data_len);
2480 state_set(&ep->com, FPDU_MODE);
2481 established_upcall(ep);
2482 c4iw_put_ep(&ep->com);
2485 ep->com.cm_id = NULL;
2486 cm_id->rem_ref(cm_id);
2488 c4iw_put_ep(&ep->com);
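/*
 * c4iw_connect() starts an active open: allocate an endpoint, copy the
 * private data into the MPA packet, allocate an active TID (atid),
 * resolve the route and L2T entry via import_ep(), and finally issue
 * send_connect().  The error path unwinds each of these in reverse.
 */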
2492 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2494 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2499 if ((conn_param->ord > c4iw_max_read_depth) ||
2500 (conn_param->ird > c4iw_max_read_depth)) {
2504 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2506 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2510 init_timer(&ep->timer);
2511 ep->plen = conn_param->private_data_len;
2513 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2514 conn_param->private_data, ep->plen);
2515 ep->ird = conn_param->ird;
2516 ep->ord = conn_param->ord;
2518 if (peer2peer && ep->ord == 0)
2521 cm_id->add_ref(cm_id);
2523 ep->com.cm_id = cm_id;
2524 ep->com.qp = get_qhp(dev, conn_param->qpn);
2525 BUG_ON(!ep->com.qp);
2527 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
2531 * Allocate an active TID to initiate a TCP connection.
2533 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
2534 if (ep->atid == -1) {
2535 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
2539 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
2541 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
2542 ntohl(cm_id->local_addr.sin_addr.s_addr),
2543 ntohs(cm_id->local_addr.sin_port),
2544 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2545 ntohs(cm_id->remote_addr.sin_port));
2548 rt = find_route(dev,
2549 cm_id->local_addr.sin_addr.s_addr,
2550 cm_id->remote_addr.sin_addr.s_addr,
2551 cm_id->local_addr.sin_port,
2552 cm_id->remote_addr.sin_port, 0);
2554 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2555 err = -EHOSTUNREACH;
2560 err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
2561 ep->dst, ep->com.dev, true);
2563 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
2567 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2568 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2571 state_set(&ep->com, CONNECTING);
2573 ep->com.local_addr = cm_id->local_addr;
2574 ep->com.remote_addr = cm_id->remote_addr;
2576 /* send connect request to rnic */
2577 err = send_connect(ep);
2581 cxgb4_l2t_release(ep->l2t);
2583 dst_release(ep->dst);
2585 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2586 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2588 cm_id->rem_ref(cm_id);
2589 c4iw_put_ep(&ep->com);
2594 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2597 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2598 struct c4iw_listen_ep *ep;
2602 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2604 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2608 PDBG("%s ep %p\n", __func__, ep);
2609 cm_id->add_ref(cm_id);
2610 ep->com.cm_id = cm_id;
2612 ep->backlog = backlog;
2613 ep->com.local_addr = cm_id->local_addr;
2616 * Allocate a server TID.
2618 if (dev->rdev.lldi.enable_fw_ofld_conn)
2619 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
2621 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2623 if (ep->stid == -1) {
2624 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2628 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
2629 state_set(&ep->com, LISTEN);
2630 if (dev->rdev.lldi.enable_fw_ofld_conn) {
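/*
 * When fw_ofld_conn support is enabled the listener is backed by a
 * server filter rather than a server TID; cxgb4_create_server_filter()
 * can transiently return -EBUSY, so it is retried after a short sleep.
 */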
2632 err = cxgb4_create_server_filter(
2633 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2634 ep->com.local_addr.sin_addr.s_addr,
2635 ep->com.local_addr.sin_port,
2637 ep->com.dev->rdev.lldi.rxq_ids[0],
2640 if (err == -EBUSY) {
2641 set_current_state(TASK_UNINTERRUPTIBLE);
2642 schedule_timeout(usecs_to_jiffies(100));
2644 } while (err == -EBUSY);
2646 c4iw_init_wr_wait(&ep->com.wr_wait);
2647 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
2648 ep->stid, ep->com.local_addr.sin_addr.s_addr,
2649 ep->com.local_addr.sin_port,
2651 ep->com.dev->rdev.lldi.rxq_ids[0]);
2653 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
2658 cm_id->provider_data = ep;
2661 pr_err("%s cxgb4_create_server/filter failed err %d "
2662 "stid %d laddr %08x lport %d\n",
2663 __func__, err, ep->stid,
2664 ntohl(ep->com.local_addr.sin_addr.s_addr),
2665 ntohs(ep->com.local_addr.sin_port));
2666 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2668 cm_id->rem_ref(cm_id);
2669 c4iw_put_ep(&ep->com);
2675 int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2678 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2680 PDBG("%s ep %p\n", __func__, ep);
2683 state_set(&ep->com, DEAD);
2684 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
2685 err = cxgb4_remove_server_filter(
2686 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2687 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
2689 c4iw_init_wr_wait(&ep->com.wr_wait);
2690 err = listen_stop(ep);
2693 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
2696 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
2697 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2699 cm_id->rem_ref(cm_id);
2700 c4iw_put_ep(&ep->com);
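/*
 * c4iw_ep_disconnect() drives either an orderly half-close or an
 * abortive teardown depending on 'abrupt' and the current endpoint
 * state.  On a fatal adapter error the wire exchange is skipped and
 * the close is completed immediately.
 */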
2704 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2709 struct c4iw_rdev *rdev;
2711 mutex_lock(&ep->com.mutex);
2713 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2714 states[ep->com.state], abrupt);
2716 rdev = &ep->com.dev->rdev;
2717 if (c4iw_fatal_error(rdev)) {
2719 close_complete_upcall(ep);
2720 ep->com.state = DEAD;
2722 switch (ep->com.state) {
2730 ep->com.state = ABORTING;
2732 ep->com.state = CLOSING;
2735 set_bit(CLOSE_SENT, &ep->com.flags);
2738 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2742 ep->com.state = ABORTING;
2744 ep->com.state = MORIBUND;
2750 PDBG("%s ignoring disconnect ep %p state %u\n",
2751 __func__, ep, ep->com.state);
2760 set_bit(EP_DISC_ABORT, &ep->com.history);
2761 close_complete_upcall(ep);
2762 ret = send_abort(ep, NULL, gfp);
2764 set_bit(EP_DISC_CLOSE, &ep->com.history);
2765 ret = send_halfclose(ep, gfp);
2770 mutex_unlock(&ep->com.mutex);
2772 release_ep_resources(ep);
2776 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
2777 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
2780 int atid = be32_to_cpu(req->tid);
2782 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
2783 (__force u32) req->tid);
2787 switch (req->retval) {
2789 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
2790 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2791 send_fw_act_open_req(ep, atid);
2795 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2796 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2797 send_fw_act_open_req(ep, atid);
2802 pr_info("%s unexpected ofld conn wr retval %d\n",
2803 __func__, req->retval);
2806 pr_err("active ofld_connect_wr failure %d atid %d\n",
2808 mutex_lock(&dev->rdev.stats.lock);
2809 dev->rdev.stats.act_ofld_conn_fails++;
2810 mutex_unlock(&dev->rdev.stats.lock);
2811 connect_reply_upcall(ep, status2errno(req->retval));
2812 state_set(&ep->com, DEAD);
2813 remove_handle(dev, &dev->atid_idr, atid);
2814 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
2815 dst_release(ep->dst);
2816 cxgb4_l2t_release(ep->l2t);
2817 c4iw_put_ep(&ep->com);
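/*
 * passive_ofld_conn_reply() handles the firmware's reply to the
 * FW_OFLD_CONNECTION_WR issued for a filter-redirected SYN.  The cookie
 * carries the original skb holding the synthesized cpl_pass_accept_req;
 * on success the newly assigned TID is patched into OPCODE_TID and the
 * request is fed through the normal pass_accept_req() path.
 */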
2820 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
2821 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
2823 struct sk_buff *rpl_skb;
2824 struct cpl_pass_accept_req *cpl;
2827 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
2830 PDBG("%s passive open failure %d\n", __func__, req->retval);
2831 mutex_lock(&dev->rdev.stats.lock);
2832 dev->rdev.stats.pas_ofld_conn_fails++;
2833 mutex_unlock(&dev->rdev.stats.lock);
2836 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
2837 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
2838 (__force u32) htonl(
2839 (__force u32) req->tid)));
2840 ret = pass_accept_req(dev, rpl_skb);
2847 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2849 struct cpl_fw6_msg *rpl = cplhdr(skb);
2850 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
2852 switch (rpl->type) {
2854 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2856 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
2857 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
2858 switch (req->t_state) {
2860 active_ofld_conn_reply(dev, skb, req);
2863 passive_ofld_conn_reply(dev, skb, req);
2866 pr_err("%s unexpected ofld conn wr state %d\n",
2867 __func__, req->t_state);
2875 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
2878 u16 vlantag, len, hdr_len;
2880 struct cpl_rx_pkt *cpl = cplhdr(skb);
2881 struct cpl_pass_accept_req *req;
2882 struct tcp_options_received tmp_opt;
2884 /* Store values from cpl_rx_pkt in temporary location. */
2885 vlantag = (__force u16) cpl->vlan;
2886 len = (__force u16) cpl->len;
2887 l2info = (__force u32) cpl->l2info;
2888 hdr_len = (__force u16) cpl->hdr_len;
2891 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
2894 * We need to parse the TCP options from the SYN packet
2895 * to generate the cpl_pass_accept_req.
2897 memset(&tmp_opt, 0, sizeof(tmp_opt));
2898 tcp_clear_options(&tmp_opt);
2899 tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
2901 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
2902 memset(req, 0, sizeof(*req));
2903 req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
2904 V_SYN_MAC_IDX(G_RX_MACIDX(
2905 (__force int) htonl(l2info))) |
2907 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
2908 (__force int) htonl(l2info))) |
2909 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
2910 (__force int) htons(hdr_len))) |
2911 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
2912 (__force int) htons(hdr_len))) |
2913 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
2914 (__force int) htonl(l2info))));
2915 req->vlan = (__force __be16) vlantag;
2916 req->len = (__force __be16) len;
2917 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
2918 PASS_OPEN_TOS(tos));
2919 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
2920 if (tmp_opt.wscale_ok)
2921 req->tcpopt.wsf = tmp_opt.snd_wscale;
2922 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
2923 if (tmp_opt.sack_ok)
2924 req->tcpopt.sack = 1;
2925 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
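/*
 * send_fw_pass_open_req() asks the firmware to instantiate the TCB for
 * a connection that arrived via a filter.  The original SYN skb (which
 * now holds the synthesized cpl_pass_accept_req) is stashed in the WR
 * cookie so the reply handler can recover it, and the RSS queue id is
 * placed in opt2 so the firmware replies on the right queue.
 */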
2929 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
2930 __be32 laddr, __be16 lport,
2931 __be32 raddr, __be16 rport,
2932 u32 rcv_isn, u32 filter, u16 window,
2933 u32 rss_qid, u8 port_id)
2935 struct sk_buff *req_skb;
2936 struct fw_ofld_connection_wr *req;
2937 struct cpl_pass_accept_req *cpl = cplhdr(skb);
2939 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
2940 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
2941 memset(req, 0, sizeof(*req));
2942 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
2943 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
2944 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
2945 req->le.filter = (__force __be32) filter;
2946 req->le.lport = lport;
2947 req->le.pport = rport;
2948 req->le.u.ipv4.lip = laddr;
2949 req->le.u.ipv4.pip = raddr;
2950 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
2951 req->tcb.rcv_adv = htons(window);
2952 req->tcb.t_state_to_astid =
2953 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
2954 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
2955 V_FW_OFLD_CONNECTION_WR_ASTID(
2956 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
2959 * We store the qid in opt2 which will be used by the firmware
2960 * to send us the wr response.
2962 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
2965 * We initialize the MSS index in the TCB to 0xF so that
2966 * when the driver sends the cpl_pass_accept_rpl the TCB
2967 * picks up the correct value. If this were 0, TP would
2968 * ignore any value > 0 for the MSS index.
2970 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
2971 req->cookie = (unsigned long)skb;
2973 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
2974 cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
2978 * Handler for CPL_RX_PKT messages. These arrive when a filter is
2979 * being used instead of a server TID to redirect a SYN packet.
2980 * When a packet hits the filter it is redirected to the offload
2981 * queue and the driver tries to establish the connection using a
2982 * firmware work request.
2984 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
2987 unsigned int filter;
2988 struct ethhdr *eh = NULL;
2989 struct vlan_ethhdr *vlan_eh = NULL;
2991 struct tcphdr *tcph;
2992 struct rss_header *rss = (void *)skb->data;
2993 struct cpl_rx_pkt *cpl = (void *)skb->data;
2994 struct cpl_pass_accept_req *req = (void *)(rss + 1);
2995 struct l2t_entry *e;
2996 struct dst_entry *dst;
2998 struct c4iw_ep *lep;
3000 struct port_info *pi;
3001 struct net_device *pdev;
3005 struct neighbour *neigh;
3007 /* Drop all non-SYN packets */
3008 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
3012 * Drop all packets which did not hit the filter.
3013 * Unlikely to happen.
3015 if (!(rss->filter_hit && rss->filter_tid))
3019 * Calculate the server TID from the filter-hit index in cpl_rx_pkt.
3021 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
3022 - dev->rdev.lldi.tids->sftid_base
3023 + dev->rdev.lldi.tids->nstids;
3025 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3027 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
3031 if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
3032 eh = (struct ethhdr *)(req + 1);
3033 iph = (struct iphdr *)(eh + 1);
3035 vlan_eh = (struct vlan_ethhdr *)(req + 1);
3036 iph = (struct iphdr *)(vlan_eh + 1);
3037 skb->vlan_tci = ntohs(cpl->vlan);
3040 if (iph->version != 0x4)
3043 tcph = (struct tcphdr *)(iph + 1);
3044 skb_set_network_header(skb, (void *)iph - (void *)rss);
3045 skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3048 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3049 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3050 ntohs(tcph->source), iph->tos);
3052 rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
3055 pr_err("%s - failed to find dst entry!\n",
3060 neigh = dst_neigh_lookup_skb(dst, skb);
3063 pr_err("%s - failed to allocate neigh!\n",
3068 if (neigh->dev->flags & IFF_LOOPBACK) {
3069 pdev = ip_dev_find(&init_net, iph->daddr);
3070 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3072 pi = (struct port_info *)netdev_priv(pdev);
3073 tx_chan = cxgb4_port_chan(pdev);
3076 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3078 pi = (struct port_info *)netdev_priv(neigh->dev);
3079 tx_chan = cxgb4_port_chan(neigh->dev);
3082 pr_err("%s - failed to allocate l2t entry!\n",
3087 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3088 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
3089 window = (__force u16) htons((__force u16)tcph->window);
3091 /* Calculate the filter portion for the LE region. */
3092 filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
3095 * Synthesize the cpl_pass_accept_req. We have everything except the
3096 * TID. Once firmware sends a reply with TID we update the TID field
3097 * in cpl and pass it through the regular cpl_pass_accept_req path.
3099 build_cpl_pass_accept_req(skb, stid, iph->tos);
3100 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3101 tcph->source, ntohl(tcph->seq), filter, window,
3102 rss_qid, pi->port_id);
3103 cxgb4_l2t_release(e);
3111 * These are the real handlers that are called from a work queue.
3114 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
3115 [CPL_ACT_ESTABLISH] = act_establish,
3116 [CPL_ACT_OPEN_RPL] = act_open_rpl,
3117 [CPL_RX_DATA] = rx_data,
3118 [CPL_ABORT_RPL_RSS] = abort_rpl,
3119 [CPL_ABORT_RPL] = abort_rpl,
3120 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
3121 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
3122 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
3123 [CPL_PASS_ESTABLISH] = pass_establish,
3124 [CPL_PEER_CLOSE] = peer_close,
3125 [CPL_ABORT_REQ_RSS] = peer_abort,
3126 [CPL_CLOSE_CON_RPL] = close_con_rpl,
3127 [CPL_RDMA_TERMINATE] = terminate,
3128 [CPL_FW4_ACK] = fw4_ack,
3129 [CPL_FW6_MSG] = deferred_fw6_msg,
3130 [CPL_RX_PKT] = rx_pkt
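/*
 * Unlike the c4iw_handlers table below, which is invoked directly from
 * the LLD receive path, the handlers above run from the iw_cxgb4
 * workqueue (see process_work()) and may therefore sleep, e.g. to take
 * ep->com.mutex or wait for firmware replies.
 */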
3133 static void process_timeout(struct c4iw_ep *ep)
3135 struct c4iw_qp_attributes attrs;
3138 mutex_lock(&ep->com.mutex);
3139 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
3141 set_bit(TIMEDOUT, &ep->com.history);
3142 switch (ep->com.state) {
3144 __state_set(&ep->com, ABORTING);
3145 connect_reply_upcall(ep, -ETIMEDOUT);
3148 __state_set(&ep->com, ABORTING);
3152 if (ep->com.cm_id && ep->com.qp) {
3153 attrs.next_state = C4IW_QP_STATE_ERROR;
3154 c4iw_modify_qp(ep->com.qp->rhp,
3155 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
3158 __state_set(&ep->com, ABORTING);
3161 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
3162 __func__, ep, ep->hwtid, ep->com.state);
3165 mutex_unlock(&ep->com.mutex);
3167 abort_connection(ep, NULL, GFP_KERNEL);
3168 c4iw_put_ep(&ep->com);
3171 static void process_timedout_eps(void)
3175 spin_lock_irq(&timeout_lock);
3176 while (!list_empty(&timeout_list)) {
3177 struct list_head *tmp;
3179 tmp = timeout_list.next;
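/*
 * Drop timeout_lock while running the handler: process_timeout()
 * takes ep->com.mutex and may sleep.
 */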
3181 spin_unlock_irq(&timeout_lock);
3182 ep = list_entry(tmp, struct c4iw_ep, entry);
3183 process_timeout(ep);
3184 spin_lock_irq(&timeout_lock);
3186 spin_unlock_irq(&timeout_lock);
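/*
 * process_work() drains the rxq, dispatching each CPL message to its
 * work_handlers[] entry with the c4iw_dev recovered from skb->cb, and
 * then processes any endpoints whose timers have fired.
 */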
3189 static void process_work(struct work_struct *work)
3191 struct sk_buff *skb = NULL;
3192 struct c4iw_dev *dev;
3193 struct cpl_act_establish *rpl;
3194 unsigned int opcode;
3197 while ((skb = skb_dequeue(&rxq))) {
3199 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3200 opcode = rpl->ot.opcode;
3202 BUG_ON(!work_handlers[opcode]);
3203 ret = work_handlers[opcode](dev, skb);
3207 process_timedout_eps();
3210 static DECLARE_WORK(skb_work, process_work);
3212 static void ep_timeout(unsigned long arg)
3214 struct c4iw_ep *ep = (struct c4iw_ep *)arg;
3217 spin_lock(&timeout_lock);
3218 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
3219 list_add_tail(&ep->entry, &timeout_list);
3222 spin_unlock(&timeout_lock);
3224 queue_work(workq, &skb_work);
3228 * All the CM events are handled on a work queue to have a safe context.
3230 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
3234 * Save dev in the skb->cb area.
3236 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
3239 * Queue the skb and schedule the worker thread.
3241 skb_queue_tail(&rxq, skb);
3242 queue_work(workq, &skb_work);
3246 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
3248 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
3250 if (rpl->status != CPL_ERR_NONE) {
3251 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
3252 "for tid %u\n", rpl->status, GET_TID(rpl));
3258 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3260 struct cpl_fw6_msg *rpl = cplhdr(skb);
3261 struct c4iw_wr_wait *wr_waitp;
3264 PDBG("%s type %u\n", __func__, rpl->type);
3266 switch (rpl->type) {
3267 case FW6_TYPE_WR_RPL:
3268 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
3269 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
3270 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
3272 c4iw_wake_up(wr_waitp, ret ? -ret : 0);
3276 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3280 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
3288 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
3290 struct cpl_abort_req_rss *req = cplhdr(skb);
3292 struct tid_info *t = dev->rdev.lldi.tids;
3293 unsigned int tid = GET_TID(req);
3295 ep = lookup_tid(t, tid);
3297 printk(KERN_WARNING MOD
3298 "Abort on non-existent endpoint, tid %d\n", tid);
3302 if (is_neg_adv_abort(req->status)) {
3303 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
3308 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
3312 * Wake up any threads in rdma_init() or rdma_fini().
3313 * However, if we are on MPAv2 and want to retry with MPAv1
3314 * then don't wake up yet.
3316 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
3317 if (ep->com.state != MPA_REQ_SENT)
3318 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
3320 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
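/*
 * The abort itself is still processed by peer_abort() in workqueue
 * context (see work_handlers[CPL_ABORT_REQ_RSS]); this handler only
 * does the wake-ups that must not wait for the work queue.
 */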
3326 * Most upcalls from the T4 Core go to sched() to
3327 * schedule the processing on a work queue.
3329 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
3330 [CPL_ACT_ESTABLISH] = sched,
3331 [CPL_ACT_OPEN_RPL] = sched,
3332 [CPL_RX_DATA] = sched,
3333 [CPL_ABORT_RPL_RSS] = sched,
3334 [CPL_ABORT_RPL] = sched,
3335 [CPL_PASS_OPEN_RPL] = sched,
3336 [CPL_CLOSE_LISTSRV_RPL] = sched,
3337 [CPL_PASS_ACCEPT_REQ] = sched,
3338 [CPL_PASS_ESTABLISH] = sched,
3339 [CPL_PEER_CLOSE] = sched,
3340 [CPL_CLOSE_CON_RPL] = sched,
3341 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
3342 [CPL_RDMA_TERMINATE] = sched,
3343 [CPL_FW4_ACK] = sched,
3344 [CPL_SET_TCB_RPL] = set_tcb_rpl,
3345 [CPL_FW6_MSG] = fw6_msg,
3346 [CPL_RX_PKT] = sched
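/*
 * CPL_ABORT_REQ_RSS, CPL_SET_TCB_RPL and CPL_FW6_MSG get (at least
 * partial) inline treatment here because they may need to wake up
 * sleepers or complete wr_wait handshakes without waiting for the
 * work queue; everything else is simply deferred via sched().
 */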
3349 int __init c4iw_cm_init(void)
3351 spin_lock_init(&timeout_lock);
3352 skb_queue_head_init(&rxq);
3354 workq = create_singlethread_workqueue("iw_cxgb4");
3361 void __exit c4iw_cm_term(void)
3363 WARN_ON(!list_empty(&timeout_list));
3364 flush_workqueue(workq);
3365 destroy_workqueue(workq);