/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/tcp.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif
static DEFINE_MUTEX(pkey_mutex);
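
/*
 * Address handle (AH) bookkeeping: ipoib_create_ah() wraps ib_create_ah()
 * in a kref-counted ipoib_ah.  ipoib_free_ah() below does not destroy the
 * AH immediately; it only moves it to the dead_ahs list, and the reaper
 * work destroys it once every send posted through it has completed.
 */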
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        vah = ib_create_ah(pd, attr);
        if (IS_ERR(vah)) {
                kfree(ah);
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}
void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        ib_dma_unmap_single(priv->ca, mapping[0],
                            IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                            DMA_FROM_DEVICE);
}
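
/*
 * Receive path: each slot of the RX ring owns one DMA-mapped skb of
 * IPOIB_UD_BUF_SIZE(priv->max_ib_mtu) bytes.  ipoib_ib_post_receive()
 * (re)posts a slot's buffer as a receive work request; if the post fails,
 * the buffer is unmapped and freed so the slot is left empty rather than
 * leaking the mapping.
 */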
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        u64 *mapping;

        buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

        skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
        if (unlikely(!skb))
                return NULL;

        /*
         * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
         * 64 bytes aligned.
         */
        skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        priv->rx_ring[id].skb = skb;
        return skb;
error:
        dev_kfree_skb_any(skb);
        return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}
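
/*
 * ipoib_ib_handle_rx_wc() runs from the NAPI poll loop for every UD
 * receive completion: it validates the wr_id, drops multicast packets the
 * HCA echoed back to us, allocates a replacement buffer before handing
 * the old one up the stack, classifies the packet from the GRH dgid, and
 * finally reposts the ring slot.
 */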
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        /*
         * Drop packets that this interface sent, i.e. multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);

        skb_put(skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_add_pseudo_hdr(skb);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        if ((dev->features & NETIF_F_RXCSUM) &&
            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                   skb_frag_page(frag),
                                                   frag->page_offset, skb_frag_size(frag),
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
                        struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(priv->ca, mapping[i + off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }
}
/*
 * As a result of a completion error the QP can be transitioned to the SQE
 * state.  This function checks whether the (send) QP is in the SQE state
 * and, if so, moves it back to RTS so that it is functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
        struct ipoib_qp_state_validate *qp_work =
                container_of(work, struct ipoib_qp_state_validate, work);

        struct ipoib_dev_priv *priv = qp_work->priv;
        struct ib_qp_attr qp_attr;
        struct ib_qp_init_attr query_init_attr;
        int ret;

        ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
        if (ret) {
                ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
                           __func__, ret);
                goto free_res;
        }

        pr_info("%s: QP: 0x%x is in state: %d\n",
                __func__, priv->qp->qp_num, qp_attr.qp_state);

        /* currently we only support the SQE->RTS transition */
        if (qp_attr.qp_state == IB_QPS_SQE) {
                qp_attr.qp_state = IB_QPS_RTS;

                ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
                if (ret) {
                        pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
                                ret, priv->qp->qp_num);
                        goto free_res;
                }
                pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
                        __func__, priv->qp->qp_num);
        } else {
                pr_warn("QP (%d) will stay in state: %d\n",
                        priv->qp->qp_num, qp_attr.qp_state);
        }

free_res:
        kfree(qp_work);
}
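
/*
 * ipoib_ib_handle_tx_wc() reclaims a TX ring slot: it unmaps and frees
 * the skb, wakes the netdev queue once the ring drains back to half full,
 * and on a hard completion error schedules the QP state validation work
 * defined above so an SQE QP can be recovered.
 */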
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_qp_state_validate *qp_work;

                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
                qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
                if (!qp_work) {
                        ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
                                   __func__, priv->qp->qp_num);
                        return;
                }

                INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
                qp_work->priv = priv;
                queue_work(priv->wq, &qp_work->work);
        }
}
static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i)
                ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

        return n == MAX_SEND_CQE;
}
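
/*
 * NAPI poll routine for the receive CQ.  CM and UD completions share the
 * CQ and are told apart by the IPOIB_OP_CM/IPOIB_OP_RECV bits in wr_id.
 * When the budget is not exhausted the CQ is re-armed with
 * IB_CQ_REPORT_MISSED_EVENTS, and polling restarts if events were missed.
 */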
int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done = 0;
        int t, n, i;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else
                                ipoib_cm_handle_tx_wc(priv->dev, wc);
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        napi_schedule(&priv->napi);
}
static void drain_tx_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_tx_lock(dev);
        while (poll_tx(priv))
                ; /* nothing */

        if (netif_queue_stopped(dev))
                mod_timer(&priv->poll_timer, jiffies + 1);

        netif_tx_unlock(dev);
}
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

        mod_timer(&priv->poll_timer, jiffies);
}
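
/*
 * Send path: post_send() fills in the cached UD work request (address
 * handle, remote QPN, and either IB_WR_LSO with the prepared header or a
 * plain IB_WR_SEND) and posts it; ipoib_send() below does the length
 * checks, DMA mapping and ring accounting around it.
 */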
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        struct sk_buff *skb = tx_req->skb;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id    = wr_id;
        priv->tx_wr.remote_qpn  = qpn;
        priv->tx_wr.ah          = address;

        if (head) {
                priv->tx_wr.mss         = skb_shinfo(skb)->gso_size;
                priv->tx_wr.header      = head;
                priv->tx_wr.hlen        = hlen;
                priv->tx_wr.wr.opcode   = IB_WR_LSO;
        } else
                priv->tx_wr.wr.opcode   = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;
        }

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
}
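
/*
 * AH reaping: an AH queued on dead_ahs may only be destroyed once
 * tx_tail has passed the last send posted through it, which is what the
 * signed comparison below checks.  ipoib_reap_ah() reschedules itself
 * roughly once a second until IPOIB_STOP_REAPER is set.
 */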
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(priv->wq, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}
static void ipoib_flush_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(priv->wq);
        ipoib_reap_ah(&priv->ah_reap_task.work);
}
static void ipoib_stop_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        ipoib_flush_ah(dev);
}
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
        drain_tx_cq((struct net_device *)ctx);
}
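
/*
 * Bring-up sequence: check that our P_Key is present, move the QP into a
 * usable state, pre-post the receive ring, open the connected-mode side,
 * start the AH reaper and finally enable NAPI.  A failure after the QP is
 * initialized unwinds through ipoib_ib_dev_stop().
 */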
int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
                           (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
                return -1;
        }

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                goto dev_stop;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                goto dev_stop;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(priv->wq, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);

        return 0;

dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
        ipoib_ib_dev_stop(dev);
        return -1;
}
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!(priv->pkey & 0x7fff) ||
            ib_find_pkey(priv->ca, priv->port, priv->pkey,
                         &priv->pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}
static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;

        /*
         * We call completion handling routines that expect to be
         * called from the BH-disabled NAPI poll context, so disable
         * BHs here too.
         */
        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else
                                ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}
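
/*
 * Tear-down: move the QP to the error state so outstanding work requests
 * complete with flush errors, wait up to five seconds for the rings to
 * drain, and if they do not, assume the hardware is wedged and reclaim
 * the pending TX and RX buffers by hand before resetting the QP.
 */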
int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize it
         * when all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
                    (unsigned long) dev);

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}
/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
        u16 prev_pkey;
        int result;

        prev_pkey = priv->pkey;
        result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
        if (result) {
                ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
                           priv->port, result);
                return result;
        }

        priv->pkey |= 0x8000;

        if (prev_pkey != priv->pkey) {
                ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
                          prev_pkey, priv->pkey);
                /*
                 * Update the pkey in the broadcast address, while making sure to set
                 * the full membership bit, so that we join the right broadcast group.
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
                return 0;
        }

        return 1;
}
/*
 * Returns 0 if the pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
        u16 old_index = priv->pkey_index;

        priv->pkey_index = 0;
        ipoib_pkey_dev_check_presence(priv->dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            (old_index == priv->pkey_index))
                return 1;

        return 0;
}
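
/*
 * Device flush: IPOIB_FLUSH_LIGHT invalidates paths and multicast state,
 * IPOIB_FLUSH_NORMAL additionally takes the IB side of the device down,
 * and IPOIB_FLUSH_HEAVY restarts the QP (typically after a P_Key change).
 * Child interfaces are flushed first, then the parent.
 */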
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                 enum ipoib_flush_level level,
                                 int nesting)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;

        down_read_nested(&priv->vlan_rwsem, nesting);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level, nesting + 1);

        up_read(&priv->vlan_rwsem);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
            level != IPOIB_FLUSH_HEAVY) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                /* interface is down; update the pkey and leave. */
                if (level == IPOIB_FLUSH_HEAVY) {
                        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
                                update_parent_pkey(priv);
                        else
                                update_child_pkey(priv);
                }
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                /* child devices chase their origin pkey value, while non-child
                 * (parent) devices always take whatever is present in pkey index 0
                 */
                if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                        result = update_child_pkey(priv);
                        if (result) {
                                /* restart the QP only if the P_Key index changed */
                                ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
                        }
                } else {
                        result = update_parent_pkey(priv);
                        /* restart the QP only if the P_Key value changed */
                        if (result) {
                                ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
                                return;
                        }
                }
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                int oper_up;

                ipoib_mark_paths_invalid(dev);
                /* Mark the IPoIB operation as down to prevent races between
                 * the flush flow, which leaves the MCG, and on-the-fly joins
                 * that can happen during that time.  The mcast restart task
                 * should deal with join requests we missed.
                 */
                oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
                if (oper_up)
                        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev);

        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);
                if (ipoib_ib_dev_open(dev) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here; don't bring it back up if it's not configured up.
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
}
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");
        /*
         * We must make sure there are no more (path) completions
         * that may wish to touch priv fields that are no longer valid.
         */
        ipoib_flush_paths(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        /*
         * None of our ah references are freed until after
         * ipoib_mcast_dev_flush(), ipoib_flush_paths, and the neighbor
         * garbage collection have stopped and been reaped.  That should
         * all be done now, so make a final ah flush.
         */
        ipoib_stop_ah(dev);

        ipoib_transport_dev_cleanup(dev);
}