/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
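
/*
 * Every SMP-based query helper in this file follows the same pattern:
 * build a Get MAD with init_query_mad(), pick an attribute, and issue it
 * against the local SMA through mlx4_MAD_IFC().  A minimal sketch of the
 * pattern, error handling elided (this is illustrative, not an extra
 * helper in this driver):
 *
 *	struct ib_smp *in  = kzalloc(sizeof *in, GFP_KERNEL);
 *	struct ib_smp *out = kmalloc(sizeof *out, GFP_KERNEL);
 *
 *	init_query_mad(in);
 *	in->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in, out);
 *	... on success, out->data holds the raw attribute payload ...
 */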

static union ib_gid zgid;

static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
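
/*
 * Consumers (kernel ULPs, or userspace via uverbs) observe the limits
 * computed above through the standard verbs query.  A hedged ULP-side
 * sketch (the setup_fast_reg() helper name is illustrative only):
 *
 *	struct ib_device_attr attr;
 *
 *	if (!ib_query_device(ibdev, &attr) &&
 *	    (attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
 *		setup_fast_reg(attr.max_fast_reg_page_list_len);
 */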

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
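
/*
 * The per-port link layer is fixed by the mlx4 core at device init:
 * MLX4_PORT_TYPE_IB ports speak native InfiniBand, everything else is
 * IBoE/RoCE on top of an mlx4_en netdev.  Most paths below branch on
 * exactly this test, e.g. mlx4_ib_query_port() dispatching to
 * ib_link_query_port() vs. eth_link_query_port().
 */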

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
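
/*
 * The PortInfo decode above is plain byte/bit slicing of the 64-byte
 * attribute payload laid out by the IBA spec.  For instance, the 16-bit
 * LID lives at payload offset 16 and the 4-bit SM SL in the low nibble
 * of byte 36, so a hand-rolled decode of just those two fields reads:
 *
 *	u16 lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
 *	u8  sl  = out_mad->data[36] & 0xf;
 */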

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
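
/*
 * For IBoE the active IB MTU is derived from the netdev MTU:
 * iboe_get_mtu() subtracts the IBoE transport headers and rounds down
 * to an IB MTU enum, so a standard 1500-byte netdev typically yields
 * IB_MTU_1024, clamped against props->max_mtu above.
 */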

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
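
/*
 * A full GID is assembled from two SMP queries: bytes 8..15 of PortInfo
 * provide the 64-bit subnet prefix, and GuidInfo provides the low 8
 * bytes.  GUIDs are reported in blocks of eight, hence
 * attr_mod = index / 8 and the (index % 8) * 8 byte offset into the
 * returned block.
 */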

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
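
/*
 * P_Keys are fetched in blocks of 32 16-bit entries: attr_mod selects
 * block index / 32, and the entry is picked at index % 32 within the
 * returned payload.  E.g. pkey index 40 reads block 1, entry 8.
 */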

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
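
/*
 * Userspace (libmlx4) maps two pages through this hook: page offset 0
 * is the uncached UAR doorbell page, page offset 1 the write-combining
 * BlueFlame page that sits num_uars pages above it in the BAR.  Hedged
 * userspace-side sketch (not part of this driver):
 *
 *	uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd, 0);
 *	bf  = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd,
 *		   page_size);	.. only when bf_reg_size != 0 ..
 */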

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}
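
/*
 * For IBoE multicast, rdma_get_mcast_mac() derives an ethernet
 * multicast MAC (33:33:... per the IPv6 multicast convention) from the
 * MGID, and that MAC is programmed into the netdev's filter with
 * dev_mc_add() under RTNL so the NIC actually receives the group's
 * frames.
 */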

struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      MLX4_PROT_IB_IPV6, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    MLX4_PROT_IB_IPV6, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
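
/*
 * Worked example of the derivation above: MAC 00:11:22:33:44:55 with no
 * VLAN yields the interface ID 02:11:22:ff:fe:33:44:55 (ff:fe filler
 * bytes, universal/local bit flipped by the final eui[0] ^= 2); with
 * VLAN 7 the filler becomes 00:07 instead of ff:fe.
 */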

static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_warn("set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}

static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}

static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}

static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[32];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
					   dev->caps.num_ports);

	/* Init eq table */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			sprintf(name, "mlx4-ib-%d-%d@%s",
				i, j, dev->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQs */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
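
/*
 * Example of the EQ accounting above: a 2-port IB device with a
 * comp_pool of 24 gets eq_per_port = rounddown_pow_of_two(24 / 2) = 8,
 * i.e. 16 extra EQs on top of the legacy num_comp_vectors, and the
 * enlarged total is advertised to CQ creators; mlx4_ib_free_eqs() then
 * releases only those added entries.
 */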

static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;

	pr_info_once("%s", mlx4_ib_version);

	if (mlx4_is_mfunc(dev)) {
		pr_warn("IB not yet supported in SRIOV\n");
		return NULL;
	}

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else
			ibdev->counters[i] = -1;
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		pr_warn("failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};
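
/*
 * mlx4_register_interface() below causes the mlx4 core to call .add
 * once for every ConnectX function it has already probed (and for any
 * hot-plugged later), so one mlx4_ib_dev is instantiated per HCA;
 * .event fans core events (port up/down, catastrophic error) into the
 * IB event dispatch seen in mlx4_ib_event().
 */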

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);