2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/io-mapping.h>
43 #include <linux/delay.h>
44 #include <linux/kmod.h>
46 #include <linux/mlx4/device.h>
47 #include <linux/mlx4/doorbell.h>
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
55 MODULE_LICENSE("Dual BSD/GPL");
56 MODULE_VERSION(DRV_VERSION);
58 struct workqueue_struct *mlx4_wq;
60 #ifdef CONFIG_MLX4_DEBUG
62 int mlx4_debug_level = 0;
63 module_param_named(debug_level, mlx4_debug_level, int, 0644);
64 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
66 #endif /* CONFIG_MLX4_DEBUG */
71 module_param(msi_x, int, 0444);
72 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
74 #else /* CONFIG_PCI_MSI */
78 #endif /* CONFIG_PCI_MSI */
80 static uint8_t num_vfs[3] = {0, 0, 0};
81 static int num_vfs_argc;
82 module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84 "num_vfs=port1,port2,port1+2");
86 static uint8_t probe_vf[3] = {0, 0, 0};
87 static int probe_vfs_argc;
88 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
89 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
90 "probe_vf=port1,port2,port1+2");
92 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
93 module_param_named(log_num_mgm_entry_size,
94 mlx4_log_num_mgm_entry_size, int, 0444);
95 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
96 " of qp per mcg, for example:"
97 " 10 gives 248. range: 7 <="
98 " log_num_mgm_entry_size <= 12."
99 " To activate device managed"
100 " flow steering when available, set to -1");
102 static bool enable_64b_cqe_eqe = true;
103 module_param(enable_64b_cqe_eqe, bool, 0444);
104 MODULE_PARM_DESC(enable_64b_cqe_eqe,
105 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
107 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
108 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
109 MLX4_FUNC_CAP_DMFS_A0_STATIC)
111 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
113 static char mlx4_version[] =
114 DRV_NAME ": Mellanox ConnectX core driver v"
115 DRV_VERSION " (" DRV_RELDATE ")\n";
117 static struct mlx4_profile default_profile = {
120 .rdmarc_per_qp = 1 << 4,
124 .num_mtt = 1 << 20, /* It is really num mtt segments */
127 static struct mlx4_profile low_mem_profile = {
130 .rdmarc_per_qp = 1 << 4,
137 static int log_num_mac = 7;
138 module_param_named(log_num_mac, log_num_mac, int, 0444);
139 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
141 static int log_num_vlan;
142 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
143 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
144 /* Log2 max number of VLANs per ETH port (0-7) */
145 #define MLX4_LOG_NUM_VLANS 7
146 #define MLX4_MIN_LOG_NUM_VLANS 0
147 #define MLX4_MIN_LOG_NUM_MAC 1
149 static bool use_prio;
150 module_param_named(use_prio, use_prio, bool, 0444);
151 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
153 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
154 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
155 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
157 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
158 static int arr_argc = 2;
159 module_param_array(port_type_array, int, &arr_argc, 0444);
160 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
161 "1 for IB, 2 for Ethernet");
163 struct mlx4_port_config {
164 struct list_head list;
165 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
166 struct pci_dev *pdev;
169 static atomic_t pf_loading = ATOMIC_INIT(0);
171 int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type)
176 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
177 for (i = 0; i < dev->caps.num_ports - 1; i++) {
178 if (port_type[i] != port_type[i + 1]) {
179 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
185 for (i = 0; i < dev->caps.num_ports; i++) {
186 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
187 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
195 static void mlx4_set_port_mask(struct mlx4_dev *dev)
199 for (i = 1; i <= dev->caps.num_ports; ++i)
200 dev->caps.port_mask[i] = dev->caps.port_type[i];
204 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
207 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
210 struct mlx4_func func;
212 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
213 err = mlx4_QUERY_FUNC(dev, &func, 0);
215 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
218 dev_cap->max_eqs = func.max_eq;
219 dev_cap->reserved_eqs = func.rsvd_eqs;
220 dev_cap->reserved_uars = func.rsvd_uars;
221 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
226 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
228 struct mlx4_caps *dev_cap = &dev->caps;
230 /* FW does not support it, or it was cancelled by the user */
231 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
232 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
235 /* Must have 64B CQE_EQE enabled by FW to use bigger stride
236 * When FW has NCSI it may decide not to report 64B CQE/EQEs
238 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
239 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
240 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
241 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
245 if (cache_line_size() == 128 || cache_line_size() == 256) {
246 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
247 /* Changing the real data inside CQE size to 32B */
248 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
249 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
251 if (mlx4_is_master(dev))
252 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
254 if (cache_line_size() != 32 && cache_line_size() != 64)
255 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
256 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
257 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
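/* Informal summary of the choice above: on 128B/256B cache line systems
 * each CQE/EQE is written into its own cache-line-sized stride with 32B
 * of real data, so no completion shares or straddles a cache line; on
 * 32B/64B systems stride mode stays off.
 */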
261 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
262 struct mlx4_port_cap *port_cap)
264 dev->caps.vl_cap[port] = port_cap->max_vl;
265 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
266 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
267 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
268 /* set gid and pkey table operating lengths by default
269 * to non-sriov values
271 dev->caps.gid_table_len[port] = port_cap->max_gids;
272 dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
273 dev->caps.port_width_cap[port] = port_cap->max_port_width;
274 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
275 dev->caps.def_mac[port] = port_cap->def_mac;
276 dev->caps.supported_type[port] = port_cap->supported_port_types;
277 dev->caps.suggested_type[port] = port_cap->suggested_type;
278 dev->caps.default_sense[port] = port_cap->default_sense;
279 dev->caps.trans_type[port] = port_cap->trans_type;
280 dev->caps.vendor_oui[port] = port_cap->vendor_oui;
281 dev->caps.wavelength[port] = port_cap->wavelength;
282 dev->caps.trans_code[port] = port_cap->trans_code;
287 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
288 struct mlx4_port_cap *port_cap)
292 err = mlx4_QUERY_PORT(dev, port, port_cap);
295 mlx4_err(dev, "QUERY_PORT command failed.\n");
300 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
302 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
305 if (mlx4_is_mfunc(dev)) {
306 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
307 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
311 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
313 "Keep FCS is not supported - Disabling Ignore FCS");
314 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
319 #define MLX4_A0_STEERING_TABLE_SIZE 256
320 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
325 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
327 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
330 mlx4_dev_cap_dump(dev, dev_cap);
332 if (dev_cap->min_page_sz > PAGE_SIZE) {
333 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
334 dev_cap->min_page_sz, PAGE_SIZE);
337 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
338 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
339 dev_cap->num_ports, MLX4_MAX_PORTS);
343 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
344 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
347 pci_resource_len(dev->persist->pdev, 2));
351 dev->caps.num_ports = dev_cap->num_ports;
352 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
353 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
354 dev->caps.num_sys_eqs :
356 for (i = 1; i <= dev->caps.num_ports; ++i) {
357 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
359 mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
364 dev->caps.uar_page_size = PAGE_SIZE;
365 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
366 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
367 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
368 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
369 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
370 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
371 dev->caps.max_wqes = dev_cap->max_qp_sz;
372 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
373 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
374 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
375 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
376 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
377 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
379 * Subtract 1 from the limit because we need to allocate a
380 * spare CQE so the HCA HW can tell the difference between an
381 * empty CQ and a full CQ.
383 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
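/* e.g. with max_cq_sz = 4096, max_cqes is 4095: if all 4096 slots could
 * hold valid CQEs, a full CQ and an empty CQ would both have producer
 * index == consumer index and be indistinguishable to the hardware.
 */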
384 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
385 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0;
394 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
395 dev_cap->max_xrcds : 0;
396 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
398 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
399 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
400 dev->caps.flags = dev_cap->flags;
401 dev->caps.flags2 = dev_cap->flags2;
402 dev->caps.bmme_flags = dev_cap->bmme_flags;
403 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
404 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
408 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
409 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
410 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
411 /* Don't do sense port on multifunction devices (for now at least) */
412 if (mlx4_is_mfunc(dev))
413 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
415 if (mlx4_low_memory_profile()) {
416 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
417 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
419 dev->caps.log_num_macs = log_num_mac;
420 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
423 for (i = 1; i <= dev->caps.num_ports; ++i) {
424 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
425 if (dev->caps.supported_type[i]) {
426 /* if only ETH is supported - assign ETH */
427 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
428 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
429 /* if only IB is supported, assign IB */
430 else if (dev->caps.supported_type[i] ==
432 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
434 /* if IB and ETH are supported, we set the port
435 * type according to user selection of port type;
436 * if user selected none, take the FW hint */
437 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
438 dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
439 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
441 dev->caps.port_type[i] = port_type_array[i - 1];
445 * Link sensing is allowed on the port if 3 conditions are true:
446 * 1. Both protocols are supported on the port.
447 * 2. Different types are supported on the port.
448 * 3. FW declared that it supports link sensing.
450 mlx4_priv(dev)->sense.sense_allowed[i] =
451 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
452 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
453 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
456 * If "default_sense" bit is set, we move the port to "AUTO" mode
457 * and perform sense_port FW command to try and set the correct
458 * port type from the beginning
460 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
461 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
462 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
463 mlx4_SENSE_PORT(dev, i, &sensed_port);
464 if (sensed_port != MLX4_PORT_TYPE_NONE)
465 dev->caps.port_type[i] = sensed_port;
467 dev->caps.possible_type[i] = dev->caps.port_type[i];
470 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
471 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
472 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
473 i, 1 << dev->caps.log_num_macs);
475 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
476 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
477 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
478 i, 1 << dev->caps.log_num_vlans);
482 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
484 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
485 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
486 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
487 (1 << dev->caps.log_num_macs) *
488 (1 << dev->caps.log_num_vlans) *
490 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
492 if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
493 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
494 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
496 dev->caps.dmfs_high_rate_qpn_base =
497 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
499 if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
500 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
501 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
502 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
503 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
505 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
506 dev->caps.dmfs_high_rate_qpn_base =
507 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
508 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
511 dev->caps.rl_caps = dev_cap->rl_caps;
513 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
514 dev->caps.dmfs_high_rate_qpn_range;
516 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
517 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
518 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
519 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
521 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
523 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
525 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
526 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
527 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
528 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
531 if (dev_cap->flags2 &
532 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
533 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
534 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
535 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
536 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
540 if ((dev->caps.flags &
541 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
543 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
545 if (!mlx4_is_slave(dev)) {
546 mlx4_enable_cqe_eqe_stride(dev);
547 dev->caps.alloc_res_qp_mask =
548 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
551 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
552 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
553 mlx4_warn(dev, "Old device ETS support detected\n");
554 mlx4_warn(dev, "Consider upgrading device FW.\n");
555 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
559 dev->caps.alloc_res_qp_mask = 0;
562 mlx4_enable_ignore_fcs(dev);
567 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
568 enum pci_bus_speed *speed,
569 enum pcie_link_width *width)
571 u32 lnkcap1, lnkcap2;
574 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
576 *speed = PCI_SPEED_UNKNOWN;
577 *width = PCIE_LNK_WIDTH_UNKNOWN;
579 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
581 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
583 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
584 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
585 *speed = PCIE_SPEED_8_0GT;
586 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
587 *speed = PCIE_SPEED_5_0GT;
588 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
589 *speed = PCIE_SPEED_2_5GT;
592 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
593 if (!lnkcap2) { /* pre-r3.0 */
594 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
595 *speed = PCIE_SPEED_5_0GT;
596 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
597 *speed = PCIE_SPEED_2_5GT;
601 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
603 err2 ? err2 : -EINVAL;
608 static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
610 enum pcie_link_width width, width_cap;
611 enum pci_bus_speed speed, speed_cap;
614 #define PCIE_SPEED_STR(speed) \
615 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
616 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
617 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
620 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
623 "Unable to determine PCIe device BW capabilities\n");
627 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
628 if (err || speed == PCI_SPEED_UNKNOWN ||
629 width == PCIE_LNK_WIDTH_UNKNOWN) {
631 "Unable to determine PCI device chain minimum BW\n");
635 if (width != width_cap || speed != speed_cap)
637 "PCIe BW is different than device's capability\n");
639 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
640 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
641 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
646 /* The function checks whether there are any live VFs and returns their number */
647 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
649 struct mlx4_priv *priv = mlx4_priv(dev);
650 struct mlx4_slave_state *s_state;
654 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
655 s_state = &priv->mfunc.master.slave_state[i];
656 if (s_state->active && s_state->last_cmd !=
657 MLX4_COMM_CMD_RESET) {
658 mlx4_warn(dev, "%s: slave: %d is still active\n",
666 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
668 u32 qk = MLX4_RESERVED_QKEY_BASE;
670 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
671 qpn < dev->phys_caps.base_proxy_sqpn)
674 if (qpn >= dev->phys_caps.base_tunnel_sqpn)
676 qk += qpn - dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX;
678 qk += qpn - dev->phys_caps.base_proxy_sqpn;
682 EXPORT_SYMBOL(mlx4_get_parav_qkey);
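/* Resulting qkey layout (a sketch, assuming the proxy SQP range is
 * 8 * MLX4_MFUNC_MAX QPNs wide, i.e. base_tunnel_sqpn ==
 * base_proxy_sqpn + 8 * MLX4_MFUNC_MAX): proxy SQPs map to qkeys
 * MLX4_RESERVED_QKEY_BASE .. MLX4_RESERVED_QKEY_BASE +
 * 8 * MLX4_MFUNC_MAX - 1, and tunnel SQPs to the next
 * 8 * MLX4_MFUNC_MAX values, so the two ranges stay disjoint.
 */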
684 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
686 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
688 if (!mlx4_is_master(dev))
691 priv->virt2phys_pkey[slave][port - 1][i] = val;
693 EXPORT_SYMBOL(mlx4_sync_pkey_table);
695 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
697 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
699 if (!mlx4_is_master(dev))
702 priv->slave_node_guids[slave] = guid;
704 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
706 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
708 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
710 if (!mlx4_is_master(dev))
713 return priv->slave_node_guids[slave];
715 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
717 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
719 struct mlx4_priv *priv = mlx4_priv(dev);
720 struct mlx4_slave_state *s_slave;
722 if (!mlx4_is_master(dev))
725 s_slave = &priv->mfunc.master.slave_state[slave];
726 return !!s_slave->active;
728 EXPORT_SYMBOL(mlx4_is_slave_active);
730 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
731 struct mlx4_dev_cap *dev_cap,
732 struct mlx4_init_hca_param *hca_param)
734 dev->caps.steering_mode = hca_param->steering_mode;
735 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
736 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
737 dev->caps.fs_log_max_ucast_qp_range_size =
738 dev_cap->fs_log_max_ucast_qp_range_size;
740 dev->caps.num_qp_per_mgm =
741 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
743 mlx4_dbg(dev, "Steering mode is: %s\n",
744 mlx4_steering_mode_str(dev->caps.steering_mode));
747 static int mlx4_slave_cap(struct mlx4_dev *dev)
751 struct mlx4_dev_cap dev_cap;
752 struct mlx4_func_cap func_cap;
753 struct mlx4_init_hca_param hca_param;
756 memset(&hca_param, 0, sizeof(hca_param));
757 err = mlx4_QUERY_HCA(dev, &hca_param);
759 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
763 /* fail if the hca has an unknown global capability
764 * at this time, global_caps should always be zero
766 if (hca_param.global_caps) {
767 mlx4_err(dev, "Unknown hca global capabilities\n");
771 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
773 dev->caps.hca_core_clock = hca_param.hca_core_clock;
775 memset(&dev_cap, 0, sizeof(dev_cap));
776 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
777 err = mlx4_dev_cap(dev, &dev_cap);
779 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
783 err = mlx4_QUERY_FW(dev);
785 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
787 page_size = ~dev->caps.page_size_cap + 1;
788 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
789 if (page_size > PAGE_SIZE) {
790 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
791 page_size, PAGE_SIZE);
795 /* slave gets uar page size from QUERY_HCA fw command */
796 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
798 /* TODO: relax this assumption */
799 if (dev->caps.uar_page_size != PAGE_SIZE) {
800 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
801 dev->caps.uar_page_size, PAGE_SIZE);
805 memset(&func_cap, 0, sizeof(func_cap));
806 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
808 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
813 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
814 PF_CONTEXT_BEHAVIOUR_MASK) {
815 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
816 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
820 dev->caps.num_ports = func_cap.num_ports;
821 dev->quotas.qp = func_cap.qp_quota;
822 dev->quotas.srq = func_cap.srq_quota;
823 dev->quotas.cq = func_cap.cq_quota;
824 dev->quotas.mpt = func_cap.mpt_quota;
825 dev->quotas.mtt = func_cap.mtt_quota;
826 dev->caps.num_qps = 1 << hca_param.log_num_qps;
827 dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
828 dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
829 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
830 dev->caps.num_eqs = func_cap.max_eq;
831 dev->caps.reserved_eqs = func_cap.reserved_eq;
832 dev->caps.reserved_lkey = func_cap.reserved_lkey;
833 dev->caps.num_pds = MLX4_NUM_PDS;
834 dev->caps.num_mgms = 0;
835 dev->caps.num_amgms = 0;
837 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
838 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
839 dev->caps.num_ports, MLX4_MAX_PORTS);
843 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
844 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
845 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
846 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
847 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
849 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
850 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
851 !dev->caps.qp0_qkey) {
856 for (i = 1; i <= dev->caps.num_ports; ++i) {
857 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
859 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
863 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
864 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
865 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
866 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
867 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
868 dev->caps.port_mask[i] = dev->caps.port_type[i];
869 dev->caps.phys_port_id[i] = func_cap.phys_port_id;
870 if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
871 &dev->caps.gid_table_len[i],
872 &dev->caps.pkey_table_len[i]))
876 if (dev->caps.uar_page_size * (dev->caps.num_uars -
877 dev->caps.reserved_uars) >
878 pci_resource_len(dev->persist->pdev,
880 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
881 dev->caps.uar_page_size * dev->caps.num_uars,
883 pci_resource_len(dev->persist->pdev, 2));
887 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
888 dev->caps.eqe_size = 64;
889 dev->caps.eqe_factor = 1;
891 dev->caps.eqe_size = 32;
892 dev->caps.eqe_factor = 0;
895 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
896 dev->caps.cqe_size = 64;
897 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
899 dev->caps.cqe_size = 32;
902 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
903 dev->caps.eqe_size = hca_param.eqe_size;
904 dev->caps.eqe_factor = 0;
907 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
908 dev->caps.cqe_size = hca_param.cqe_size;
909 /* User still needs to know when CQE > 32B */
910 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
913 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
914 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
916 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
917 mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
918 hca_param.rss_ip_frags ? "on" : "off");
920 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
921 dev->caps.bf_reg_size)
922 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
924 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
925 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
930 kfree(dev->caps.qp0_qkey);
931 kfree(dev->caps.qp0_tunnel);
932 kfree(dev->caps.qp0_proxy);
933 kfree(dev->caps.qp1_tunnel);
934 kfree(dev->caps.qp1_proxy);
935 dev->caps.qp0_qkey = NULL;
936 dev->caps.qp0_tunnel = NULL;
937 dev->caps.qp0_proxy = NULL;
938 dev->caps.qp1_tunnel = NULL;
939 dev->caps.qp1_proxy = NULL;
944 static void mlx4_request_modules(struct mlx4_dev *dev)
947 int has_ib_port = false;
948 int has_eth_port = false;
949 #define EN_DRV_NAME "mlx4_en"
950 #define IB_DRV_NAME "mlx4_ib"
952 for (port = 1; port <= dev->caps.num_ports; port++) {
953 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
955 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
960 request_module_nowait(EN_DRV_NAME);
961 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
962 request_module_nowait(IB_DRV_NAME);
966 * Change the port configuration of the device.
967 * Every user of this function must hold the port mutex.
969 int mlx4_change_port_types(struct mlx4_dev *dev,
970 enum mlx4_port_type *port_types)
976 for (port = 0; port < dev->caps.num_ports; port++) {
977 /* Change the port type only if the new type is different
978 * from the current, and not set to Auto */
979 if (port_types[port] != dev->caps.port_type[port + 1])
983 mlx4_unregister_device(dev);
984 for (port = 1; port <= dev->caps.num_ports; port++) {
985 mlx4_CLOSE_PORT(dev, port);
986 dev->caps.port_type[port] = port_types[port - 1];
987 err = mlx4_SET_PORT(dev, port, -1);
989 mlx4_err(dev, "Failed to set port %d, aborting\n",
994 mlx4_set_port_mask(dev);
995 err = mlx4_register_device(dev);
997 mlx4_err(dev, "Failed to register device\n");
1000 mlx4_request_modules(dev);
1007 static ssize_t show_port_type(struct device *dev,
1008 struct device_attribute *attr,
1011 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1013 struct mlx4_dev *mdev = info->dev;
1017 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1019 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1020 sprintf(buf, "auto (%s)\n", type);
1022 sprintf(buf, "%s\n", type);
1027 static ssize_t set_port_type(struct device *dev,
1028 struct device_attribute *attr,
1029 const char *buf, size_t count)
1031 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1033 struct mlx4_dev *mdev = info->dev;
1034 struct mlx4_priv *priv = mlx4_priv(mdev);
1035 enum mlx4_port_type types[MLX4_MAX_PORTS];
1036 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1037 static DEFINE_MUTEX(set_port_type_mutex);
1041 mutex_lock(&set_port_type_mutex);
1043 if (!strcmp(buf, "ib\n"))
1044 info->tmp_type = MLX4_PORT_TYPE_IB;
1045 else if (!strcmp(buf, "eth\n"))
1046 info->tmp_type = MLX4_PORT_TYPE_ETH;
1047 else if (!strcmp(buf, "auto\n"))
1048 info->tmp_type = MLX4_PORT_TYPE_AUTO;
1050 mlx4_err(mdev, "%s is not a supported port type\n", buf);
1055 mlx4_stop_sense(mdev);
1056 mutex_lock(&priv->port_mutex);
1057 /* Possible type is always the one that was delivered */
1058 mdev->caps.possible_type[info->port] = info->tmp_type;
1060 for (i = 0; i < mdev->caps.num_ports; i++) {
1061 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1062 mdev->caps.possible_type[i+1];
1063 if (types[i] == MLX4_PORT_TYPE_AUTO)
1064 types[i] = mdev->caps.port_type[i+1];
1067 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1068 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1069 for (i = 1; i <= mdev->caps.num_ports; i++) {
1070 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1071 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1077 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1081 mlx4_do_sense_ports(mdev, new_types, types);
1083 err = mlx4_check_port_params(mdev, new_types);
1087 /* We are about to apply the changes after the configuration
1088 * was verified, no need to remember the temporary types
1090 for (i = 0; i < mdev->caps.num_ports; i++)
1091 priv->port[i + 1].tmp_type = 0;
1093 err = mlx4_change_port_types(mdev, new_types);
1096 mlx4_start_sense(mdev);
1097 mutex_unlock(&priv->port_mutex);
1099 mutex_unlock(&set_port_type_mutex);
1101 return err ? err : count;
1112 static inline int int_to_ibta_mtu(int mtu)
1115 case 256: return IB_MTU_256;
1116 case 512: return IB_MTU_512;
1117 case 1024: return IB_MTU_1024;
1118 case 2048: return IB_MTU_2048;
1119 case 4096: return IB_MTU_4096;
1124 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1127 case IB_MTU_256: return 256;
1128 case IB_MTU_512: return 512;
1129 case IB_MTU_1024: return 1024;
1130 case IB_MTU_2048: return 2048;
1131 case IB_MTU_4096: return 4096;
1136 static ssize_t show_port_ib_mtu(struct device *dev,
1137 struct device_attribute *attr,
1140 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1142 struct mlx4_dev *mdev = info->dev;
1144 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1145 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1147 sprintf(buf, "%d\n",
1148 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1152 static ssize_t set_port_ib_mtu(struct device *dev,
1153 struct device_attribute *attr,
1154 const char *buf, size_t count)
1156 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1158 struct mlx4_dev *mdev = info->dev;
1159 struct mlx4_priv *priv = mlx4_priv(mdev);
1160 int err, port, mtu, ibta_mtu = -1;
1162 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1163 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1167 err = kstrtoint(buf, 0, &mtu);
1169 ibta_mtu = int_to_ibta_mtu(mtu);
1171 if (err || ibta_mtu < 0) {
1172 mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
1176 mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1178 mlx4_stop_sense(mdev);
1179 mutex_lock(&priv->port_mutex);
1180 mlx4_unregister_device(mdev);
1181 for (port = 1; port <= mdev->caps.num_ports; port++) {
1182 mlx4_CLOSE_PORT(mdev, port);
1183 err = mlx4_SET_PORT(mdev, port, -1);
1185 mlx4_err(mdev, "Failed to set port %d, aborting\n",
1190 err = mlx4_register_device(mdev);
1192 mutex_unlock(&priv->port_mutex);
1193 mlx4_start_sense(mdev);
1194 return err ? err : count;
1197 int mlx4_bond(struct mlx4_dev *dev)
1200 struct mlx4_priv *priv = mlx4_priv(dev);
1202 mutex_lock(&priv->bond_mutex);
1204 if (!mlx4_is_bonded(dev))
1205 ret = mlx4_do_bond(dev, true);
1209 mutex_unlock(&priv->bond_mutex);
1211 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1213 mlx4_dbg(dev, "Device is bonded\n");
1216 EXPORT_SYMBOL_GPL(mlx4_bond);
1218 int mlx4_unbond(struct mlx4_dev *dev)
1221 struct mlx4_priv *priv = mlx4_priv(dev);
1223 mutex_lock(&priv->bond_mutex);
1225 if (mlx4_is_bonded(dev))
1226 ret = mlx4_do_bond(dev, false);
1228 mutex_unlock(&priv->bond_mutex);
1230 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1232 mlx4_dbg(dev, "Device is unbonded\n");
1235 EXPORT_SYMBOL_GPL(mlx4_unbond);
1238 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1240 u8 port1 = v2p->port1;
1241 u8 port2 = v2p->port2;
1242 struct mlx4_priv *priv = mlx4_priv(dev);
1245 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1248 mutex_lock(&priv->bond_mutex);
1250 /* zero means keep current mapping for this port */
1252 port1 = priv->v2p.port1;
1254 port2 = priv->v2p.port2;
1256 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1257 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1258 (port1 == 2 && port2 == 1)) {
1259 /* besides the boundary checks, cross mapping makes
1260 * no sense and is therefore not allowed */
1262 } else if ((port1 == priv->v2p.port1) &&
1263 (port2 == priv->v2p.port2)) {
1266 err = mlx4_virt2phy_port_map(dev, port1, port2);
1268 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1270 priv->v2p.port1 = port1;
1271 priv->v2p.port2 = port2;
1273 mlx4_err(dev, "Failed to change port mape: %d\n", err);
1277 mutex_unlock(&priv->bond_mutex);
1280 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1282 static int mlx4_load_fw(struct mlx4_dev *dev)
1284 struct mlx4_priv *priv = mlx4_priv(dev);
1287 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1288 GFP_HIGHUSER | __GFP_NOWARN, 0);
1289 if (!priv->fw.fw_icm) {
1290 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1294 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1296 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1300 err = mlx4_RUN_FW(dev);
1302 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1312 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1316 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1319 struct mlx4_priv *priv = mlx4_priv(dev);
1323 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1325 ((u64) (MLX4_CMPT_TYPE_QP *
1326 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1327 cmpt_entry_sz, dev->caps.num_qps,
1328 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1333 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1335 ((u64) (MLX4_CMPT_TYPE_SRQ *
1336 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1337 cmpt_entry_sz, dev->caps.num_srqs,
1338 dev->caps.reserved_srqs, 0, 0);
1342 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1344 ((u64) (MLX4_CMPT_TYPE_CQ *
1345 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1346 cmpt_entry_sz, dev->caps.num_cqs,
1347 dev->caps.reserved_cqs, 0, 0);
1351 num_eqs = dev->phys_caps.num_phys_eqs;
1352 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1354 ((u64) (MLX4_CMPT_TYPE_EQ *
1355 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1356 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
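/* The four calls above carve the cMPT area into per-type slices: each of
 * QP, SRQ, CQ and EQ starts at cmpt_base +
 * ((u64)(type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), so the tables cannot
 * overlap as long as each type stays within its slice.
 */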
1363 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1366 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1369 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1375 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1376 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1378 struct mlx4_priv *priv = mlx4_priv(dev);
1383 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1385 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1389 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1390 (unsigned long long) icm_size >> 10,
1391 (unsigned long long) aux_pages << 2);
1393 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1394 GFP_HIGHUSER | __GFP_NOWARN, 0);
1395 if (!priv->fw.aux_icm) {
1396 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1400 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1402 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1406 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1408 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1413 num_eqs = dev->phys_caps.num_phys_eqs;
1414 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1415 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1416 num_eqs, num_eqs, 0, 0);
1418 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1419 goto err_unmap_cmpt;
1423 * Reserved MTT entries must be aligned up to a cacheline
1424 * boundary, since the FW will write to them, while the driver
1425 * writes to all other MTT entries. (The variable
1426 * dev->caps.mtt_entry_sz below is really the MTT segment
1427 * size, not the raw entry size)
1429 dev->caps.reserved_mtts =
1430 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1431 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
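/* Worked example (assuming an 8-byte MTT entry size and 64-byte cache
 * alignment): 2047 reserved entries occupy 16376 bytes, ALIGN() rounds
 * that up to 16384, and dividing back by 8 reserves 2048 entries.
 */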
1433 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1435 dev->caps.mtt_entry_sz,
1437 dev->caps.reserved_mtts, 1, 0);
1439 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1443 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1444 init_hca->dmpt_base,
1445 dev_cap->dmpt_entry_sz,
1447 dev->caps.reserved_mrws, 1, 1);
1449 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1453 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1455 dev_cap->qpc_entry_sz,
1457 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1460 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1461 goto err_unmap_dmpt;
1464 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1465 init_hca->auxc_base,
1466 dev_cap->aux_entry_sz,
1468 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1471 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1475 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1476 init_hca->altc_base,
1477 dev_cap->altc_entry_sz,
1479 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1482 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1483 goto err_unmap_auxc;
1486 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1487 init_hca->rdmarc_base,
1488 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1490 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1493 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1494 goto err_unmap_altc;
1497 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1499 dev_cap->cqc_entry_sz,
1501 dev->caps.reserved_cqs, 0, 0);
1503 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1504 goto err_unmap_rdmarc;
1507 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1508 init_hca->srqc_base,
1509 dev_cap->srq_entry_sz,
1511 dev->caps.reserved_srqs, 0, 0);
1513 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1518 * For flow steering device managed mode it is required to use
1519 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1520 * required, but for simplicity just map the whole multicast
1521 * group table now. The table isn't very big and it's a lot
1522 * easier than trying to track ref counts.
1524 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1526 mlx4_get_mgm_entry_size(dev),
1527 dev->caps.num_mgms + dev->caps.num_amgms,
1528 dev->caps.num_mgms + dev->caps.num_amgms,
1531 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1538 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1541 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1544 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1547 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1550 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1553 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1556 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1559 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1562 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1565 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1566 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1567 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1568 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1571 mlx4_UNMAP_ICM_AUX(dev);
1574 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1579 static void mlx4_free_icms(struct mlx4_dev *dev)
1581 struct mlx4_priv *priv = mlx4_priv(dev);
1583 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1584 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1585 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1586 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1587 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1588 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1589 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1590 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1591 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1592 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1593 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1594 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1595 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1596 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1598 mlx4_UNMAP_ICM_AUX(dev);
1599 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1602 static void mlx4_slave_exit(struct mlx4_dev *dev)
1604 struct mlx4_priv *priv = mlx4_priv(dev);
1606 mutex_lock(&priv->cmd.slave_cmd_mutex);
1607 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1609 mlx4_warn(dev, "Failed to close slave function\n");
1610 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1613 static int map_bf_area(struct mlx4_dev *dev)
1615 struct mlx4_priv *priv = mlx4_priv(dev);
1616 resource_size_t bf_start;
1617 resource_size_t bf_len;
1620 if (!dev->caps.bf_reg_size)
1623 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1624 (dev->caps.num_uars << PAGE_SHIFT);
1625 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1626 (dev->caps.num_uars << PAGE_SHIFT);
1627 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1628 if (!priv->bf_mapping)
1634 static void unmap_bf_area(struct mlx4_dev *dev)
1636 if (mlx4_priv(dev)->bf_mapping)
1637 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1640 cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1642 u32 clockhi, clocklo, clockhi1;
1645 struct mlx4_priv *priv = mlx4_priv(dev);
1647 for (i = 0; i < 10; i++) {
1648 clockhi = swab32(readl(priv->clock_mapping));
1649 clocklo = swab32(readl(priv->clock_mapping + 4));
1650 clockhi1 = swab32(readl(priv->clock_mapping));
1651 if (clockhi == clockhi1)
1655 cycles = (u64) clockhi << 32 | (u64) clocklo;
1659 EXPORT_SYMBOL_GPL(mlx4_read_clock);
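/* The hi/lo/hi sequence above is the usual lock-free way to sample a
 * 64-bit counter through two 32-bit MMIO reads: if the high word changed
 * while the low word was being read, the low word may belong to either
 * value of the high word, so the read is retried (up to 10 times) until
 * both high-word samples agree.
 */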
1662 static int map_internal_clock(struct mlx4_dev *dev)
1664 struct mlx4_priv *priv = mlx4_priv(dev);
1666 priv->clock_mapping =
1667 ioremap(pci_resource_start(dev->persist->pdev,
1668 priv->fw.clock_bar) +
1669 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1671 if (!priv->clock_mapping)
1677 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1678 struct mlx4_clock_params *params)
1680 struct mlx4_priv *priv = mlx4_priv(dev);
1682 if (mlx4_is_slave(dev))
1688 params->bar = priv->fw.clock_bar;
1689 params->offset = priv->fw.clock_offset;
1690 params->size = MLX4_CLOCK_SIZE;
1694 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1696 static void unmap_internal_clock(struct mlx4_dev *dev)
1698 struct mlx4_priv *priv = mlx4_priv(dev);
1700 if (priv->clock_mapping)
1701 iounmap(priv->clock_mapping);
1704 static void mlx4_close_hca(struct mlx4_dev *dev)
1706 unmap_internal_clock(dev);
1708 if (mlx4_is_slave(dev))
1709 mlx4_slave_exit(dev);
1711 mlx4_CLOSE_HCA(dev, 0);
1712 mlx4_free_icms(dev);
1716 static void mlx4_close_fw(struct mlx4_dev *dev)
1718 if (!mlx4_is_slave(dev)) {
1720 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1724 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1726 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1731 struct mlx4_priv *priv = mlx4_priv(dev);
1733 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
1734 while (time_before(jiffies, end)) {
1735 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
1736 MLX4_COMM_CHAN_FLAGS));
1737 offline_bit = (comm_flags &
1738 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
1741 /* There are cases as part of AER/Reset flow that PF needs
1742 * around 100 msec to load. We therefore sleep for 100 msec
1743 * to allow other tasks to make use of that CPU during this time interval. */
1748 mlx4_err(dev, "Communication channel is offline.\n");
1752 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
1754 #define COMM_CHAN_RST_OFFSET 0x1e
1756 struct mlx4_priv *priv = mlx4_priv(dev);
1760 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
1761 MLX4_COMM_CHAN_CAPS));
1762 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
1765 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
1768 static int mlx4_init_slave(struct mlx4_dev *dev)
1770 struct mlx4_priv *priv = mlx4_priv(dev);
1771 u64 dma = (u64) priv->mfunc.vhcr_dma;
1772 int ret_from_reset = 0;
1774 u32 cmd_channel_ver;
1776 if (atomic_read(&pf_loading)) {
1777 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
1778 return -EPROBE_DEFER;
1781 mutex_lock(&priv->cmd.slave_cmd_mutex);
1782 priv->cmd.max_cmds = 1;
1783 if (mlx4_comm_check_offline(dev)) {
1784 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
1788 mlx4_reset_vf_support(dev);
1789 mlx4_warn(dev, "Sending reset\n");
1790 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1791 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
1792 /* if we are in the middle of FLR, the slave will retry
1793 * NUM_OF_RESET_RETRIES times before giving up. */
1794 if (ret_from_reset) {
1795 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1796 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
1797 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1798 return -EPROBE_DEFER;
1803 /* check the driver version - the slave I/F revision
1804 * must match the master's */
1805 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
1806 cmd_channel_ver = mlx4_comm_get_version();
1808 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1809 MLX4_COMM_GET_IF_REV(slave_read)) {
1810 mlx4_err(dev, "slave driver version is not supported by the master\n");
1814 mlx4_warn(dev, "Sending vhcr0\n");
1815 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1816 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1818 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1819 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1821 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1822 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1824 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
1825 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
1828 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1832 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
1834 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1838 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1842 for (i = 1; i <= dev->caps.num_ports; i++) {
1843 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1844 dev->caps.gid_table_len[i] =
1845 mlx4_get_slave_num_gids(dev, 0, i);
1847 dev->caps.gid_table_len[i] = 1;
1848 dev->caps.pkey_table_len[i] =
1849 dev->phys_caps.pkey_phys_table_len[i] - 1;
1853 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1855 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
1857 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
1859 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1863 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
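/* e.g. choose_log_fs_mgm_entry_size(100): i = 7 fits 24 QPs and i = 8
 * fits 56, but i = 9 fits 4 * (512 / 16 - 2) = 120 >= 100, so 9 is
 * returned as the smallest entry size that holds 100 QPs per entry.
 */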
1866 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
1868 switch (dmfs_high_steer_mode) {
1869 case MLX4_STEERING_DMFS_A0_DEFAULT:
1870 return "default performance";
1872 case MLX4_STEERING_DMFS_A0_DYNAMIC:
1873 return "dynamic hybrid mode";
1875 case MLX4_STEERING_DMFS_A0_STATIC:
1876 return "performance optimized for limited rule configuration (static)";
1878 case MLX4_STEERING_DMFS_A0_DISABLE:
1879 return "disabled performance optimized steering";
1881 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
1882 return "performance optimized steering not supported";
1885 return "Unrecognized mode";
1889 #define MLX4_DMFS_A0_STEERING (1UL << 2)
1891 static void choose_steering_mode(struct mlx4_dev *dev,
1892 struct mlx4_dev_cap *dev_cap)
1894 if (mlx4_log_num_mgm_entry_size <= 0) {
1895 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
1896 if (dev->caps.dmfs_high_steer_mode ==
1897 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1898 mlx4_err(dev, "DMFS high rate mode not supported\n");
1900 dev->caps.dmfs_high_steer_mode =
1901 MLX4_STEERING_DMFS_A0_STATIC;
1905 if (mlx4_log_num_mgm_entry_size <= 0 &&
1906 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1907 (!mlx4_is_mfunc(dev) ||
1908 (dev_cap->fs_max_num_qp_per_entry >=
1909 (dev->persist->num_vfs + 1))) &&
1910 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1911 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1912 dev->oper_log_mgm_entry_size =
1913 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
1914 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1915 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1916 dev->caps.fs_log_max_ucast_qp_range_size =
1917 dev_cap->fs_log_max_ucast_qp_range_size;
1919 if (dev->caps.dmfs_high_steer_mode !=
1920 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1921 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
1922 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1923 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1924 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1926 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1928 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1929 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1930 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1932 dev->oper_log_mgm_entry_size =
1933 mlx4_log_num_mgm_entry_size > 0 ?
1934 mlx4_log_num_mgm_entry_size :
1935 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1936 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1938 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1939 mlx4_steering_mode_str(dev->caps.steering_mode),
1940 dev->oper_log_mgm_entry_size,
1941 mlx4_log_num_mgm_entry_size);
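/* Informal reading of the modparam encoding handled above: a value of -1
 * selects device-managed flow steering when available; a negative value
 * whose magnitude has bit 2 set, e.g. -5 ((5 & MLX4_DMFS_A0_STEERING) != 0),
 * additionally requests MLX4_STEERING_DMFS_A0_STATIC if the device
 * supports it.
 */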
1944 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
1945 struct mlx4_dev_cap *dev_cap)
1947 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
1948 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
1949 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
1951 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
1953 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
1954 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
1957 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
1960 struct mlx4_port_cap port_cap;
1962 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1965 for (i = 1; i <= dev->caps.num_ports; i++) {
1966 if (mlx4_dev_port(dev, i, &port_cap)) {
1968 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
1969 } else if ((dev->caps.dmfs_high_steer_mode !=
1970 MLX4_STEERING_DMFS_A0_DEFAULT) &&
1971 (port_cap.dmfs_optimized_state ==
1972 !!(dev->caps.dmfs_high_steer_mode ==
1973 MLX4_STEERING_DMFS_A0_DISABLE))) {
1975 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
1976 dmfs_high_rate_steering_mode_str(
1977 dev->caps.dmfs_high_steer_mode),
1978 (port_cap.dmfs_optimized_state ?
1979 "enabled" : "disabled"));
1986 static int mlx4_init_fw(struct mlx4_dev *dev)
1988 struct mlx4_mod_stat_cfg mlx4_cfg;
1991 if (!mlx4_is_slave(dev)) {
1992 err = mlx4_QUERY_FW(dev);
1995 mlx4_info(dev, "non-primary physical function, skipping\n");
1997 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2001 err = mlx4_load_fw(dev);
2003 mlx4_err(dev, "Failed to start FW, aborting\n");
2007 mlx4_cfg.log_pg_sz_m = 1;
2008 mlx4_cfg.log_pg_sz = 0;
2009 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2011 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2017 static int mlx4_init_hca(struct mlx4_dev *dev)
2019 struct mlx4_priv *priv = mlx4_priv(dev);
2020 struct mlx4_adapter adapter;
2021 struct mlx4_dev_cap dev_cap;
2022 struct mlx4_profile profile;
2023 struct mlx4_init_hca_param init_hca;
2025 struct mlx4_config_dev_params params;
2028 if (!mlx4_is_slave(dev)) {
2029 err = mlx4_dev_cap(dev, &dev_cap);
2031 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2035 choose_steering_mode(dev, &dev_cap);
2036 choose_tunnel_offload_mode(dev, &dev_cap);
2038 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2039 mlx4_is_master(dev))
2040 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2042 err = mlx4_get_phys_port_id(dev);
2044 mlx4_err(dev, "Fail to get physical port id\n");
2046 if (mlx4_is_master(dev))
2047 mlx4_parav_master_pf_caps(dev);
2049 if (mlx4_low_memory_profile()) {
2050 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2051 profile = low_mem_profile;
2053 profile = default_profile;
2055 if (dev->caps.steering_mode ==
2056 MLX4_STEERING_MODE_DEVICE_MANAGED)
2057 profile.num_mcg = MLX4_FS_NUM_MCG;
2059 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2061 if ((long long) icm_size < 0) {
2066 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2068 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2069 init_hca.uar_page_sz = PAGE_SHIFT - 12;
2070 init_hca.mw_enabled = 0;
2071 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2072 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2073 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2075 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2079 err = mlx4_INIT_HCA(dev, &init_hca);
2081 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
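		/*
		 * With FW-managed system EQs (SYS_EQS), the per-function EQ
		 * and UAR limits are re-read via QUERY_FUNC now that the HCA
		 * is initialized.
		 */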
2085 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2086 err = mlx4_query_func(dev, &dev_cap);
2088 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2090 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2091 dev->caps.num_eqs = dev_cap.max_eqs;
2092 dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2093 dev->caps.reserved_uars = dev_cap.reserved_uars;
		/*
		 * If timestamping (TS) is supported by the FW,
		 * read the HCA frequency via the QUERY_HCA command.
		 */
2101 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2102 memset(&init_hca, 0, sizeof(init_hca));
2103 err = mlx4_QUERY_HCA(dev, &init_hca);
				mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping\n");
2106 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2108 dev->caps.hca_core_clock =
2109 init_hca.hca_core_clock;
		/* If the reported HCA frequency is 0, disable timestamping
		 * to avoid dividing by zero.
		 */
2115 if (!dev->caps.hca_core_clock) {
2116 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2118 "HCA frequency is 0 - timestamping is not supported\n");
2119 } else if (map_internal_clock(dev)) {
			/*
			 * Map the internal clock;
			 * on failure, disable timestamping.
			 */
2124 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2125 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2129 if (dev->caps.dmfs_high_steer_mode !=
2130 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2131 if (mlx4_validate_optimized_steering(dev))
2132 mlx4_warn(dev, "Optimized steering validation failed\n");
2134 if (dev->caps.dmfs_high_steer_mode ==
2135 MLX4_STEERING_DMFS_A0_DISABLE) {
2136 dev->caps.dmfs_high_rate_qpn_base =
2137 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2138 dev->caps.dmfs_high_rate_qpn_range =
2139 MLX4_A0_STEERING_TABLE_SIZE;
2142 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
2143 dmfs_high_rate_steering_mode_str(
2144 dev->caps.dmfs_high_steer_mode));
2147 err = mlx4_init_slave(dev);
2149 if (err != -EPROBE_DEFER)
2150 mlx4_err(dev, "Failed to initialize slave\n");
2154 err = mlx4_slave_cap(dev);
2156 mlx4_err(dev, "Failed to obtain slave caps\n");
2161 if (map_bf_area(dev))
2162 mlx4_dbg(dev, "Failed to map blue flame area\n");
	/* Only the master sets the ports; all other functions get them from it. */
2165 if (!mlx4_is_slave(dev))
2166 mlx4_set_port_mask(dev);
2168 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2170 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2174 /* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
2176 if (err && err != -ENOTSUPP) {
2177 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2179 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2180 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2182 priv->eq_table.inta_pin = adapter.inta_pin;
2183 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
2188 unmap_internal_clock(dev);
2191 if (mlx4_is_slave(dev)) {
2192 kfree(dev->caps.qp0_qkey);
2193 kfree(dev->caps.qp0_tunnel);
2194 kfree(dev->caps.qp0_proxy);
2195 kfree(dev->caps.qp1_tunnel);
2196 kfree(dev->caps.qp1_proxy);
2200 if (mlx4_is_slave(dev))
2201 mlx4_slave_exit(dev);
2203 mlx4_CLOSE_HCA(dev, 0);
2206 if (!mlx4_is_slave(dev))
2207 mlx4_free_icms(dev);
2212 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2214 struct mlx4_priv *priv = mlx4_priv(dev);
2217 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2220 nent = dev->caps.max_counters;
2221 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
2224 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2226 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2229 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2231 struct mlx4_priv *priv = mlx4_priv(dev);
2233 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2236 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
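/*
 * In multi-function mode the counter index is obtained from the PF
 * through the wrapped ALLOC_RES command; a native function allocates
 * directly from the local bitmap.
 */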
2243 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2248 if (mlx4_is_mfunc(dev)) {
2249 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2250 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2251 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2253 *idx = get_param_l(&out_param);
2257 return __mlx4_counter_alloc(dev, idx);
2259 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2261 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2263 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2267 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2271 if (mlx4_is_mfunc(dev)) {
2272 set_param_l(&in_param, idx);
2273 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2274 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2278 __mlx4_counter_free(dev, idx);
2280 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2282 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2284 struct mlx4_priv *priv = mlx4_priv(dev);
2286 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2288 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2290 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2292 struct mlx4_priv *priv = mlx4_priv(dev);
2294 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2296 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2298 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2300 struct mlx4_priv *priv = mlx4_priv(dev);
2307 get_random_bytes((char *)&guid, sizeof(guid));
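	/*
	 * Clear the EUI-64 group bit (bit 56 of the big-endian value) and
	 * set the locally-administered bit (bit 57) so the random GUID is
	 * a valid unicast, locally assigned identifier.
	 */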
2308 guid &= ~(cpu_to_be64(1ULL << 56));
2309 guid |= cpu_to_be64(1ULL << 57);
2310 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2313 static int mlx4_setup_hca(struct mlx4_dev *dev)
2315 struct mlx4_priv *priv = mlx4_priv(dev);
2318 __be32 ib_port_default_caps;
2320 err = mlx4_init_uar_table(dev);
2322 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2326 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2328 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2329 goto err_uar_table_free;
2332 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2334 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2339 err = mlx4_init_pd_table(dev);
2341 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2345 err = mlx4_init_xrcd_table(dev);
2347 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2348 goto err_pd_table_free;
2351 err = mlx4_init_mr_table(dev);
2353 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2354 goto err_xrcd_table_free;
2357 if (!mlx4_is_slave(dev)) {
2358 err = mlx4_init_mcg_table(dev);
2360 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2361 goto err_mr_table_free;
2363 err = mlx4_config_mad_demux(dev);
2365 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2366 goto err_mcg_table_free;
2370 err = mlx4_init_eq_table(dev);
2372 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2373 goto err_mcg_table_free;
2376 err = mlx4_cmd_use_events(dev);
2378 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2379 goto err_eq_table_free;
2382 err = mlx4_NOP(dev);
2384 if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
2386 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
2387 mlx4_warn(dev, "Trying again without MSI-X\n");
2389 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2390 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
2391 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2397 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2399 err = mlx4_init_cq_table(dev);
2401 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2405 err = mlx4_init_srq_table(dev);
2407 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2408 goto err_cq_table_free;
2411 err = mlx4_init_qp_table(dev);
2413 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2414 goto err_srq_table_free;
2417 err = mlx4_init_counters_table(dev);
2418 if (err && err != -ENOENT) {
2419 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2420 goto err_qp_table_free;
2423 if (!mlx4_is_slave(dev)) {
2424 for (port = 1; port <= dev->caps.num_ports; port++) {
2425 ib_port_default_caps = 0;
2426 err = mlx4_get_port_ib_caps(dev, port,
2427 &ib_port_default_caps);
				mlx4_warn(dev, "failed to get port %d default IB capabilities (%d). Continuing with caps = 0\n",
2431 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2433 /* initialize per-slave default ib port capabilities */
2434 if (mlx4_is_master(dev)) {
2436 for (i = 0; i < dev->num_slaves; i++) {
2437 if (i == mlx4_master_func_num(dev))
2439 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2440 ib_port_default_caps;
2444 if (mlx4_is_mfunc(dev))
2445 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2447 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2449 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2450 dev->caps.pkey_table_len[port] : -1);
2452 mlx4_err(dev, "Failed to set port %d, aborting\n",
2454 goto err_counters_table_free;
2461 err_counters_table_free:
2462 mlx4_cleanup_counters_table(dev);
2465 mlx4_cleanup_qp_table(dev);
2468 mlx4_cleanup_srq_table(dev);
2471 mlx4_cleanup_cq_table(dev);
2474 mlx4_cmd_use_polling(dev);
2477 mlx4_cleanup_eq_table(dev);
2480 if (!mlx4_is_slave(dev))
2481 mlx4_cleanup_mcg_table(dev);
2484 mlx4_cleanup_mr_table(dev);
2486 err_xrcd_table_free:
2487 mlx4_cleanup_xrcd_table(dev);
2490 mlx4_cleanup_pd_table(dev);
2496 mlx4_uar_free(dev, &priv->driver_uar);
2499 mlx4_cleanup_uar_table(dev);
2503 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2505 struct mlx4_priv *priv = mlx4_priv(dev);
2506 struct msix_entry *entries;
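	/*
	 * Request one completion vector per port per online CPU, plus the
	 * MSIX_LEGACY_SZ legacy pool, clamped below to the EQs the device
	 * can actually provide.
	 */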
2510 int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
2512 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2515 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2519 for (i = 0; i < nreq; ++i)
2520 entries[i].entry = i;
2522 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2528 } else if (nreq < MSIX_LEGACY_SZ +
2529 dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode: all EQs are shared */
2531 dev->caps.comp_pool = 0;
2532 dev->caps.num_comp_vectors = nreq - 1;
2534 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
2535 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
2537 for (i = 0; i < nreq; ++i)
2538 priv->eq_table.eq[i].irq = entries[i].vector;
2540 dev->flags |= MLX4_FLAG_MSI_X;
2547 dev->caps.num_comp_vectors = 1;
2548 dev->caps.comp_pool = 0;
2550 for (i = 0; i < 2; ++i)
2551 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2554 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2556 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2561 if (!mlx4_is_slave(dev)) {
2562 mlx4_init_mac_table(dev, &info->mac_table);
2563 mlx4_init_vlan_table(dev, &info->vlan_table);
2564 mlx4_init_roce_gid_table(dev, &info->gid_table);
2565 info->base_qpn = mlx4_get_base_qpn(dev, port);
2568 sprintf(info->dev_name, "mlx4_port%d", port);
2569 info->port_attr.attr.name = info->dev_name;
2570 if (mlx4_is_mfunc(dev))
2571 info->port_attr.attr.mode = S_IRUGO;
2573 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2574 info->port_attr.store = set_port_type;
2576 info->port_attr.show = show_port_type;
2577 sysfs_attr_init(&info->port_attr.attr);
2579 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2581 mlx4_err(dev, "Failed to create file for port %d\n", port);
2585 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2586 info->port_mtu_attr.attr.name = info->dev_mtu_name;
2587 if (mlx4_is_mfunc(dev))
2588 info->port_mtu_attr.attr.mode = S_IRUGO;
2590 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2591 info->port_mtu_attr.store = set_port_ib_mtu;
2593 info->port_mtu_attr.show = show_port_ib_mtu;
2594 sysfs_attr_init(&info->port_mtu_attr.attr);
2596 err = device_create_file(&dev->persist->pdev->dev,
2597 &info->port_mtu_attr);
2599 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2600 device_remove_file(&info->dev->persist->pdev->dev,
2608 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2613 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2614 device_remove_file(&info->dev->persist->pdev->dev,
2615 &info->port_mtu_attr);
2618 static int mlx4_init_steering(struct mlx4_dev *dev)
2620 struct mlx4_priv *priv = mlx4_priv(dev);
2621 int num_entries = dev->caps.num_ports;
	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer), GFP_KERNEL);
2628 for (i = 0; i < num_entries; i++)
2629 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2630 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2631 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2636 static void mlx4_clear_steering(struct mlx4_dev *dev)
2638 struct mlx4_priv *priv = mlx4_priv(dev);
2639 struct mlx4_steer_index *entry, *tmp_entry;
2640 struct mlx4_promisc_qp *pqp, *tmp_pqp;
2641 int num_entries = dev->caps.num_ports;
2644 for (i = 0; i < num_entries; i++) {
2645 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2646 list_for_each_entry_safe(pqp, tmp_pqp,
2647 &priv->steer[i].promisc_qps[j],
2649 list_del(&pqp->list);
2652 list_for_each_entry_safe(entry, tmp_entry,
2653 &priv->steer[i].steer_entries[j],
2655 list_del(&entry->list);
2656 list_for_each_entry_safe(pqp, tmp_pqp,
2659 list_del(&pqp->list);
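/*
 * Flatten the PCI slot/function pair into a linear function index:
 * devfn keeps the slot in the upper bits and the function in the low
 * three bits, so slot * 8 + func yields a unique per-device number.
 */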
2669 static int extended_func_num(struct pci_dev *pdev)
2671 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2674 #define MLX4_OWNER_BASE 0x8069c
2675 #define MLX4_OWNER_SIZE 4
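/*
 * The ownership semaphore is a 4-byte register in BAR 0 at offset
 * MLX4_OWNER_BASE; a non-zero value indicates the device is already
 * owned by another physical function.
 */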
2677 static int mlx4_get_ownership(struct mlx4_dev *dev)
2679 void __iomem *owner;
2682 if (pci_channel_offline(dev->persist->pdev))
2685 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2689 mlx4_err(dev, "Failed to obtain ownership bit\n");
2698 static void mlx4_free_ownership(struct mlx4_dev *dev)
2700 void __iomem *owner;
2702 if (pci_channel_offline(dev->persist->pdev))
2705 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
2709 mlx4_err(dev, "Failed to obtain ownership bit\n");
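/*
 * A function's flags are consistent only if SR-IOV and master mode go
 * together: a PF that enabled SR-IOV must be the master, and a
 * non-master must not have SR-IOV set.
 */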
2717 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
2718 !!((flags) & MLX4_FLAG_MASTER))
2720 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
2721 u8 total_vfs, int existing_vfs, int reset_flow)
2723 u64 dev_flags = dev->flags;
2727 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
2734 atomic_inc(&pf_loading);
2735 if (dev->flags & MLX4_FLAG_SRIOV) {
2736 if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different from requested (%d)\n",
2738 existing_vfs, total_vfs);
2739 total_vfs = existing_vfs;
2743 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (!dev->dev_vfs) {
2745 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2749 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
2750 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
2751 err = pci_enable_sriov(pdev, total_vfs);
2754 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
2758 mlx4_warn(dev, "Running in master mode\n");
2759 dev_flags |= MLX4_FLAG_SRIOV |
2761 dev_flags &= ~MLX4_FLAG_SLAVE;
2762 dev->persist->num_vfs = total_vfs;
2767 atomic_dec(&pf_loading);
2769 dev->persist->num_vfs = 0;
2770 kfree(dev->dev_vfs);
2771 return dev_flags & ~MLX4_FLAG_MASTER;
2775 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
2778 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
2781 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
2782 /* Checking for 64 VFs as a limitation of CX2 */
2783 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
2784 requested_vfs >= 64) {
2785 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
2787 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
2792 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
2793 int total_vfs, int *nvfs, struct mlx4_priv *priv,
2796 struct mlx4_dev *dev;
2801 struct mlx4_dev_cap *dev_cap = NULL;
2802 int existing_vfs = 0;
2806 INIT_LIST_HEAD(&priv->ctx_list);
2807 spin_lock_init(&priv->ctx_lock);
2809 mutex_init(&priv->port_mutex);
2810 mutex_init(&priv->bond_mutex);
2812 INIT_LIST_HEAD(&priv->pgdir_list);
2813 mutex_init(&priv->pgdir_mutex);
2815 INIT_LIST_HEAD(&priv->bf_list);
2816 mutex_init(&priv->bf_mutex);
2818 dev->rev_id = pdev->revision;
2819 dev->numa_node = dev_to_node(&pdev->dev);
2821 /* Detect if this device is a virtual function */
2822 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2823 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2824 dev->flags |= MLX4_FLAG_SLAVE;
		/* We reset the device and enable SR-IOV only for physical
		 * devices.  Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
2829 err = mlx4_get_ownership(dev);
2834 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
2839 atomic_set(&priv->opreq_count, 0);
2840 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2843 * Now reset the HCA before we touch the PCI capabilities or
2844 * attempt a firmware command, since a boot ROM may have left
2845 * the HCA in an undefined state.
2847 err = mlx4_reset(dev);
2849 mlx4_err(dev, "Failed to reset HCA, aborting\n");
2854 dev->flags = MLX4_FLAG_MASTER;
2855 existing_vfs = pci_num_vf(pdev);
2857 dev->flags |= MLX4_FLAG_SRIOV;
2858 dev->persist->num_vfs = total_vfs;
	/* On load, remove any previous indication of internal error;
	 * the device is up.
	 */
2865 dev->persist->state = MLX4_DEVICE_STATE_UP;
2868 err = mlx4_cmd_init(dev);
2870 mlx4_err(dev, "Failed to init command interface, aborting\n");
	/* In slave functions, the communication channel must be initialized
	 * before posting commands.  Also, init num_slaves before calling
	 * mlx4_init_hca.
	 */
2877 if (mlx4_is_mfunc(dev)) {
2878 if (mlx4_is_master(dev)) {
2879 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
2882 dev->num_slaves = 0;
2883 err = mlx4_multi_func_init(dev);
2885 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
2891 err = mlx4_init_fw(dev);
2893 mlx4_err(dev, "Failed to init fw, aborting.\n");
2897 if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap is already initialized */
2900 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
2907 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
2909 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
2913 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
2916 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2917 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
2922 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2923 dev->flags = dev_flags;
2924 if (!SRIOV_VALID_STATE(dev->flags)) {
2925 mlx4_err(dev, "Invalid SRIOV state\n");
2928 err = mlx4_reset(dev);
2930 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
			/* Legacy mode FW requires SR-IOV to be enabled before
			 * doing QUERY_DEV_CAP, since the max_eq value differs
			 * when SR-IOV is enabled.
			 */
2940 memset(dev_cap, 0, sizeof(*dev_cap));
2941 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
2943 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
2947 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
2952 err = mlx4_init_hca(dev);
2954 if (err == -EACCES) {
			/* Not the primary physical function;
			 * running in slave mode */
2957 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
2958 /* We're not a PF */
2959 if (dev->flags & MLX4_FLAG_SRIOV) {
2961 pci_disable_sriov(pdev);
2962 if (mlx4_is_master(dev) && !reset_flow)
2963 atomic_dec(&pf_loading);
2964 dev->flags &= ~MLX4_FLAG_SRIOV;
2966 if (!mlx4_is_slave(dev))
2967 mlx4_free_ownership(dev);
2968 dev->flags |= MLX4_FLAG_SLAVE;
2969 dev->flags &= ~MLX4_FLAG_MASTER;
2975 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
2976 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
2977 existing_vfs, reset_flow);
2979 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
2980 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
2981 dev->flags = dev_flags;
2982 err = mlx4_cmd_init(dev);
2984 /* Only VHCR is cleaned up, so could still
2987 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
2991 dev->flags = dev_flags;
2994 if (!SRIOV_VALID_STATE(dev->flags)) {
2995 mlx4_err(dev, "Invalid SRIOV state\n");
	/* Check whether the device is functioning at its maximum possible
	 * speed.  This call has no return code; it just warns the user if
	 * the PCI Express device's capabilities are under-satisfied by the
	 * bus.
	 */
3004 if (!mlx4_is_slave(dev))
3005 mlx4_check_pcie_caps(dev);
3007 /* In master functions, the communication channel must be initialized
3008 * after obtaining its address from fw */
3009 if (mlx4_is_master(dev)) {
3012 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
3016 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
3018 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
3022 if (dev->caps.num_ports < 2 &&
3026 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3027 dev->caps.num_ports);
3030 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
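		/*
		 * nvfs[0] and nvfs[1] hold single-port VFs bound to port 1
		 * and port 2 respectively; nvfs[2] holds dual-port VFs that
		 * see every physical port.  The loop below records each VF's
		 * port span accordingly.
		 */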
3033 i < sizeof(dev->persist->nvfs)/
3034 sizeof(dev->persist->nvfs[0]); i++) {
3037 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3038 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3039 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3040 dev->caps.num_ports;
3044 /* In master functions, the communication channel
3045 * must be initialized after obtaining its address from fw
3047 err = mlx4_multi_func_init(dev);
3049 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3054 err = mlx4_alloc_eq_table(dev);
3056 goto err_master_mfunc;
3058 priv->msix_ctl.pool_bm = 0;
3059 mutex_init(&priv->msix_ctl.pool_lock);
3061 mlx4_enable_msi_x(dev);
3062 if ((mlx4_is_mfunc(dev)) &&
3063 !(dev->flags & MLX4_FLAG_MSI_X)) {
3065 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3069 if (!mlx4_is_slave(dev)) {
3070 err = mlx4_init_steering(dev);
3072 goto err_disable_msix;
3075 err = mlx4_setup_hca(dev);
3076 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3077 !mlx4_is_mfunc(dev)) {
3078 dev->flags &= ~MLX4_FLAG_MSI_X;
3079 dev->caps.num_comp_vectors = 1;
3080 dev->caps.comp_pool = 0;
3081 pci_disable_msix(pdev);
3082 err = mlx4_setup_hca(dev);
3088 mlx4_init_quotas(dev);
	/* When PF resources are ready, arm its comm channel to enable
	 * getting them from the VFs.
	 */
3092 if (mlx4_is_master(dev)) {
3093 err = mlx4_ARM_COMM_CHANNEL(dev);
			mlx4_err(dev, "Failed to arm comm channel EQ: %x\n",
3101 for (port = 1; port <= dev->caps.num_ports; port++) {
3102 err = mlx4_init_port_info(dev, port);
3107 priv->v2p.port1 = 1;
3108 priv->v2p.port2 = 2;
3110 err = mlx4_register_device(dev);
3114 mlx4_request_modules(dev);
3116 mlx4_sense_init(dev);
3117 mlx4_start_sense(dev);
3121 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3122 atomic_dec(&pf_loading);
3128 for (--port; port >= 1; --port)
3129 mlx4_cleanup_port_info(&priv->port[port]);
3131 mlx4_cleanup_counters_table(dev);
3132 mlx4_cleanup_qp_table(dev);
3133 mlx4_cleanup_srq_table(dev);
3134 mlx4_cleanup_cq_table(dev);
3135 mlx4_cmd_use_polling(dev);
3136 mlx4_cleanup_eq_table(dev);
3137 mlx4_cleanup_mcg_table(dev);
3138 mlx4_cleanup_mr_table(dev);
3139 mlx4_cleanup_xrcd_table(dev);
3140 mlx4_cleanup_pd_table(dev);
3141 mlx4_cleanup_uar_table(dev);
3144 if (!mlx4_is_slave(dev))
3145 mlx4_clear_steering(dev);
3148 if (dev->flags & MLX4_FLAG_MSI_X)
3149 pci_disable_msix(pdev);
3152 mlx4_free_eq_table(dev);
3155 if (mlx4_is_master(dev)) {
3156 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3157 mlx4_multi_func_cleanup(dev);
3160 if (mlx4_is_slave(dev)) {
3161 kfree(dev->caps.qp0_qkey);
3162 kfree(dev->caps.qp0_tunnel);
3163 kfree(dev->caps.qp0_proxy);
3164 kfree(dev->caps.qp1_tunnel);
3165 kfree(dev->caps.qp1_proxy);
3169 mlx4_close_hca(dev);
3175 if (mlx4_is_slave(dev))
3176 mlx4_multi_func_cleanup(dev);
3179 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3182 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3183 pci_disable_sriov(pdev);
3184 dev->flags &= ~MLX4_FLAG_SRIOV;
3187 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3188 atomic_dec(&pf_loading);
3190 kfree(priv->dev.dev_vfs);
3192 if (!mlx4_is_slave(dev))
3193 mlx4_free_ownership(dev);
3199 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3200 struct mlx4_priv *priv)
3203 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3204 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
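	/*
	 * param_map translates a module-parameter position into an nvfs
	 * slot based on how many values were given: a single value
	 * configures dual-port VFs (slot 2), while two or three values map
	 * positionally to port 1, port 2 and dual-port.
	 */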
3205 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3206 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
3207 unsigned total_vfs = 0;
3210 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3212 err = pci_enable_device(pdev);
3214 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
	/* Due to the requirement that all VFs and the PF are *guaranteed*
	 * two MACs per port, we must limit the number of VFs to 63 (since
	 * there are only 128 MACs).
	 */
3222 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
3223 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3224 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3226 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3228 goto err_disable_pdev;
3231 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
3233 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3234 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3235 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3237 goto err_disable_pdev;
3240 if (total_vfs >= MLX4_MAX_NUM_VF) {
			"Requested more VFs (%d) than allowed (%d)\n",
3243 total_vfs, MLX4_MAX_NUM_VF - 1);
3245 goto err_disable_pdev;
3248 for (i = 0; i < MLX4_MAX_PORTS; i++) {
3249 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
				"Requested more VFs (%d) for port %d than allowed (%d)\n",
3252 nvfs[i] + nvfs[2], i + 1,
3253 MLX4_MAX_NUM_VF_P_PORT - 1);
3255 goto err_disable_pdev;
3259 /* Check for BARs. */
3260 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3261 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3262 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3263 pci_dev_data, pci_resource_flags(pdev, 0));
3265 goto err_disable_pdev;
3267 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3268 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3270 goto err_disable_pdev;
3273 err = pci_request_regions(pdev, DRV_NAME);
3275 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3276 goto err_disable_pdev;
3279 pci_set_master(pdev);
3281 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3283 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3284 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3286 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3287 goto err_release_regions;
3290 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3292 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3293 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3295 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3296 goto err_release_regions;
3300 /* Allow large DMA segments, up to the firmware limit of 1 GB */
3301 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3302 /* Detect if this device is a virtual function */
3303 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VFs unless explicitly
		 * requested to probe them.
		 */
3308 unsigned vfs_offset = 0;
3310 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
3311 vfs_offset + nvfs[i] < extended_func_num(pdev);
3312 vfs_offset += nvfs[i], i++)
3314 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
3316 goto err_release_regions;
3318 if ((extended_func_num(pdev) - vfs_offset)
			dev_warn(&pdev->dev, "Skipping virtual function: %d\n",
3321 extended_func_num(pdev));
3323 goto err_release_regions;
3328 err = mlx4_catas_init(&priv->dev);
3330 goto err_release_regions;
3332 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3339 mlx4_catas_end(&priv->dev);
3341 err_release_regions:
3342 pci_release_regions(pdev);
3345 pci_disable_device(pdev);
3346 pci_set_drvdata(pdev, NULL);
3350 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3352 struct mlx4_priv *priv;
3353 struct mlx4_dev *dev;
3356 printk_once(KERN_INFO "%s", mlx4_version);
3358 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3363 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3364 if (!dev->persist) {
3368 dev->persist->pdev = pdev;
3369 dev->persist->dev = dev;
3370 pci_set_drvdata(pdev, dev->persist);
3371 priv->pci_dev_data = id->driver_data;
3372 mutex_init(&dev->persist->device_state_mutex);
3373 mutex_init(&dev->persist->interface_state_mutex);
3375 ret = __mlx4_init_one(pdev, id->driver_data, priv);
3377 kfree(dev->persist);
3380 pci_save_state(pdev);
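/*
 * Reset the per-device private state while preserving the persistent
 * part: the mlx4_dev_persistent pointer and the flags covered by
 * RESET_PERSIST_MASK_FLAGS survive an internal reset.
 */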
3386 static void mlx4_clean_dev(struct mlx4_dev *dev)
3388 struct mlx4_dev_persistent *persist = dev->persist;
3389 struct mlx4_priv *priv = mlx4_priv(dev);
3390 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3392 memset(priv, 0, sizeof(*priv));
3393 priv->dev.persist = persist;
3394 priv->dev.flags = flags;
3397 static void mlx4_unload_one(struct pci_dev *pdev)
3399 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3400 struct mlx4_dev *dev = persist->dev;
3401 struct mlx4_priv *priv = mlx4_priv(dev);
	/* save the current port types for later restoration */
3409 for (i = 0; i < dev->caps.num_ports; i++) {
3410 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3411 dev->persist->curr_port_poss_type[i] = dev->caps.
3412 possible_type[i + 1];
3415 pci_dev_data = priv->pci_dev_data;
3417 mlx4_stop_sense(dev);
3418 mlx4_unregister_device(dev);
3420 for (p = 1; p <= dev->caps.num_ports; p++) {
3421 mlx4_cleanup_port_info(&priv->port[p]);
3422 mlx4_CLOSE_PORT(dev, p);
3425 if (mlx4_is_master(dev))
3426 mlx4_free_resource_tracker(dev,
3427 RES_TR_FREE_SLAVES_ONLY);
3429 mlx4_cleanup_counters_table(dev);
3430 mlx4_cleanup_qp_table(dev);
3431 mlx4_cleanup_srq_table(dev);
3432 mlx4_cleanup_cq_table(dev);
3433 mlx4_cmd_use_polling(dev);
3434 mlx4_cleanup_eq_table(dev);
3435 mlx4_cleanup_mcg_table(dev);
3436 mlx4_cleanup_mr_table(dev);
3437 mlx4_cleanup_xrcd_table(dev);
3438 mlx4_cleanup_pd_table(dev);
3440 if (mlx4_is_master(dev))
3441 mlx4_free_resource_tracker(dev,
3442 RES_TR_FREE_STRUCTS_ONLY);
3445 mlx4_uar_free(dev, &priv->driver_uar);
3446 mlx4_cleanup_uar_table(dev);
3447 if (!mlx4_is_slave(dev))
3448 mlx4_clear_steering(dev);
3449 mlx4_free_eq_table(dev);
3450 if (mlx4_is_master(dev))
3451 mlx4_multi_func_cleanup(dev);
3452 mlx4_close_hca(dev);
3454 if (mlx4_is_slave(dev))
3455 mlx4_multi_func_cleanup(dev);
3456 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3458 if (dev->flags & MLX4_FLAG_MSI_X)
3459 pci_disable_msix(pdev);
3461 if (!mlx4_is_slave(dev))
3462 mlx4_free_ownership(dev);
3464 kfree(dev->caps.qp0_qkey);
3465 kfree(dev->caps.qp0_tunnel);
3466 kfree(dev->caps.qp0_proxy);
3467 kfree(dev->caps.qp1_tunnel);
3468 kfree(dev->caps.qp1_proxy);
3469 kfree(dev->dev_vfs);
3471 mlx4_clean_dev(dev);
3472 priv->pci_dev_data = pci_dev_data;
3476 static void mlx4_remove_one(struct pci_dev *pdev)
3478 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3479 struct mlx4_dev *dev = persist->dev;
3480 struct mlx4_priv *priv = mlx4_priv(dev);
3483 mutex_lock(&persist->interface_state_mutex);
3484 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3485 mutex_unlock(&persist->interface_state_mutex);
	/* Disabling SR-IOV is not allowed while there are active VFs */
3488 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3489 active_vfs = mlx4_how_many_lives_vf(dev);
			pr_warn("Removing PF while there are active VFs!\n");
3492 pr_warn("Will not disable SR-IOV.\n");
	/* The device is now marked for deletion; continue without the lock
	 * to let other tasks terminate.
	 */
3499 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3500 mlx4_unload_one(pdev);
3502 mlx4_info(dev, "%s: interface is down\n", __func__);
3503 mlx4_catas_end(dev);
3504 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3505 mlx4_warn(dev, "Disabling SR-IOV\n");
3506 pci_disable_sriov(pdev);
3509 pci_release_regions(pdev);
3510 pci_disable_device(pdev);
3511 kfree(dev->persist);
3513 pci_set_drvdata(pdev, NULL);
3516 static int restore_current_port_types(struct mlx4_dev *dev,
3517 enum mlx4_port_type *types,
3518 enum mlx4_port_type *poss_types)
3520 struct mlx4_priv *priv = mlx4_priv(dev);
3523 mlx4_stop_sense(dev);
3525 mutex_lock(&priv->port_mutex);
3526 for (i = 0; i < dev->caps.num_ports; i++)
3527 dev->caps.possible_type[i + 1] = poss_types[i];
3528 err = mlx4_change_port_types(dev, types);
3529 mlx4_start_sense(dev);
3530 mutex_unlock(&priv->port_mutex);
3535 int mlx4_restart_one(struct pci_dev *pdev)
3537 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3538 struct mlx4_dev *dev = persist->dev;
3539 struct mlx4_priv *priv = mlx4_priv(dev);
3540 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3541 int pci_dev_data, err, total_vfs;
3543 pci_dev_data = priv->pci_dev_data;
3544 total_vfs = dev->persist->num_vfs;
3545 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3547 mlx4_unload_one(pdev);
3548 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
3550 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3551 __func__, pci_name(pdev), err);
3555 err = restore_current_port_types(dev, dev->persist->curr_port_type,
3556 dev->persist->curr_port_poss_type);
3558 mlx4_err(dev, "could not restore original port types (%d)\n",
3564 static const struct pci_device_id mlx4_pci_table[] = {
3565 /* MT25408 "Hermon" SDR */
3566 { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3567 /* MT25408 "Hermon" DDR */
3568 { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3569 /* MT25408 "Hermon" QDR */
3570 { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3571 /* MT25408 "Hermon" DDR PCIe gen2 */
3572 { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3573 /* MT25408 "Hermon" QDR PCIe gen2 */
3574 { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3575 /* MT25408 "Hermon" EN 10GigE */
3576 { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3577 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
3578 { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3579 /* MT25458 ConnectX EN 10GBASE-T 10GigE */
3580 { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3581 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
3582 { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3583 /* MT26468 ConnectX EN 10GigE PCIe gen2*/
3584 { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3585 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
3586 { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3587 /* MT26478 ConnectX2 40GigE PCIe gen2 */
3588 { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
3589 /* MT25400 Family [ConnectX-2 Virtual Function] */
3590 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
3591 /* MT27500 Family [ConnectX-3] */
3592 { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
3593 /* MT27500 Family [ConnectX-3 Virtual Function] */
3594 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
3595 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
3596 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
3597 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
3598 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
3599 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
3600 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
3601 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
3602 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
3603 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
3604 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
3605 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
3606 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
3610 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
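/*
 * PCI AER recovery: err_detected enters the internal error state and
 * unloads the driver instance; the PCI core then calls slot_reset to
 * re-enable the device and reload it with its previous VF and port
 * configuration.
 */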
3612 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
3613 pci_channel_state_t state)
3615 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3617 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
3618 mlx4_enter_error_state(persist);
3620 mutex_lock(&persist->interface_state_mutex);
3621 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3622 mlx4_unload_one(pdev);
3624 mutex_unlock(&persist->interface_state_mutex);
3625 if (state == pci_channel_io_perm_failure)
3626 return PCI_ERS_RESULT_DISCONNECT;
3628 pci_disable_device(pdev);
3629 return PCI_ERS_RESULT_NEED_RESET;
3632 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
3634 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3635 struct mlx4_dev *dev = persist->dev;
3636 struct mlx4_priv *priv = mlx4_priv(dev);
3638 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3641 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
3642 ret = pci_enable_device(pdev);
		mlx4_err(dev, "Cannot re-enable device, ret=%d\n", ret);
3645 return PCI_ERS_RESULT_DISCONNECT;
3648 pci_set_master(pdev);
3649 pci_restore_state(pdev);
3650 pci_save_state(pdev);
3652 total_vfs = dev->persist->num_vfs;
3653 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3655 mutex_lock(&persist->interface_state_mutex);
3656 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
3657 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
3660 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
3665 ret = restore_current_port_types(dev, dev->persist->
3666 curr_port_type, dev->persist->
3667 curr_port_poss_type);
3669 mlx4_err(dev, "could not restore original port types (%d)\n", ret);
3672 mutex_unlock(&persist->interface_state_mutex);
3674 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
3677 static void mlx4_shutdown(struct pci_dev *pdev)
3679 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3681 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
3682 mutex_lock(&persist->interface_state_mutex);
3683 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3684 mlx4_unload_one(pdev);
3685 mutex_unlock(&persist->interface_state_mutex);
3688 static const struct pci_error_handlers mlx4_err_handler = {
3689 .error_detected = mlx4_pci_err_detected,
3690 .slot_reset = mlx4_pci_slot_reset,
3693 static struct pci_driver mlx4_driver = {
3695 .id_table = mlx4_pci_table,
3696 .probe = mlx4_init_one,
3697 .shutdown = mlx4_shutdown,
3698 .remove = mlx4_remove_one,
3699 .err_handler = &mlx4_err_handler,
3702 static int __init mlx4_verify_params(void)
3704 if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
3709 if (log_num_vlan != 0)
3710 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
3711 MLX4_LOG_NUM_VLANS);
3714 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
3716 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
3717 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
	/* Check that the port type module parameters form a legal combination */
3723 if (port_type_array[0] == false && port_type_array[1] == true) {
3724 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
3725 port_type_array[0] = true;
3728 if (mlx4_log_num_mgm_entry_size < -7 ||
3729 (mlx4_log_num_mgm_entry_size > 0 &&
3730 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
3731 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
3732 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
3733 mlx4_log_num_mgm_entry_size,
3734 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
3735 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
3742 static int __init mlx4_init(void)
3746 if (mlx4_verify_params())
3750 mlx4_wq = create_singlethread_workqueue("mlx4");
3754 ret = pci_register_driver(&mlx4_driver);
3756 destroy_workqueue(mlx4_wq);
3757 return ret < 0 ? ret : 0;
3760 static void __exit mlx4_cleanup(void)
3762 pci_unregister_driver(&mlx4_driver);
3763 destroy_workqueue(mlx4_wq);
3766 module_init(mlx4_init);
3767 module_exit(mlx4_cleanup);