/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/flow_table.h>
#include "en.h"
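/* Per-channel resource parameters; filled once by the mlx5e_build_*_param()
 * helpers below and then consumed by the RQ/SQ/CQ create routines.
 */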
struct mlx5e_rq_param {
        u32 rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param wq;
};

struct mlx5e_sq_param {
        u32 sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param wq;
        u16 max_inline;
};

struct mlx5e_cq_param {
        u32 cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param wq;
        u16 eq_ix;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param rq;
        struct mlx5e_sq_param sq;
        struct mlx5e_cq_param rx_cq;
        struct mlx5e_cq_param tx_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                        MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

        if (port_state == VPORT_STATE_UP)
                netif_carrier_on(priv->netdev);
        else
                netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}
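/* Fold the per-ring SW counters into the vport stats, then overwrite the
 * HW-side fields from a QUERY_VPORT_COUNTER command; SW counters are read
 * first so the two views stay roughly consistent.
 */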
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_vport_stats *s = &priv->stats.vport;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        u32 *out;
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u64 tx_offload_none;
        int i, j;

        out = mlx5_vzalloc(outlen);
        if (!out)
                return;

        /* Collect first the SW counters and then HW for consistency */
        s->tso_packets = 0;
        s->tso_bytes = 0;
        s->tx_queue_stopped = 0;
        s->tx_queue_wake = 0;
        s->tx_queue_dropped = 0;
        tx_offload_none = 0;
        s->lro_packets = 0;
        s->lro_bytes = 0;
        s->rx_csum_none = 0;
        s->rx_wqe_err = 0;
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->lro_packets += rq_stats->lro_packets;
                s->lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_wqe_err += rq_stats->wqe_err;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tso_packets += sq_stats->tso_packets;
                        s->tso_bytes += sq_stats->tso_bytes;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake += sq_stats->wake;
                        s->tx_queue_dropped += sq_stats->dropped;
                        tx_offload_none += sq_stats->csum_offload_none;
                }
        }

        /* HW counters */
        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
                goto free_out;

#define MLX5_GET_CTR(p, x) \
        MLX5_GET64(query_vport_counter_out, p, x)

        s->rx_error_packets =
                MLX5_GET_CTR(out, received_errors.packets);
        s->rx_error_bytes =
                MLX5_GET_CTR(out, received_errors.octets);
        s->tx_error_packets =
                MLX5_GET_CTR(out, transmit_errors.packets);
        s->tx_error_bytes =
                MLX5_GET_CTR(out, transmit_errors.octets);

        s->rx_unicast_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets);
        s->rx_unicast_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets);
        s->tx_unicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
        s->tx_unicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

        s->rx_multicast_packets =
                MLX5_GET_CTR(out, received_eth_multicast.packets);
        s->rx_multicast_bytes =
                MLX5_GET_CTR(out, received_eth_multicast.octets);
        s->tx_multicast_packets =
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
        s->tx_multicast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

        s->rx_broadcast_packets =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
        s->rx_broadcast_bytes =
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
        s->tx_broadcast_packets =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
        s->tx_broadcast_bytes =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

        s->rx_packets =
                s->rx_unicast_packets +
                s->rx_multicast_packets +
                s->rx_broadcast_packets;
        s->rx_bytes =
                s->rx_unicast_bytes +
                s->rx_multicast_bytes +
                s->rx_broadcast_bytes;
        s->tx_packets =
                s->tx_unicast_packets +
                s->tx_multicast_packets +
                s->tx_broadcast_packets;
        s->tx_bytes =
                s->tx_unicast_bytes +
                s->tx_multicast_bytes +
                s->tx_broadcast_bytes;

        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good = s->rx_packets - s->rx_csum_none;

free_out:
        kvfree(out);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
                schedule_delayed_work(dwork,
                                      msecs_to_jiffies(
                                              MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}
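/* Event handling is serialized against mlx5e_disable_async_events() with
 * async_events_spinlock, so once the ENABLE bit is cleared no handler can
 * still be running.
 */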
static void __mlx5e_async_event(struct mlx5e_priv *priv,
                                enum mlx5_dev_event event)
{
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                schedule_work(&priv->update_carrier_work);
                break;

        default:
                break;
        }
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        spin_lock(&priv->async_events_spinlock);
        if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
                __mlx5e_async_event(priv, event);
        spin_unlock(&priv->async_events_spinlock);
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        spin_lock_irq(&priv->async_events_spinlock);
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
        spin_unlock_irq(&priv->async_events_spinlock);
}
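/* The HW MTU includes the Ethernet header, one VLAN tag and the FCS,
 * while the netdev MTU does not; these macros convert between the two.
 */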
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                               cpu_to_node(c->cpu));
        if (!rq->skb) {
                err = -ENOMEM;
                goto err_rq_wq_destroy;
        }

        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
                     MLX5E_SW2HW_MTU(priv->netdev->mtu);
        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

                wqe->data.lkey = c->mkey_be;
                wqe->data.byte_count =
                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }

        rq->pdev = c->pdev;
        rq->netdev = c->netdev;
        rq->channel = c;
        rq->ix = c->ix;
        rq->priv = c->priv;

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        kfree(rq->skb);
        mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, flush_in_error_en, 1);
        MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;
        int i;

        for (i = 0; i < 1000; i++) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}
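/* RQ lifecycle: create the SW/HW queue, move it RST->RDY, then post a NOP
 * on SQ 0 so the first NAPI pass starts refilling RX WQEs.
 */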
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

        mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
        while (!mlx5_wq_ll_is_empty(&rq->wq))
                msleep(20);

        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);

        mlx5e_disable_rq(rq);
        mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->skb);
        kfree(sq->dma_fifo);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}
static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int txq_ix;
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar);
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map = sq->uar.map;
        sq->uar_bf_map = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline = param->max_inline;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

        sq->pdev = c->pdev;
        sq->mkey_be = c->mkey_be;
        sq->channel = c;
        sq->tc = tc;
        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;

        return 0;

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
        MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, tis_lst_sz, 1);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, sq->uar.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
        if (err)
                goto err_disable_sq;

        set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}
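/* SQ teardown: stop the txq, push one final NOP so the HW sees all pending
 * WQEs, move the SQ to ERR and wait until cc == pc before destroying.
 */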
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
        napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
        netif_tx_disable_queue(sq->txq);

        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
                mlx5e_send_nop(sq, true);

        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
                msleep(20);

        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);

        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
}
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        param->eq_ix = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi = &c->napi;

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = param->eq_ix;
        mcq->comp = mlx5e_completion_event;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;
        mcq->uar = &priv->cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         u16 moderation_usecs,
                         u16 moderation_frames)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                             moderation_usecs,
                                             moderation_frames);
        if (err)
                goto err_destroy_cq;

        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
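/* Channel ix is served by the first CPU in its completion IRQ's affinity
 * mask; allocations below are made NUMA-local to that CPU.
 */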
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation_usec,
                                    priv->params.tx_cq_moderation_pkts);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
                                      int num_channels)
{
        int i;

        for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
                c->tc_to_txq_map[i] = c->ix + i * num_channels;
}
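/* A channel bundles one RQ, one SQ per TC, their CQs and a NAPI context;
 * everything is allocated on the NUMA node of the channel's CPU.
 */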
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct net_device *netdev = priv->netdev;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        int err;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv = priv;
        c->ix = ix;
        c->cpu = cpu;
        c->pdev = &priv->mdev->pdev->dev;
        c->netdev = priv->netdev;
        c->mkey_be = cpu_to_be32(priv->mr.key);
        c->num_tc = priv->params.num_tc;

        mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            priv->params.rx_cq_moderation_usec,
                            priv->params.rx_cq_moderation_pkts);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_napi_del:
        netif_napi_del(&c->napi);
        kfree(c);

        return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        netif_napi_del(&c->napi);
        kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_rq_param *param)
{
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd, priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
                                 struct mlx5e_sq_param *param)
{
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

        MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd, priv->pdn);

        param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->max_inline = priv->params.tx_max_inline;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
                                        struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
                                    struct mlx5e_cq_param *param)
{
        void *cqc = param->cqc;

        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

        mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_channel_param *cparam)
{
        memset(cparam, 0, sizeof(*cparam));

        mlx5e_build_rq_param(priv, &cparam->rq);
        mlx5e_build_sq_param(priv, &cparam->sq);
        mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
        mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
        struct mlx5e_channel_param cparam;
        int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;

        priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
                                GFP_KERNEL);

        priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);

        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;

        mlx5e_build_channel_param(priv, &cparam);
        for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }

        for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
        }

        return 0;

err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);

        return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);

        kfree(priv->txq_to_sq_map);
        kfree(priv->channel);
}
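/* The drop RQ is never polled; it gives RQTs and TIRs a valid RQ to point
 * at while the real channels are closed (e.g. when the netdev is down).
 */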
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
                                struct mlx5e_rq *rq,
                                struct mlx5e_rq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        int err;

        param->wq.db_numa_node = param->wq.buf_numa_node;

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->priv = priv;

        return 0;
}
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
                                struct mlx5e_cq *cq,
                                struct mlx5e_cq_param *param)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        int irqn;
        int err;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = param->eq_ix;
        mcq->comp = mlx5e_completion_event;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;
        mcq->uar = &priv->cq_uar;

        cq->priv = priv;

        return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
        struct mlx5e_cq_param cq_param;
        struct mlx5e_rq_param rq_param;
        struct mlx5e_rq *rq = &priv->drop_rq;
        struct mlx5e_cq *cq = &priv->drop_rq.cq;
        int err;

        memset(&cq_param, 0, sizeof(cq_param));
        memset(&rq_param, 0, sizeof(rq_param));
        mlx5e_build_rx_cq_param(priv, &cq_param);
        mlx5e_build_rq_param(priv, &rq_param);

        err = mlx5e_create_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, &cq_param);
        if (err)
                goto err_destroy_cq;

        err = mlx5e_create_drop_rq(priv, rq, &rq_param);
        if (err)
                goto err_disable_cq;

        err = mlx5e_enable_rq(rq, &rq_param);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
        mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
        mlx5e_destroy_cq(&priv->drop_rq.cq);

        return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
        mlx5e_disable_rq(&priv->drop_rq);
        mlx5e_destroy_rq(&priv->drop_rq);
        mlx5e_disable_cq(&priv->drop_rq.cq);
        mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(create_tis_in)];
        void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        memset(in, 0, sizeof(in));

        MLX5_SET(tisc, tisc, prio, tc);
        MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
        mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

static int mlx5e_open_tises(struct mlx5e_priv *priv)
{
        int err;
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_open_tis(priv, tc);
                if (err)
                        goto err_close_tises;
        }

        return 0;

err_close_tises:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_tis(priv, tc);

        return err;
}

static void mlx5e_close_tises(struct mlx5e_priv *priv)
{
        int tc;

        for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_close_tis(priv, tc);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
        return (hfunc == ETH_RSS_HASH_TOP) ?
               MLX5_RX_HASH_FN_TOEPLITZ :
               MLX5_RX_HASH_FN_INVERTED_XOR8;
}

static int mlx5e_bits_invert(unsigned long a, int size)
{
        int inv = 0;
        int i;

        for (i = 0; i < size; i++)
                inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

        return inv;
}
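/* Fill the RQT: the indirection table spreads entries over the channels
 * round-robin; with the XOR8 hash the index is bit-inverted first,
 * presumably to spread the low-entropy hash bits more evenly.
 */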
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
                                enum mlx5e_rqt_ix rqt_ix)
{
        int i;
        int log_sz;

        switch (rqt_ix) {
        case MLX5E_INDIRECTION_RQT:
                log_sz = priv->params.rx_hash_log_tbl_sz;
                for (i = 0; i < (1 << log_sz); i++) {
                        int ix = i;

                        if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
                                ix = mlx5e_bits_invert(i, log_sz);

                        ix = ix % priv->params.num_channels;
                        MLX5_SET(rqtc, rqtc, rq_num[i],
                                 priv->channel[ix]->rq.rqn);
                }

                break;

        default: /* MLX5E_SINGLE_RQ_RQT */
                MLX5_SET(rqtc, rqtc, rq_num[0],
                         priv->channel[0]->rq.rqn);

                break;
        }
}
static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *rqtc;
        int inlen;
        int err;
        int log_sz;
        int sz;

        log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
                 priv->params.rx_hash_log_tbl_sz;
        sz = 1 << log_sz;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

        err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

        kvfree(in);

        return err;
}
static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
        mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}
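/* Build a TIR context per traffic type: LRO settings, the RQT to dispatch
 * into, the RSS hash function, and the packet fields that feed the hash.
 */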
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

        MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
                                 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

        if (priv->params.lro_en) {
                MLX5_SET(tirc, tirc, lro_enable_mask,
                         MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                         MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
                MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
                         (priv->params.lro_wqe_sz -
                          ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
                MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
                         MLX5_CAP_ETH(priv->mdev,
                                      lro_timer_supported_periods[3]));
        }

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

        switch (tt) {
        case MLX5E_TT_ANY:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
                break;
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
                MLX5_SET(tirc, tirc, rx_hash_fn,
                         mlx5e_rx_hash_fn(priv->params.rss_hfunc));
                if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
                        void *rss_key = MLX5_ADDR_OF(tirc, tirc,
                                                     rx_hash_toeplitz_key);
                        size_t len = MLX5_FLD_SZ_BYTES(tirc,
                                                       rx_hash_toeplitz_key);

                        MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                        netdev_rss_key_fill(rss_key, len);
                }
                break;
        }

        switch (tt) {
        case MLX5E_TT_IPV4_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_TCP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV6_UDP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_L4PORTS);
                break;

        case MLX5E_TT_IPV4_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_AH:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV6_IPSEC_ESP:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP_IPSEC_SPI);
                break;

        case MLX5E_TT_IPV4:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV4);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;

        case MLX5E_TT_IPV6:
                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
                         MLX5_L3_PROT_TYPE_IPV6);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
                         MLX5_HASH_IP);
                break;
        }
}
static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
        void *tirc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        mlx5e_build_tir_ctx(priv, tirc, tt);

        err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

        kvfree(in);

        return err;
}

static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
        mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_open_tirs(struct mlx5e_priv *priv)
{
        int err;
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++) {
                err = mlx5e_open_tir(priv, i);
                if (err)
                        goto err_close_tirs;
        }

        return 0;

err_close_tirs:
        for (i--; i >= 0; i--)
                mlx5e_close_tir(priv, i);

        return err;
}

static void mlx5e_close_tirs(struct mlx5e_priv *priv)
{
        int i;

        for (i = 0; i < MLX5E_NUM_TT; i++)
                mlx5e_close_tir(priv, i);
}
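/* Program the port MTU, then read back the operational MTU the firmware
 * actually applied; warn and adopt that value if it differs.
 */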
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int hw_mtu;
        int err;

        err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
        if (err)
                return err;

        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

        if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
                netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
                            __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);

        return 0;
}
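/* Bring-up order: TISes -> channels -> RQTs -> TIRs -> flow table -> VLAN
 * rules; the error path unwinds in exact reverse order.
 */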
int mlx5e_open_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;

        num_txqs = priv->params.num_channels * priv->params.num_tc;
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

        err = mlx5e_set_dev_port_mtu(netdev);
        if (err)
                return err;

        err = mlx5e_open_tises(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
                           __func__, err);
                return err;
        }

        err = mlx5e_open_channels(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
                           __func__, err);
                goto err_close_tises;
        }

        err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n",
                           __func__, err);
                goto err_close_channels;
        }

        err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n",
                           __func__, err);
                goto err_close_rqt_indir;
        }

        err = mlx5e_open_tirs(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
                           __func__, err);
                goto err_close_rqt_single;
        }

        err = mlx5e_open_flow_table(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
                           __func__, err);
                goto err_close_tirs;
        }

        err = mlx5e_add_all_vlan_rules(priv);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
                           __func__, err);
                goto err_close_flow_table;
        }

        mlx5e_init_eth_addr(priv);

        set_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_update_carrier(priv);
        mlx5e_set_rx_mode_core(priv);

        schedule_delayed_work(&priv->update_stats_work, 0);

        return 0;

err_close_flow_table:
        mlx5e_close_flow_table(priv);

err_close_tirs:
        mlx5e_close_tirs(priv);

err_close_rqt_single:
        mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_close_rqt_indir:
        mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_channels:
        mlx5e_close_channels(priv);

err_close_tises:
        mlx5e_close_tises(priv);

        return err;
}
static int mlx5e_open(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        clear_bit(MLX5E_STATE_OPENED, &priv->state);

        mlx5e_set_rx_mode_core(priv);
        mlx5e_del_all_vlan_rules(priv);
        netif_carrier_off(priv->netdev);
        mlx5e_close_flow_table(priv);
        mlx5e_close_tirs(priv);
        mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
        mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_channels(priv);
        mlx5e_close_tises(priv);

        return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);

        return err;
}
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_vport_stats *vstats = &priv->stats.vport;

        stats->rx_packets = vstats->rx_packets;
        stats->rx_bytes = vstats->rx_bytes;
        stats->tx_packets = vstats->tx_packets;
        stats->tx_bytes = vstats->tx_bytes;
        stats->multicast = vstats->rx_multicast_packets +
                           vstats->tx_multicast_packets;
        stats->tx_errors = vstats->tx_error_packets;
        stats->rx_errors = vstats->rx_error_packets;
        stats->tx_dropped = vstats->tx_queue_dropped;
        stats->rx_crc_errors = 0;
        stats->rx_length_errors = 0;

        return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        netif_addr_lock_bh(netdev);
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);

        schedule_work(&priv->set_rx_mode_work);

        return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err = 0;
        netdev_features_t changes = features ^ netdev->features;

        mutex_lock(&priv->state_lock);

        if (changes & NETIF_F_LRO) {
                bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

                if (was_opened)
                        mlx5e_close_locked(priv->netdev);

                priv->params.lro_en = !!(features & NETIF_F_LRO);

                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
        }

        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        mlx5e_enable_vlan_filter(priv);
                else
                        mlx5e_disable_vlan_filter(priv);
        }

        mutex_unlock(&priv->state_lock);

        return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
        int max_mtu;
        int err = 0;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

        if (new_mtu > max_mtu) {
                netdev_err(netdev,
                           "%s: Bad MTU (%d) > (%d) Max\n",
                           __func__, new_mtu, max_mtu);
                return -EINVAL;
        }

        mutex_lock(&priv->state_lock);

        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
                mlx5e_close_locked(netdev);

        netdev->mtu = new_mtu;

        if (was_opened)
                err = mlx5e_open_locked(netdev);

        mutex_unlock(&priv->state_lock);

        return err;
}
static struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open = mlx5e_open,
        .ndo_stop = mlx5e_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_get_stats64 = mlx5e_get_stats,
        .ndo_set_rx_mode = mlx5e_set_rx_mode,
        .ndo_set_mac_address = mlx5e_set_mac,
        .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
        .ndo_set_features = mlx5e_set_features,
        .ndo_change_mtu = mlx5e_change_mtu,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -ENOTSUPP;

        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
            !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
            MLX5_CAP_FLOWTABLE(mdev,
                               flow_table_properties_nic_receive.max_ft_level)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -ENOTSUPP;
        }

        return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
        int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

        return bf_buf_size -
               sizeof(struct mlx5e_tx_wqe) +
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_comp_vectors)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->params.log_sq_size =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.log_rq_size =
                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        priv->params.rx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
        priv->params.rx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        priv->params.tx_cq_moderation_usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.rx_hash_log_tbl_sz =
                (order_base_2(num_comp_vectors) >
                 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
                order_base_2(num_comp_vectors) :
                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc = 1;
        priv->params.default_vlan_prio = 0;
        priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

        priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
        priv->params.lro_wqe_sz =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        priv->mdev = mdev;
        priv->netdev = netdev;
        priv->params.num_channels = num_comp_vectors;
        priv->default_vlan_prio = priv->params.default_vlan_prio;

        spin_lock_init(&priv->async_events_spinlock);
        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (priv->params.num_tc > 1)
                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

        netdev->netdev_ops = &mlx5e_netdev_ops;
        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_ethtool_ops;

        netdev->vlan_features |= NETIF_F_SG;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_GRO;
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_RXCSUM;
        netdev->vlan_features |= NETIF_F_RXHASH;

        if (!!MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features |= NETIF_F_LRO;

        netdev->hw_features = netdev->vlan_features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->features = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features &= ~NETIF_F_LRO;

        netdev->features |= NETIF_F_HIGHDMA;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);
}
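/* One physical-address mkey (MLX5_ACCESS_MODE_PA + MLX5_MKEY_LEN64)
 * effectively spans the whole address space, so data buffers need no
 * per-buffer memory registration.
 */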
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
                             struct mlx5_core_mr *mr)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        int err;

        in = mlx5_vzalloc(sizeof(*in));
        if (!in)
                return -ENOMEM;

        in->seg.flags = MLX5_PERM_LOCAL_WRITE |
                        MLX5_PERM_LOCAL_READ |
                        MLX5_ACCESS_MODE_PA;
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

        err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
                                    NULL);

        kvfree(in);

        return err;
}
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int ncv = mdev->priv.eq_table.num_comp_vectors;
        int err;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        mlx5e_build_netdev_priv(mdev, netdev, ncv);
        mlx5e_build_netdev(netdev);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }

        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }

        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
                mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }

        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_mkey;
        }

        mlx5e_enable_async_events(priv);

        return priv;

err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
        mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct net_device *netdev = priv->netdev;

        unregister_netdev(netdev);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
        free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
        .add = mlx5e_create_netdev,
        .remove = mlx5e_destroy_netdev,
        .event = mlx5e_async_event,
        .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}