/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [7..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
		 "Default = 1");

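/*
 * Example invocation (hypothetical values), loading the driver with larger
 * queues and multiple receive queues enabled:
 *
 *   modprobe ehea use_mcs=1 sq_entries=16383 rq1_entries=16383
 *
 * Entry counts must be of the form 2^x - 1 within the ranges given above.
 */
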
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int ehea_probe_adapter(struct platform_device *dev,
			      const struct of_device_id *id);

static int ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}

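/*
 * Rebuild the flat snapshot of all firmware handles (QPs, CQs, EQs, MRs)
 * owned by the driver.  The snapshot is kept so that reboot/crash paths
 * can free firmware resources without walking live driver state.
 */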
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0; l < port->num_def_qps; l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}

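/*
 * Rebuild the flat snapshot of broadcast/multicast (BCMC) registrations.
 * Each active port contributes two broadcast entries (untagged and
 * VLANID_ALL) plus two entries per registered multicast address.
 */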
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				if (mc_entry->macaddr == 0)
					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *stats)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

	stats->multicast = port->stats.multicast;
	stats->rx_errors = port->stats.rx_errors;
	return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
	struct ehea_port *port =
		container_of(work, struct ehea_port, stats_work.work);
	struct net_device *dev = port->netdev;
	struct rtnl_link_stats64 *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret;

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
		goto resched;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;

out_herr:
	free_page((unsigned long)cb2);
resched:
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));
}

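/*
 * Refill receive queue 1 (the low-latency queue).  For RQ1 the packet data
 * arrives as immediate data in the CQE; the skbs staged here are the copy
 * targets.  os_skbs accumulates WQEs that could not be posted (allocation
 * failure or stopped transfer) so a later refill can catch up.
 */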
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			ret = 0;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe,
				 struct ehea_port_res *pr)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

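/*
 * Fetch the skb posted for a completed WQE.  The next array slot and its
 * packet data are prefetched so the cache is warm by the time the next
 * completion is processed.
 */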
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetch(pref);
		prefetch(pref + EHEA_CACHE_LINE);
		prefetch(pref + EHEA_CACHE_LINE * 2);
		prefetch(pref + EHEA_CACHE_LINE * 3);
	}

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	if (pref) {
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);

		pref = (skb_array[x]->data);
		prefetchw(pref);
		prefetchw(pref + EHEA_CACHE_LINE);
	}

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}

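/*
 * Process receive completions.  The HEA delivers packets through three
 * receive queues: RQ1 carries small packets as immediate data in the CQE,
 * RQ2 carries medium-sized packets and RQ3 carries jumbo frames.
 */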
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe, pr);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe, pr);
				processed_rq3++;
			}

			processed_bytes += skb->len;

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				__vlan_hwaccel_put_tag(skb, cqe->vlan_tag);

			napi_gro_receive(&pr->napi, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}

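/*
 * After a queue restart a marker WQE (SWQE_RESTART_CHECK) is posted on
 * each send queue.  Seeing its completion proves the hardware and software
 * queue pointers are still in sync; a timeout triggers a port reset.
 */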
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;

		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}

static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
						pr - &pr->port->port_res[0]);

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}

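/*
 * NAPI poll: drain send completions first, then receive WQEs.  Before
 * completing NAPI, interrupts are re-armed and both CQs are re-checked to
 * close the race between the final poll and the next interrupt.
 */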
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int wqe_index;
	int rx = 0;

	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while (rx != budget) {
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);
	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_qp *qp;
	struct ehea_eqe *eqe;
	u32 qp_token;
	int reset_port = 0;
	u64 resource_type, aer, aerr;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       u32 logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			pr_err("Failed setting port speed\n");
			ret = -EIO;
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}

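/*
 * Decode one notification event queue entry: logical and physical port
 * state changes are propagated to the net_device carrier state, and
 * malfunction events are logged (for port malfunction the queue is also
 * disabled).
 */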
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	if (!port) {
		pr_err("unknown portnum %x\n", portnum);
		return;
	}
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_tx_disable(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_tx_disable(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, pr->rq1_skba.len);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;

	return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

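/*
 * SWQE2 transmit descriptors carry up to SWQE2_MAX_IMM bytes of packet data
 * inline ("immediate").  For TSO only the protocol headers go inline; any
 * remaining linear data is referenced through the first scatter-gather
 * entry.
 */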
static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
				  u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	unsigned int immediate_len = SWQE2_MAX_IMM;

	swqe->descriptors = 0;

	if (skb_is_gso(skb)) {
		swqe->tx_control |= EHEA_SWQE_TSO;
		swqe->mss = skb_shinfo(skb)->gso_size;
		/*
		 * For TSO packets we only copy the headers into the
		 * immediate area.
		 */
		immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
	}

	if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
		skb_copy_from_linear_data(skb, imm_data, immediate_len);
		swqe->immediate_data_length = immediate_len;

		if (skb_data_size > immediate_len) {
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - immediate_len;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + immediate_len);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	sg1entry_contains_frag_data = 0;

	write_swqe2_immediate(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_frag_size(frag);
			sg1entry->vaddr =
				ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = skb_frag_size(frag);
			sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (untagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
	if (mc_mac_addr == 0)
		reg_type |= EHEA_BCMC_SCOPE_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else {
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
	}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry)
		return;

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

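/*
 * Fill in the checksum-offload fields shared by all transmit paths.  The
 * hardware needs the IP header bounds and, for TCP/UDP, the offset of the
 * checksum field itself.
 */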
static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;

	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_UDP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct udphdr, check);
		break;

	case IPPROTO_TCP:
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;

		swqe->tcp_offset = swqe->ip_end + 1 +
				   offsetof(struct tcphdr, check);
		break;
	}
}

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;

	xmit_common(skb, swqe);

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	xmit_common(skb, swqe);

	if (!skb->data_len)
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	else
		skb_copy_bits(skb, 0, imm_data, skb->len);

	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

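/*
 * Transmit: packets that fit in SWQE3 immediate data (skb->len <=
 * SWQE3_MAX_IMM) are copied inline and only request a signalled CQE every
 * sig_comp_iv packets; larger packets use SWQE2 with scatter-gather
 * descriptors and keep the skb until its send completion arrives.
 */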
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;
	struct netdev_queue *txq;

	pr = &port->port_res[skb_get_queue_mapping(skb)];
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_tx_stop_queue(txq);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		pr->p_stats.queue_stopped++;
		netif_tx_stop_queue(txq);
	}

	return NETDEV_TX_OK;
}

static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out_free;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out_free:
	free_page((unsigned long)cb1);
out:
	return err;
}

static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;
	int err = 0;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		err = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		err = -EINVAL;
		goto out_free;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_port failed\n");
		err = -EINVAL;
	}
out_free:
	free_page((unsigned long)cb1);
out:
	return err;
}

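/*
 * Walk the QP through its firmware state machine:
 * INITIALIZED -> ENABLED -> RDY2SND, verifying each transition with a
 * query before programming the next control-register value.
 */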
static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	/* No additional small-RX queue pairs are configured; this loop body
	 * is currently unreachable as both bounds are def_qps. */
	for (i = def_qps; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return -EIO;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps);
	if (ret) {
		netdev_err(dev, "port_res_failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "out_free_irqs\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_tx_start_all_queues(dev);
	}

	mutex_unlock(&port->port_lock);
	schedule_delayed_work(&port->stats_work,
			      round_jiffies_relative(msecs_to_jiffies(1000)));

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_allmulti(dev, 0);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	mutex_lock(&port->port_lock);
	netif_tx_stop_all_queues(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	return ret;
}

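/*
 * During memory DLPAR (__EHEA_STOP_XFER) the send queues are drained:
 * every pending SWQE is marked EHEA_SWQE_PURGE so the hardware completes
 * it without transmitting, then ehea_flush_sq() waits for swqe_avail to
 * return to its idle level.
 */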
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

static int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

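/*
 * ehea_restart_qps - inverse of ehea_stop_qps: recreate the shared memory
 * regions, patch the posted receive WQEs, re-enable each queue pair and
 * refill all receive queues.
 */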
static int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);
	return ret;
}

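/*
 * ehea_reset_port - reset worker scheduled from the tx watchdog and error
 * paths: takes the port down and back up under port_lock, with dlpar_mem_lock
 * held to serialize against memory re-registration.
 */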
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_tx_disable(dev);
	port_napi_disable(port);

	ehea_down(dev);
	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);
	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);
	netif_tx_wake_all_queues(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

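/*
 * ehea_rereg_mrs - handle an LPAR memory layout change (DLPAR add/remove):
 * quiesce all active ports, unregister the kernel memory region, register
 * it again for the new layout, then restart the ports.
 */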
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_tx_disable(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						ret = ehea_restart_qps(dev);
						if (!ret) {
							check_sqs(port);
							port_napi_enable(port);
							netif_tx_wake_all_queues(dev);
						} else {
							netdev_err(dev, "Unable to restart QPS\n");
						}
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

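/*
 * ehea_tx_watchdog - schedule a port reset on transmit timeout, unless a
 * memory re-registration is already in progress (__EHEA_STOP_XFER set).
 */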
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		return -ENOMEM;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame) {
			*jumbo = 1;
		} else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else {
		ret = -EINVAL;
	}

	free_page((unsigned long)cb4);
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats64	= ehea_get_stats64,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

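/*
 * ehea_setup_single_port - allocate and register the net_device for one
 * logical HEA port: sense the port attributes from firmware, register the
 * of_device, set up netdev ops/features and the reset and stats workers.
 */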
static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
						u32 logical_port_id,
						struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	netif_set_real_num_rx_queues(dev, port->num_def_qps);
	netif_set_real_num_tx_queues(dev, port->num_def_qps);

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
		      | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_RXCSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
			NETIF_F_IP_CSUM;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);
	INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	memset(&port->stats, 0, sizeof(struct net_device_stats));
	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);
out_free_mc_list:
	kfree(port->mc_list);
out_free_ethdev:
	free_netdev(dev);
out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	cancel_delayed_work_sync(&port->stats_work);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);
	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

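/*
 * Sysfs interface for adding and removing logical ports at runtime by
 * writing a logical port id to probe_port/remove_port. Illustrative usage
 * (the exact device path varies by system):
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea-device>/probe_port
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea-device>/remove_port
 */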
static int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		return ret;
	return device_create_file(&dev->dev, &dev_attr_remove_port);
}

static void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int ehea_probe_adapter(struct platform_device *dev,
			      const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;
	int i;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_eq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_shutdown_ports;
	}

	/* Handle any events that might be pending. */
	tasklet_hi_schedule(&adapter->neq_tasklet);

	ret = 0;
	goto out;

out_shutdown_ports:
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}
out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);
out_kill_eq:
	ehea_destroy_eq(adapter->neq);
out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);
out:
	ehea_update_firmware_handles();
	return ret;
}

static int ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}

static void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

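/*
 * ehea_mem_notifier - memory hotplug callback: update the section bitmap
 * for the added or removed range and trigger ehea_rereg_mrs() so the
 * firmware memory region matches the new LPAR layout.
 */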
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled");
		/* Readd canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

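/*
 * check_module_parm - validate the queue-size module parameters: each
 * entry count must lie between EHEA_MIN_ENTRIES_QP and the per-queue
 * maximum, as documented in the MODULE_PARM_DESC strings.
 */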
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

static int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);