/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic_defs.h"
#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}
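
/* cnic_ulp_tbl follows the usual RCU pattern: readers dereference it
 * under rcu_read_lock() while all updates (cnic_register_driver() /
 * cnic_unregister_driver() below) are serialized by cnic_lock, which is
 * what lockdep_is_held() asserts above.  A minimal reader sketch
 * (illustrative only, not code from this file):
 *
 *	rcu_read_lock();
 *	ulp_ops = rcu_dereference(cnic_ulp_tbl[ulp_type]);
 *	if (ulp_ops)
 *		ulp_get(ulp_ops);	(* pin the ops across an upcall *)
 *	rcu_read_unlock();
 */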
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
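
/* These ref counts are plain usage counters, not kref-style lifetime
 * managers: nothing is freed when they reach zero.  Writers such as
 * cnic_unregister_driver() below simply poll atomic_read() until all
 * outstanding users have dropped their references.
 */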
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
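
/* The wrappers above never touch hardware directly: every register or
 * context access is funneled through ethdev->drv_ctl(), the callback
 * exported by the owning bnx2/bnx2x netdriver.  drv_ctl_info is a small
 * tagged union; cnic fills info.cmd plus the matching member of
 * info.data and the netdriver decodes it on the other side.
 */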
static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
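
/* The id table is a simple circular bitmap allocator: cnic_alloc_new_id()
 * scans forward from id_tbl->next, wraps to the bottom half on failure,
 * and advances next past the allocated bit so successive calls hand out
 * ids round-robin.  The wrap mask assumes max is a power of two; e.g.
 * with max = 8, next = 6 and bits 6-7 already set, the forward scan
 * fails, the wrapped scan finds bit 0, and next becomes (0 + 1) & 7 = 1.
 */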
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian (high dword first)
		 * format.
		 */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian (low dword first)
		 * format.
		 */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
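
/* The two flavors differ only in dword order: cnic_setup_page_tbl()
 * writes each 64-bit DMA address high dword first, the _le variant low
 * dword first.  Both go through cpu_to_le32() for the individual dwords
 * because the table lives in little-endian host memory; which ordering a
 * given chip expects is selected through the cp->setup_pgtbl hook.
 */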
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
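
/* cnic_alloc_dma() always allocates "pages" coherent pages and, when
 * use_pg_tbl is set, one extra page table that cp->setup_pgtbl() fills
 * with the per-page DMA addresses (8 bytes per entry, hence pages * 8
 * rounded up to a whole page).  Typical call, as used for the kwq below
 * (sketch):
 *
 *	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
 *	if (ret)
 *		goto error;
 */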
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
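
/* On bnx2x the last slot of every KCQ page is not a real KCQE; it holds
 * the bnx2x_bd_chain_next pointer to the following page (filled in by
 * cnic_alloc_kcq() below).  The next_idx/hw_idx helpers therefore skip
 * one slot whenever the low bits reach MAX_KCQE_CNT: e.g. if
 * MAX_KCQE_CNT were 127, index 126 would advance straight to 128, never
 * landing on the chain entry at 127.  bnx2 has no such reserved slot.
 */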
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
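
/* The loop above links the KCQ pages into a ring: page i's chain entry
 * points at page (i + 1) % KCQ_PAGE_CNT, so the chip walks back to page
 * 0 without host intervention.  The bnx2 path returns before the loop;
 * it reaches the pages through the separate page table instead.
 */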
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_udev:
	kfree(udev);
	return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
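
/* Free-space math for the KWQ producer/consumer ring, assuming
 * max_kwq_idx is an all-ones mask (ring size minus one): with
 * max_kwq_idx = 255, prod = 300 and con = 60, (300 - 60) & 255 = 240
 * entries are in flight, leaving 255 - 240 = 15 usable slots (one slot
 * is sacrificed to distinguish full from empty).
 */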
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
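
/* Each context in ctx_tbl owns a CNIC_KWQ16_DATA_SIZE slice of the
 * kwq_16_data_info DMA block (carved up in cnic_alloc_bnx2x_resc()
 * above).  The helper hands back the CPU pointer and stuffs the matching
 * DMA address into l5_data, which travels inside the work queue element
 * so firmware can DMA the parameters directly.
 */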
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
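
/* cnic_submit_kwqe_16() wraps one 16-byte fast-path slow-path element:
 * the SPE header packs the command id and HW CID into conn_and_cmd_data
 * and the connection type plus PF id into the type word, while the
 * payload is just the DMA address prepared by cnic_get_kwqe_16_data().
 * drv_submit_kwqes_16() returns the number of elements it consumed,
 * which is why ret == 1 means success here.
 */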
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
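
/* Context blocks may need chip-specific alignment (cp->ctx_align, set in
 * cnic_alloc_bnx2x_context() above).  align_off rounds the block's DMA
 * address up to the next boundary: e.g. with ctx_align = 0x1000 and
 * mapping = 0x12345800, mask = 0xfff and align_off = 0x1000 - 0x800 =
 * 0x800, so the first context in the block starts at 0x12346000.
 */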
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
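	/* Note the deliberate lo/hi swap above: the first PTE as delivered
	 * in the KWQE is in the opposite dword order from what the xstorm
	 * context expects, so first_pte.hi feeds curr_pbe.lo and vice
	 * versa.  The same pattern repeats for the RQ and CQ PTEs below.
	 */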
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags |=
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;
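
	/* CFC deletion must not race with firmware that may still be
	 * processing the connection teardown, so a destroy request that
	 * arrives within ~2 seconds of the context's last activity
	 * timestamp is parked on the delete_task workqueue and replayed
	 * later rather than executed inline (see the delta math below).
	 */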
	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
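
	/* The pseudo-header checksum is precomputed here so firmware can
	 * later fold the payload checksum into it.  csum_ipv6_magic()
	 * also covers the IPv4 case because the addresses were built as
	 * 4-dword arrays above: for v4 connections the upper dwords are
	 * zero and contribute nothing to the one's-complement sum.
	 */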
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
				  kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
2272 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2276 struct cnic_local *cp = dev->cnic_priv;
2277 struct bnx2x *bp = netdev_priv(dev->netdev);
2279 struct fcoe_init_ramrod_params *fcoe_init;
2280 struct fcoe_kwqe_init1 *req1;
2281 struct fcoe_kwqe_init2 *req2;
2282 struct fcoe_kwqe_init3 *req3;
2283 union l5cm_specific_data l5_data;
2289 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2290 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2291 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2292 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2296 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2301 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2302 netdev_err(dev->netdev, "fcoe_init size too big\n");
2305 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2309 memset(fcoe_init, 0, sizeof(*fcoe_init));
2310 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2311 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2312 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2313 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2314 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2315 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2317 fcoe_init->sb_num = cp->status_blk_num;
2318 fcoe_init->eq_prod = MAX_KCQ_IDX;
2319 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2320 cp->kcq2.sw_prod_idx = 0;
2322 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2323 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2324 FCOE_CONNECTION_TYPE, &l5_data);
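/* A sketch of the regpair packing used for eq_pbl_base above; the same
 * lo/hi split of a 64-bit DMA address recurs throughout this file:
 *
 *	dma_addr_t map = cp->kcq2.dma.pgtbl_map;
 *
 *	pair.lo = map & 0xffffffff;
 *	pair.hi = (u64) map >> 32;
 */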
2329 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2333 u32 cid = -1, l5_cid;
2334 struct cnic_local *cp = dev->cnic_priv;
2335 struct bnx2x *bp = netdev_priv(dev->netdev);
2336 struct fcoe_kwqe_conn_offload1 *req1;
2337 struct fcoe_kwqe_conn_offload2 *req2;
2338 struct fcoe_kwqe_conn_offload3 *req3;
2339 struct fcoe_kwqe_conn_offload4 *req4;
2340 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2341 struct cnic_context *ctx;
2342 struct fcoe_context *fctx;
2343 struct regpair ctx_addr;
2344 union l5cm_specific_data l5_data;
2345 struct fcoe_kcqe kcqe;
2346 struct kcqe *cqes[1];
2352 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2353 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2354 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2355 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2359 l5_cid = req1->fcoe_conn_id;
2360 if (l5_cid >= dev->max_fcoe_conn)
2363 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2365 ctx = &cp->ctx_tbl[l5_cid];
2366 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2369 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2376 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2378 u32 hw_cid = BNX2X_HW_CID(bp, cid);
2381 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2382 FCOE_CONNECTION_TYPE);
2383 fctx->xstorm_ag_context.cdu_reserved = val;
2384 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2385 FCOE_CONNECTION_TYPE);
2386 fctx->ustorm_ag_context.cdu_usage = val;
2388 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2389 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2392 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2396 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2397 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2398 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2399 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2400 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2402 cid = BNX2X_HW_CID(bp, cid);
2403 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2404 FCOE_CONNECTION_TYPE, &l5_data);
2406 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2412 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2414 memset(&kcqe, 0, sizeof(kcqe));
2415 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2416 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2417 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2419 cqes[0] = (struct kcqe *) &kcqe;
2420 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2424 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2426 struct fcoe_kwqe_conn_enable_disable *req;
2427 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2428 union l5cm_specific_data l5_data;
2431 struct cnic_local *cp = dev->cnic_priv;
2433 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2434 cid = req->context_id;
2435 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2437 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2438 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2441 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2445 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2446 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2447 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2448 FCOE_CONNECTION_TYPE, &l5_data);
2452 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2454 struct fcoe_kwqe_conn_enable_disable *req;
2455 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2456 union l5cm_specific_data l5_data;
2459 struct cnic_local *cp = dev->cnic_priv;
2461 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2462 cid = req->context_id;
2463 l5_cid = req->conn_id;
2464 if (l5_cid >= dev->max_fcoe_conn)
2467 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2469 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2470 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2473 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2477 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2478 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2479 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2480 FCOE_CONNECTION_TYPE, &l5_data);
2484 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2486 struct fcoe_kwqe_conn_destroy *req;
2487 union l5cm_specific_data l5_data;
2490 struct cnic_local *cp = dev->cnic_priv;
2491 struct cnic_context *ctx;
2492 struct fcoe_kcqe kcqe;
2493 struct kcqe *cqes[1];
2495 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2496 cid = req->context_id;
2497 l5_cid = req->conn_id;
2498 if (l5_cid >= dev->max_fcoe_conn)
2501 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2503 ctx = &cp->ctx_tbl[l5_cid];
2505 init_waitqueue_head(&ctx->waitq);
2508 memset(&kcqe, 0, sizeof(kcqe));
2509 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2510 memset(&l5_data, 0, sizeof(l5_data));
2511 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2512 FCOE_CONNECTION_TYPE, &l5_data);
2514 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2516 kcqe.completion_status = 0;
2519 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2520 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2522 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2523 kcqe.fcoe_conn_id = req->conn_id;
2524 kcqe.fcoe_conn_context_id = cid;
2526 cqes[0] = (struct kcqe *) &kcqe;
2527 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
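/* The destroy path above follows the driver's usual "fire a ramrod and
 * wait" idiom: arm ctx->waitq, submit the KWQE, then sleep until the
 * completion handler (cnic_process_fcoe_term_conn() below) sets
 * ctx->wait_cond and calls wake_up().  In outline, assuming wait_cond
 * is cleared before submission:
 *
 *	init_waitqueue_head(&ctx->waitq);
 *	ctx->wait_cond = 0;
 *	ret = cnic_submit_kwqe_16(dev, cmd, cid, type, &l5_data);
 *	if (ret == 0)
 *		wait_event_timeout(ctx->waitq, ctx->wait_cond,
 *				   CNIC_RAMROD_TMO);
 */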
2531 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2533 struct cnic_local *cp = dev->cnic_priv;
2536 for (i = start_cid; i < cp->max_cid_space; i++) {
2537 struct cnic_context *ctx = &cp->ctx_tbl[i];
2540 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2543 for (j = 0; j < 5; j++) {
2544 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2549 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2550 netdev_warn(dev->netdev, "CID %x not deleted\n",
2555 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2557 struct fcoe_kwqe_destroy *req;
2558 union l5cm_specific_data l5_data;
2559 struct cnic_local *cp = dev->cnic_priv;
2560 struct bnx2x *bp = netdev_priv(dev->netdev);
2564 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2566 req = (struct fcoe_kwqe_destroy *) kwqe;
2567 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2569 memset(&l5_data, 0, sizeof(l5_data));
2570 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2571 FCOE_CONNECTION_TYPE, &l5_data);
2575 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2577 struct cnic_local *cp = dev->cnic_priv;
2579 struct kcqe *cqes[1];
2581 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2582 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2586 cid = kwqe->kwqe_info0;
2587 memset(&kcqe, 0, sizeof(kcqe));
2589 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2592 ulp_type = CNIC_ULP_FCOE;
2593 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2594 struct fcoe_kwqe_conn_enable_disable *req;
2596 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2597 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2598 cid = req->context_id;
2599 l5_cid = req->conn_id;
2600 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2601 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2605 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2606 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2607 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2608 kcqe.kcqe_info2 = cid;
2609 kcqe.kcqe_info0 = l5_cid;
2611 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2612 ulp_type = CNIC_ULP_ISCSI;
2613 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2614 cid = kwqe->kwqe_info1;
2616 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2617 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2618 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2619 kcqe.kcqe_info2 = cid;
2620 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2622 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2623 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2625 ulp_type = CNIC_ULP_L4;
2626 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2627 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2628 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2629 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2630 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2631 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2635 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2636 KCQE_FLAGS_LAYER_MASK_L4;
2637 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2639 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2645 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
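/* The iSCSI branch above relies on a hardware-interface convention:
 * each iSCSI KCQE opcode is its KWQE opcode plus 0x10, so the error
 * completion can be formed as (opcode + 0x10) without a per-opcode
 * lookup table.
 */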
2648 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2649 struct kwqe *wqes[], u32 num_wqes)
2655 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
	return -EAGAIN;		/* bnx2x is down */
2658 for (i = 0; i < num_wqes; ) {
2660 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2664 case ISCSI_KWQE_OPCODE_INIT1:
2665 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2667 case ISCSI_KWQE_OPCODE_INIT2:
2668 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2670 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2671 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2672 num_wqes - i, &work);
2674 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2675 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2677 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2678 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2680 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2681 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2684 case L4_KWQE_OPCODE_VALUE_CLOSE:
2685 ret = cnic_bnx2x_close(dev, kwqe);
2687 case L4_KWQE_OPCODE_VALUE_RESET:
2688 ret = cnic_bnx2x_reset(dev, kwqe);
2690 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2691 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2693 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2694 ret = cnic_bnx2x_update_pg(dev, kwqe);
2696 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2701 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2706 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2709 /* Possibly bnx2x parity error, send completion
2710 * to ulp drivers with error code to speed up
2711 * cleanup and reset recovery.
2713 if (ret == -EIO || ret == -EAGAIN)
2714 cnic_bnx2x_kwqe_err(dev, kwqe);
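/* Handlers that consume more than one KWQE (INIT1, OFFLOAD_CONN1 and
 * the L4 CONNECT1 above) report the number consumed through the "work"
 * out parameter, and the dispatch loop advances by that amount, so a
 * multi-KWQE request is handled exactly once.
 */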
2721 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2722 struct kwqe *wqes[], u32 num_wqes)
2724 struct bnx2x *bp = netdev_priv(dev->netdev);
2729 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
	return -EAGAIN;		/* bnx2x is down */
2732 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2735 for (i = 0; i < num_wqes; ) {
2737 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2741 case FCOE_KWQE_OPCODE_INIT1:
2742 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2743 num_wqes - i, &work);
2745 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2746 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2747 num_wqes - i, &work);
2749 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2750 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2752 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2753 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2755 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2756 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2758 case FCOE_KWQE_OPCODE_DESTROY:
2759 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2761 case FCOE_KWQE_OPCODE_STAT:
2762 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2766 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2771 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2774 /* Possibly bnx2x parity error, send completion
2775 * to ulp drivers with error code to speed up
2776 * cleanup and reset recovery.
2778 if (ret == -EIO || ret == -EAGAIN)
2779 cnic_bnx2x_kwqe_err(dev, kwqe);
2786 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2792 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2793 return -EAGAIN; /* bnx2x is down */
2798 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2799 switch (layer_code) {
2800 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2801 case KWQE_FLAGS_LAYER_MASK_L4:
2802 case KWQE_FLAGS_LAYER_MASK_L2:
2803 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2806 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2807 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}
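/* FCoE TERMINATE_CONN completions are deliberately reported under the
 * L4 layer mask: they are consumed by the connection-manager handler
 * (see cnic_cm_process_kcqe() below) rather than by the FCoE ULP
 * driver.
 */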
2821 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2823 struct cnic_local *cp = dev->cnic_priv;
2829 struct cnic_ulp_ops *ulp_ops;
2831 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2832 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2834 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2837 while (j < num_cqes) {
2838 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2840 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2843 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2848 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2849 ulp_type = CNIC_ULP_RDMA;
2850 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2851 ulp_type = CNIC_ULP_ISCSI;
2852 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2853 ulp_type = CNIC_ULP_FCOE;
2854 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2855 ulp_type = CNIC_ULP_L4;
2856 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2859 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2865 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2866 if (likely(ulp_ops)) {
2867 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2868 cp->completed_kcq + i, j);
2877 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2880 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2882 struct cnic_local *cp = dev->cnic_priv;
2883 u16 i, ri, hw_prod, last;
2885 int kcqe_cnt = 0, last_cnt = 0;
2887 i = ri = last = info->sw_prod_idx;
2889 hw_prod = *info->hw_prod_idx_ptr;
2890 hw_prod = info->hw_idx(hw_prod);
2892 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2893 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2894 cp->completed_kcq[kcqe_cnt++] = kcqe;
2895 i = info->next_idx(i);
2896 ri = i & MAX_KCQ_IDX;
2897 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2898 last_cnt = kcqe_cnt;
2903 info->sw_prod_idx = last;
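/* Index arithmetic used above: "i" carries the full 16-bit producer
 * sequence so it can be compared against hw_prod directly, while "ri"
 * is masked to the ring size and then split into a page number and an
 * offset within the page:
 *
 *	kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
 *
 * last/last_cnt remember the most recent KCQE without KCQE_FLAGS_NEXT
 * set, so only complete multi-KCQE groups are handed to service_kcqes().
 */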
2907 static int cnic_l2_completion(struct cnic_local *cp)
2909 u16 hw_cons, sw_cons;
2910 struct cnic_uio_dev *udev = cp->udev;
2911 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2912 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2916 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2919 hw_cons = *cp->rx_cons_ptr;
2920 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2923 sw_cons = cp->rx_cons;
2924 while (sw_cons != hw_cons) {
2927 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2928 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2929 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2930 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2931 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2932 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2933 cmd == RAMROD_CMD_ID_ETH_HALT)
2936 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2941 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2943 u16 rx_cons, tx_cons;
2946 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2949 rx_cons = *cp->rx_cons_ptr;
2950 tx_cons = *cp->tx_cons_ptr;
2951 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2952 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2953 comp = cnic_l2_completion(cp);
2955 cp->tx_cons = tx_cons;
2956 cp->rx_cons = rx_cons;
2959 uio_event_notify(&cp->udev->cnic_uinfo);
2962 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
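/* The L2 rings belong to the userspace UIO client (e.g. iscsiuio), so
 * a consumer-index change cannot be handled here; the check above only
 * records the new indices and kicks userspace via uio_event_notify().
 */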
2965 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2967 struct cnic_local *cp = dev->cnic_priv;
2968 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2971 /* status block index must be read before reading other fields */
2973 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2975 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2977 service_kcqes(dev, kcqe_cnt);
2979 /* Tell compiler that status_blk fields can change. */
2981 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2982 /* status block index must be read first */
2984 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2987 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2989 cnic_chk_pkt_rings(cp);
2994 static int cnic_service_bnx2(void *data, void *status_blk)
2996 struct cnic_dev *dev = data;
2998 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2999 struct status_block *sblk = status_blk;
3001 return sblk->status_idx;
3004 return cnic_service_bnx2_queues(dev);
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
3018 static void cnic_doirq(struct cnic_dev *dev)
3020 struct cnic_local *cp = dev->cnic_priv;
3022 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3023 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3025 prefetch(cp->status_blk.gen);
3026 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3028 tasklet_schedule(&cp->cnic_irq_task);
static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}
3045 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3046 u16 index, u8 op, u8 update)
3048 struct bnx2x *bp = netdev_priv(dev->netdev);
3049 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3050 COMMAND_REG_INT_ACK);
3051 struct igu_ack_register igu_ack;
3053 igu_ack.status_block_index = index;
3054 igu_ack.sb_id_and_flags =
3055 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3056 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3057 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3058 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3060 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3063 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3064 u16 index, u8 op, u8 update)
3066 struct igu_regular cmd_data;
3067 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3069 cmd_data.sb_id_and_flags =
3070 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3071 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3072 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3073 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3076 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
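/* Both ack helpers pack one command word from bitfields and write it to
 * the chip.  A worked example for cnic_ack_igu_sb() with index 0x1234
 * and update = 1:
 *
 *	cmd_data.sb_id_and_flags =
 *		(0x1234 << IGU_REGULAR_SB_INDEX_SHIFT) |
 *		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
 *		(1 << IGU_REGULAR_BUPDATE_SHIFT) |
 *		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
 *
 * The word then goes to BAR_IGU_INTMEM at a per-status-block address,
 * eight bytes per IGU command slot.
 */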
3079 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3081 struct cnic_local *cp = dev->cnic_priv;
3083 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3084 IGU_INT_DISABLE, 0);
3087 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3089 struct cnic_local *cp = dev->cnic_priv;
3091 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3092 IGU_INT_DISABLE, 0);
static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}
3111 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3113 u32 last_status = *info->status_idx_ptr;
3116 /* status block index must be read before reading the KCQ */
3118 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3120 service_kcqes(dev, kcqe_cnt);
3122 /* Tell compiler that sblk fields can change. */
3125 last_status = *info->status_idx_ptr;
3126 /* status block index must be read before reading the KCQ */
3132 static void cnic_service_bnx2x_bh(unsigned long data)
3134 struct cnic_dev *dev = (struct cnic_dev *) data;
3135 struct cnic_local *cp = dev->cnic_priv;
3136 struct bnx2x *bp = netdev_priv(dev->netdev);
3137 u32 status_idx, new_status_idx;
3139 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3143 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3145 CNIC_WR16(dev, cp->kcq1.io_addr,
3146 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3148 if (!CNIC_SUPPORTS_FCOE(bp)) {
3149 cp->arm_int(dev, status_idx);
3153 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3155 if (new_status_idx != status_idx)
3158 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3161 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3162 status_idx, IGU_INT_ENABLE, 1);
3168 static int cnic_service_bnx2x(void *data, void *status_blk)
3170 struct cnic_dev *dev = data;
3171 struct cnic_local *cp = dev->cnic_priv;
3173 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3176 cnic_chk_pkt_rings(cp);
3181 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3183 struct cnic_ulp_ops *ulp_ops;
3185 if (if_type == CNIC_ULP_ISCSI)
3186 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3188 mutex_lock(&cnic_lock);
3189 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3190 lockdep_is_held(&cnic_lock));
3192 mutex_unlock(&cnic_lock);
3195 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3196 mutex_unlock(&cnic_lock);
3198 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3199 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3201 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3204 static void cnic_ulp_stop(struct cnic_dev *dev)
3206 struct cnic_local *cp = dev->cnic_priv;
3209 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3210 cnic_ulp_stop_one(cp, if_type);
3213 static void cnic_ulp_start(struct cnic_dev *dev)
3215 struct cnic_local *cp = dev->cnic_priv;
3218 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3219 struct cnic_ulp_ops *ulp_ops;
3221 mutex_lock(&cnic_lock);
3222 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3223 lockdep_is_held(&cnic_lock));
3224 if (!ulp_ops || !ulp_ops->cnic_start) {
3225 mutex_unlock(&cnic_lock);
3228 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3229 mutex_unlock(&cnic_lock);
3231 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3232 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3234 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
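/* The ULP_F_CALL_PENDING handshake in the two functions above lets the
 * cnic_start/cnic_stop callbacks run outside cnic_lock: the bit is set
 * while the lock is held, the callback runs after the lock is dropped,
 * and the bit is cleared afterwards, so an unregistering ULP driver can
 * wait for in-flight callbacks to drain.
 */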
3238 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3240 struct cnic_local *cp = dev->cnic_priv;
3241 struct cnic_ulp_ops *ulp_ops;
3244 mutex_lock(&cnic_lock);
3245 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3246 lockdep_is_held(&cnic_lock));
3247 if (ulp_ops && ulp_ops->cnic_get_stats)
3248 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3251 mutex_unlock(&cnic_lock);
3255 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3257 struct cnic_dev *dev = data;
3258 int ulp_type = CNIC_ULP_ISCSI;
3260 switch (info->cmd) {
3261 case CNIC_CTL_STOP_CMD:
3269 case CNIC_CTL_START_CMD:
3272 if (!cnic_start_hw(dev))
3273 cnic_ulp_start(dev);
3277 case CNIC_CTL_STOP_ISCSI_CMD: {
3278 struct cnic_local *cp = dev->cnic_priv;
3279 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3280 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3283 case CNIC_CTL_COMPLETION_CMD: {
3284 struct cnic_ctl_completion *comp = &info->data.comp;
3285 u32 cid = BNX2X_SW_CID(comp->cid);
3287 struct cnic_local *cp = dev->cnic_priv;
3289 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3292 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3293 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3295 if (unlikely(comp->error)) {
3296 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3297 netdev_err(dev->netdev,
3298 "CID %x CFC delete comp error %x\n",
3303 wake_up(&ctx->waitq);
3307 case CNIC_CTL_FCOE_STATS_GET_CMD:
3308 ulp_type = CNIC_ULP_FCOE;
3310 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3312 cnic_copy_ulp_stats(dev, ulp_type);
3322 static void cnic_ulp_init(struct cnic_dev *dev)
3325 struct cnic_local *cp = dev->cnic_priv;
3327 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3328 struct cnic_ulp_ops *ulp_ops;
3330 mutex_lock(&cnic_lock);
3331 ulp_ops = cnic_ulp_tbl_prot(i);
3332 if (!ulp_ops || !ulp_ops->cnic_init) {
3333 mutex_unlock(&cnic_lock);
3337 mutex_unlock(&cnic_lock);
3339 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3340 ulp_ops->cnic_init(dev);
3346 static void cnic_ulp_exit(struct cnic_dev *dev)
3349 struct cnic_local *cp = dev->cnic_priv;
3351 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3352 struct cnic_ulp_ops *ulp_ops;
3354 mutex_lock(&cnic_lock);
3355 ulp_ops = cnic_ulp_tbl_prot(i);
3356 if (!ulp_ops || !ulp_ops->cnic_exit) {
3357 mutex_unlock(&cnic_lock);
3361 mutex_unlock(&cnic_lock);
3363 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3364 ulp_ops->cnic_exit(dev);
3370 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3372 struct cnic_dev *dev = csk->dev;
3373 struct l4_kwq_offload_pg *l4kwqe;
3374 struct kwqe *wqes[1];
3376 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3377 memset(l4kwqe, 0, sizeof(*l4kwqe));
3378 wqes[0] = (struct kwqe *) l4kwqe;
3380 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3382 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3383 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3385 l4kwqe->da0 = csk->ha[0];
3386 l4kwqe->da1 = csk->ha[1];
3387 l4kwqe->da2 = csk->ha[2];
3388 l4kwqe->da3 = csk->ha[3];
3389 l4kwqe->da4 = csk->ha[4];
3390 l4kwqe->da5 = csk->ha[5];
3392 l4kwqe->sa0 = dev->mac_addr[0];
3393 l4kwqe->sa1 = dev->mac_addr[1];
3394 l4kwqe->sa2 = dev->mac_addr[2];
3395 l4kwqe->sa3 = dev->mac_addr[3];
3396 l4kwqe->sa4 = dev->mac_addr[4];
3397 l4kwqe->sa5 = dev->mac_addr[5];
3399 l4kwqe->etype = ETH_P_IP;
3400 l4kwqe->ipid_start = DEF_IPID_START;
3401 l4kwqe->host_opaque = csk->l5_cid;
3404 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3405 l4kwqe->vlan_tag = csk->vlan_id;
3406 l4kwqe->l2hdr_nbytes += 4;
3409 return dev->submit_kwqes(dev, wqes, 1);
3412 static int cnic_cm_update_pg(struct cnic_sock *csk)
3414 struct cnic_dev *dev = csk->dev;
3415 struct l4_kwq_update_pg *l4kwqe;
3416 struct kwqe *wqes[1];
3418 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3419 memset(l4kwqe, 0, sizeof(*l4kwqe));
3420 wqes[0] = (struct kwqe *) l4kwqe;
3422 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3424 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3425 l4kwqe->pg_cid = csk->pg_cid;
3427 l4kwqe->da0 = csk->ha[0];
3428 l4kwqe->da1 = csk->ha[1];
3429 l4kwqe->da2 = csk->ha[2];
3430 l4kwqe->da3 = csk->ha[3];
3431 l4kwqe->da4 = csk->ha[4];
3432 l4kwqe->da5 = csk->ha[5];
3434 l4kwqe->pg_host_opaque = csk->l5_cid;
3435 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3437 return dev->submit_kwqes(dev, wqes, 1);
3440 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3442 struct cnic_dev *dev = csk->dev;
3443 struct l4_kwq_upload *l4kwqe;
3444 struct kwqe *wqes[1];
3446 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3447 memset(l4kwqe, 0, sizeof(*l4kwqe));
3448 wqes[0] = (struct kwqe *) l4kwqe;
3450 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3452 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3453 l4kwqe->cid = csk->pg_cid;
3455 return dev->submit_kwqes(dev, wqes, 1);
3458 static int cnic_cm_conn_req(struct cnic_sock *csk)
3460 struct cnic_dev *dev = csk->dev;
3461 struct l4_kwq_connect_req1 *l4kwqe1;
3462 struct l4_kwq_connect_req2 *l4kwqe2;
3463 struct l4_kwq_connect_req3 *l4kwqe3;
3464 struct kwqe *wqes[3];
3468 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3469 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3470 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3471 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3472 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3473 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3475 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3477 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3478 l4kwqe3->ka_timeout = csk->ka_timeout;
3479 l4kwqe3->ka_interval = csk->ka_interval;
3480 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3481 l4kwqe3->tos = csk->tos;
3482 l4kwqe3->ttl = csk->ttl;
3483 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3484 l4kwqe3->pmtu = csk->mtu;
3485 l4kwqe3->rcv_buf = csk->rcv_buf;
3486 l4kwqe3->snd_buf = csk->snd_buf;
3487 l4kwqe3->seed = csk->seed;
3489 wqes[0] = (struct kwqe *) l4kwqe1;
3490 if (test_bit(SK_F_IPV6, &csk->flags)) {
3491 wqes[1] = (struct kwqe *) l4kwqe2;
3492 wqes[2] = (struct kwqe *) l4kwqe3;
3495 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3496 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3498 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3499 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3500 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3501 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3502 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3503 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3504 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3505 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3506 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3507 sizeof(struct tcphdr);
3509 wqes[1] = (struct kwqe *) l4kwqe3;
3510 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3511 sizeof(struct tcphdr);
3514 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3516 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3517 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3518 l4kwqe1->cid = csk->cid;
3519 l4kwqe1->pg_cid = csk->pg_cid;
3520 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3521 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3522 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3523 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3524 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3525 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3526 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3527 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3528 if (csk->tcp_flags & SK_TCP_NAGLE)
3529 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3530 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3531 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3532 if (csk->tcp_flags & SK_TCP_SACK)
3533 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3534 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3535 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3537 l4kwqe1->tcp_flags = tcp_flags;
3539 return dev->submit_kwqes(dev, wqes, num_wqes);
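/* MSS selection above, as a worked example with a 1500-byte MTU and no
 * IP or TCP options:
 *
 *	IPv4: mss = 1500 - sizeof(struct iphdr)   - sizeof(struct tcphdr)
 *	          = 1500 - 20 - 20 = 1460
 *	IPv6: mss = 1500 - sizeof(struct ipv6hdr) - sizeof(struct tcphdr)
 *	          = 1500 - 40 - 20 = 1440
 *
 * Note also that IPv4 submits two KWQEs (req1 and req3) while IPv6
 * needs all three, since req2 only carries the remaining 96 bits of
 * each IPv6 address.
 */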
3542 static int cnic_cm_close_req(struct cnic_sock *csk)
3544 struct cnic_dev *dev = csk->dev;
3545 struct l4_kwq_close_req *l4kwqe;
3546 struct kwqe *wqes[1];
3548 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3549 memset(l4kwqe, 0, sizeof(*l4kwqe));
3550 wqes[0] = (struct kwqe *) l4kwqe;
3552 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3553 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3554 l4kwqe->cid = csk->cid;
3556 return dev->submit_kwqes(dev, wqes, 1);
3559 static int cnic_cm_abort_req(struct cnic_sock *csk)
3561 struct cnic_dev *dev = csk->dev;
3562 struct l4_kwq_reset_req *l4kwqe;
3563 struct kwqe *wqes[1];
3565 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3566 memset(l4kwqe, 0, sizeof(*l4kwqe));
3567 wqes[0] = (struct kwqe *) l4kwqe;
3569 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3570 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3571 l4kwqe->cid = csk->cid;
3573 return dev->submit_kwqes(dev, wqes, 1);
3576 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3577 u32 l5_cid, struct cnic_sock **csk, void *context)
3579 struct cnic_local *cp = dev->cnic_priv;
3580 struct cnic_sock *csk1;
3582 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3586 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3588 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3592 csk1 = &cp->csk_tbl[l5_cid];
3593 if (atomic_read(&csk1->ref_count))
3596 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3601 csk1->l5_cid = l5_cid;
3602 csk1->ulp_type = ulp_type;
3603 csk1->context = context;
3605 csk1->ka_timeout = DEF_KA_TIMEOUT;
3606 csk1->ka_interval = DEF_KA_INTERVAL;
3607 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3608 csk1->tos = DEF_TOS;
3609 csk1->ttl = DEF_TTL;
3610 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3611 csk1->rcv_buf = DEF_RCV_BUF;
3612 csk1->snd_buf = DEF_SND_BUF;
3613 csk1->seed = DEF_SEED;
3614 csk1->tcp_flags = 0;
3620 static void cnic_cm_cleanup(struct cnic_sock *csk)
3622 if (csk->src_port) {
3623 struct cnic_dev *dev = csk->dev;
3624 struct cnic_local *cp = dev->cnic_priv;
3626 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3631 static void cnic_close_conn(struct cnic_sock *csk)
3633 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3634 cnic_cm_upload_pg(csk);
3635 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3637 cnic_cm_cleanup(csk);
3640 static int cnic_cm_destroy(struct cnic_sock *csk)
3642 if (!cnic_in_use(csk))
3646 clear_bit(SK_F_INUSE, &csk->flags);
3647 smp_mb__after_atomic();
3648 while (atomic_read(&csk->ref_count) != 1)
3650 cnic_cm_cleanup(csk);
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}
static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}
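/* The scope id is copied only for link-local destinations because a
 * link-local address does not identify an egress interface by itself;
 * for any other address the routing lookup picks the device.
 */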
3708 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3711 struct cnic_dev *dev = NULL;
3712 struct dst_entry *dst;
3713 struct net_device *netdev = NULL;
3714 int err = -ENETUNREACH;
3716 if (dst_addr->sin_family == AF_INET)
3717 err = cnic_get_v4_route(dst_addr, &dst);
3718 else if (dst_addr->sin_family == AF_INET6) {
3719 struct sockaddr_in6 *dst_addr6 =
3720 (struct sockaddr_in6 *) dst_addr;
3722 err = cnic_get_v6_route(dst_addr6, &dst);
3732 cnic_get_vlan(dst->dev, &netdev);
3734 dev = cnic_from_netdev(netdev);
3743 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3745 struct cnic_dev *dev = csk->dev;
3746 struct cnic_local *cp = dev->cnic_priv;
3748 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3751 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3753 struct cnic_dev *dev = csk->dev;
3754 struct cnic_local *cp = dev->cnic_priv;
3756 struct dst_entry *dst = NULL;
3757 struct net_device *realdev;
3761 if (saddr->local.v6.sin6_family == AF_INET6 &&
3762 saddr->remote.v6.sin6_family == AF_INET6)
3764 else if (saddr->local.v4.sin_family == AF_INET &&
3765 saddr->remote.v4.sin_family == AF_INET)
3770 clear_bit(SK_F_IPV6, &csk->flags);
3773 set_bit(SK_F_IPV6, &csk->flags);
3774 cnic_get_v6_route(&saddr->remote.v6, &dst);
3776 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3777 sizeof(struct in6_addr));
3778 csk->dst_port = saddr->remote.v6.sin6_port;
3779 local_port = saddr->local.v6.sin6_port;
3782 cnic_get_v4_route(&saddr->remote.v4, &dst);
3784 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3785 csk->dst_port = saddr->remote.v4.sin_port;
3786 local_port = saddr->local.v4.sin_port;
3790 csk->mtu = dev->netdev->mtu;
3791 if (dst && dst->dev) {
3792 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3793 if (realdev == dev->netdev) {
3794 csk->vlan_id = vlan;
3795 csk->mtu = dst_mtu(dst);
3799 port_id = be16_to_cpu(local_port);
3800 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3801 port_id < CNIC_LOCAL_PORT_MAX) {
3802 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3808 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3809 if (port_id == -1) {
3813 local_port = cpu_to_be16(port_id);
3815 csk->src_port = local_port;
3822 static void cnic_init_csk_state(struct cnic_sock *csk)
3825 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3826 clear_bit(SK_F_CLOSING, &csk->flags);
3829 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3831 struct cnic_local *cp = csk->dev->cnic_priv;
3834 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3837 if (!cnic_in_use(csk))
3840 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3843 cnic_init_csk_state(csk);
3845 err = cnic_get_route(csk, saddr);
3849 err = cnic_resolve_addr(csk, saddr);
3854 clear_bit(SK_F_CONNECT_START, &csk->flags);
3858 static int cnic_cm_abort(struct cnic_sock *csk)
3860 struct cnic_local *cp = csk->dev->cnic_priv;
3861 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3863 if (!cnic_in_use(csk))
3866 if (cnic_abort_prep(csk))
3867 return cnic_cm_abort_req(csk);
3869 /* Getting here means that we haven't started connect, or
3870 * connect was not successful, or it has been reset by the target.
3873 cp->close_conn(csk, opcode);
3874 if (csk->state != opcode) {
3875 /* Wait for remote reset sequence to complete */
3876 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3885 static int cnic_cm_close(struct cnic_sock *csk)
3887 if (!cnic_in_use(csk))
3890 if (cnic_close_prep(csk)) {
3891 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3892 return cnic_cm_close_req(csk);
3894 /* Wait for remote reset sequence to complete */
3895 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3903 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3906 struct cnic_ulp_ops *ulp_ops;
3907 int ulp_type = csk->ulp_type;
3910 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3912 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3913 ulp_ops->cm_connect_complete(csk);
3914 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3915 ulp_ops->cm_close_complete(csk);
3916 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3917 ulp_ops->cm_remote_abort(csk);
3918 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3919 ulp_ops->cm_abort_complete(csk);
3920 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3921 ulp_ops->cm_remote_close(csk);
3926 static int cnic_cm_set_pg(struct cnic_sock *csk)
3928 if (cnic_offld_prep(csk)) {
3929 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3930 cnic_cm_update_pg(csk);
3932 cnic_cm_offload_pg(csk);
3937 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3939 struct cnic_local *cp = dev->cnic_priv;
3940 u32 l5_cid = kcqe->pg_host_opaque;
3941 u8 opcode = kcqe->op_code;
3942 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3945 if (!cnic_in_use(csk))
3948 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3949 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3952 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3953 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3954 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3955 cnic_cm_upcall(cp, csk,
3956 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3960 csk->pg_cid = kcqe->pg_cid;
3961 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3962 cnic_cm_conn_req(csk);
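/* pg_host_opaque closes the loop opened in cnic_cm_offload_pg(), which
 * stored csk->l5_cid in the request's host_opaque field; the completion
 * hands the same value back so the socket can be found without a CID
 * search.
 */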
3968 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3970 struct cnic_local *cp = dev->cnic_priv;
3971 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3972 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3973 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3975 ctx->timestamp = jiffies;
3977 wake_up(&ctx->waitq);
3980 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3982 struct cnic_local *cp = dev->cnic_priv;
3983 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3984 u8 opcode = l4kcqe->op_code;
3986 struct cnic_sock *csk;
3988 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3989 cnic_process_fcoe_term_conn(dev, kcqe);
3992 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3993 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3994 cnic_cm_process_offld_pg(dev, l4kcqe);
3998 l5_cid = l4kcqe->conn_id;
4000 l5_cid = l4kcqe->cid;
4001 if (l5_cid >= MAX_CM_SK_TBL_SZ)
4004 csk = &cp->csk_tbl[l5_cid];
4007 if (!cnic_in_use(csk)) {
4013 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4014 if (l4kcqe->status != 0) {
4015 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4016 cnic_cm_upcall(cp, csk,
4017 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4020 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4021 if (l4kcqe->status == 0)
4022 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4023 else if (l4kcqe->status ==
4024 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4025 set_bit(SK_F_HW_ERR, &csk->flags);
4027 smp_mb__before_atomic();
4028 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4029 cnic_cm_upcall(cp, csk, opcode);
4032 case L5CM_RAMROD_CMD_ID_CLOSE: {
4033 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4035 if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
4036 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4037 l4kcqe->status, l5kcqe->completion_status);
4038 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4044 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4045 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4046 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4047 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4048 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4049 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4050 set_bit(SK_F_HW_ERR, &csk->flags);
4052 cp->close_conn(csk, opcode);
4055 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4056 /* after we already sent CLOSE_REQ */
4057 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4058 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4059 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4060 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4062 cnic_cm_upcall(cp, csk, opcode);
static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};
4081 static void cnic_cm_free_mem(struct cnic_dev *dev)
4083 struct cnic_local *cp = dev->cnic_priv;
4087 cnic_free_id_tbl(&cp->csk_port_tbl);
4090 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4092 struct cnic_local *cp = dev->cnic_priv;
4095 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4100 port_id = prandom_u32();
4101 port_id %= CNIC_LOCAL_PORT_RANGE;
4102 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4103 CNIC_LOCAL_PORT_MIN, port_id)) {
4104 cnic_cm_free_mem(dev);
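/* Starting the port ID table at prandom_u32() % CNIC_LOCAL_PORT_RANGE
 * randomizes the first allocated source port, which helps avoid
 * immediately reusing TCP 4-tuples that may still be in TIME_WAIT on
 * the peer after a driver reload.
 */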
4110 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4112 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4113 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4114 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4115 csk->state = opcode;
	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
4124 if (opcode == csk->state || csk->state == 0 ||
4125 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4126 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4127 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4128 if (csk->state == 0)
4129 csk->state = opcode;
4136 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4138 struct cnic_dev *dev = csk->dev;
4139 struct cnic_local *cp = dev->cnic_priv;
4141 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4142 cnic_cm_upcall(cp, csk, opcode);
4146 clear_bit(SK_F_CONNECT_START, &csk->flags);
4147 cnic_close_conn(csk);
4148 csk->state = opcode;
4149 cnic_cm_upcall(cp, csk, opcode);
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = prandom_u32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
4165 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4167 struct cnic_dev *dev = csk->dev;
4168 struct cnic_local *cp = dev->cnic_priv;
4169 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4170 union l5cm_specific_data l5_data;
4172 int close_complete = 0;
4175 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4176 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4177 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4178 if (cnic_ready_to_close(csk, opcode)) {
4179 if (test_bit(SK_F_HW_ERR, &csk->flags))
4181 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4182 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4187 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4188 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4190 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4195 memset(&l5_data, 0, sizeof(l5_data));
4197 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4199 } else if (close_complete) {
4200 ctx->timestamp = jiffies;
4201 cnic_close_conn(csk);
4202 cnic_cm_upcall(cp, csk, csk->state);
4206 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4208 struct cnic_local *cp = dev->cnic_priv;
4213 if (!netif_running(dev->netdev))
4216 cnic_bnx2x_delete_wait(dev, 0);
4218 cancel_delayed_work(&cp->delete_task);
4219 flush_workqueue(cnic_wq);
4221 if (atomic_read(&cp->iscsi_conn) != 0)
4222 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4223 atomic_read(&cp->iscsi_conn));
4226 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4228 struct bnx2x *bp = netdev_priv(dev->netdev);
4229 u32 pfid = bp->pfid;
4230 u32 port = BP_PORT(bp);
4232 cnic_init_bnx2x_mac(dev);
4233 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4235 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4236 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4238 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4239 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4240 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4241 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4244 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4245 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4246 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4247 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4248 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4249 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4250 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4251 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4253 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4258 static void cnic_delete_task(struct work_struct *work)
4260 struct cnic_local *cp;
4261 struct cnic_dev *dev;
4263 int need_resched = 0;
4265 cp = container_of(work, struct cnic_local, delete_task.work);
4268 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4269 struct drv_ctl_info info;
4271 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4273 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4274 cp->ethdev->drv_ctl(dev->netdev, &info);
4277 for (i = 0; i < cp->max_cid_space; i++) {
4278 struct cnic_context *ctx = &cp->ctx_tbl[i];
4281 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4282 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4285 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4290 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4293 err = cnic_bnx2x_destroy_ramrod(dev, i);
4295 cnic_free_bnx2x_conn_resc(dev, i);
4297 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4298 atomic_dec(&cp->iscsi_conn);
4300 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4305 queue_delayed_work(cnic_wq, &cp->delete_task,
4306 msecs_to_jiffies(10));
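/* The 2 * HZ grace period above pairs with the msecs_to_jiffies(2000)
 * delay used when cnic_bnx2x_fcoe_destroy() queues this work: a context
 * is only torn down once at least two seconds have passed since its
 * termination timestamp, which gives the firmware time to retire the
 * connection.
 */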
4310 static int cnic_cm_open(struct cnic_dev *dev)
4312 struct cnic_local *cp = dev->cnic_priv;
4315 err = cnic_cm_alloc_mem(dev);
4319 err = cp->start_cm(dev);
4324 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4326 dev->cm_create = cnic_cm_create;
4327 dev->cm_destroy = cnic_cm_destroy;
4328 dev->cm_connect = cnic_cm_connect;
4329 dev->cm_abort = cnic_cm_abort;
4330 dev->cm_close = cnic_cm_close;
4331 dev->cm_select_dev = cnic_cm_select_dev;
4333 cp->ulp_handle[CNIC_ULP_L4] = dev;
4334 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4338 cnic_cm_free_mem(dev);
4342 static int cnic_cm_shutdown(struct cnic_dev *dev)
4344 struct cnic_local *cp = dev->cnic_priv;
4350 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4351 struct cnic_sock *csk = &cp->csk_tbl[i];
4353 clear_bit(SK_F_INUSE, &csk->flags);
4354 cnic_cm_cleanup(csk);
4356 cnic_cm_free_mem(dev);
4361 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4366 cid_addr = GET_CID_ADDR(cid);
4368 for (i = 0; i < CTX_SIZE; i += 4)
4369 cnic_ctx_wr(dev, cid_addr, i, 0);
4372 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4374 struct cnic_local *cp = dev->cnic_priv;
4376 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4378 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4381 for (i = 0; i < cp->ctx_blks; i++) {
4383 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4386 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4388 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4389 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4390 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4391 (u64) cp->ctx_arr[i].mapping >> 32);
4392 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4393 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4394 for (j = 0; j < 10; j++) {
4396 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4397 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4401 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4409 static void cnic_free_irq(struct cnic_dev *dev)
4411 struct cnic_local *cp = dev->cnic_priv;
4412 struct cnic_eth_dev *ethdev = cp->ethdev;
4414 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4415 cp->disable_int_sync(dev);
4416 tasklet_kill(&cp->cnic_irq_task);
4417 free_irq(ethdev->irq_arr[0].vector, dev);
4421 static int cnic_request_irq(struct cnic_dev *dev)
4423 struct cnic_local *cp = dev->cnic_priv;
4424 struct cnic_eth_dev *ethdev = cp->ethdev;
4427 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4429 tasklet_disable(&cp->cnic_irq_task);
4434 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4436 struct cnic_local *cp = dev->cnic_priv;
4437 struct cnic_eth_dev *ethdev = cp->ethdev;
4439 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4441 int sblk_num = cp->status_blk_num;
4442 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4443 BNX2_HC_SB_CONFIG_1;
4445 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4447 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4448 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4449 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4451 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4452 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4453 (unsigned long) dev);
4454 err = cnic_request_irq(dev);
4458 while (cp->status_blk.bnx2->status_completion_producer_index &&
4460 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4461 1 << (11 + sblk_num));
4466 if (cp->status_blk.bnx2->status_completion_producer_index) {
4472 struct status_block *sblk = cp->status_blk.gen;
4473 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4476 while (sblk->status_completion_producer_index && i < 10) {
4477 CNIC_WR(dev, BNX2_HC_COMMAND,
4478 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4483 if (sblk->status_completion_producer_index)
4490 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4494 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4496 struct cnic_local *cp = dev->cnic_priv;
4497 struct cnic_eth_dev *ethdev = cp->ethdev;
4499 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4502 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4503 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4506 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4508 struct cnic_local *cp = dev->cnic_priv;
4509 struct cnic_eth_dev *ethdev = cp->ethdev;
4511 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4514 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4515 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4516 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4517 synchronize_irq(ethdev->irq_arr[0].vector);
4520 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4522 struct cnic_local *cp = dev->cnic_priv;
4523 struct cnic_eth_dev *ethdev = cp->ethdev;
4524 struct cnic_uio_dev *udev = cp->udev;
4525 u32 cid_addr, tx_cid, sb_id;
4526 u32 val, offset0, offset1, offset2, offset3;
4528 struct bnx2_tx_bd *txbd;
4529 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4530 struct status_block *s_blk = cp->status_blk.gen;
4532 sb_id = cp->status_blk_num;
4534 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4535 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4536 struct status_block_msix *sblk = cp->status_blk.bnx2;
4538 tx_cid = TX_TSS_CID + sb_id - 1;
4539 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4541 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4543 cp->tx_cons = *cp->tx_cons_ptr;
4545 cid_addr = GET_CID_ADDR(tx_cid);
4546 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4547 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4549 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4550 cnic_ctx_wr(dev, cid_addr2, i, 0);
4552 offset0 = BNX2_L2CTX_TYPE_XI;
4553 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4554 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4555 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4557 cnic_init_context(dev, tx_cid);
4558 cnic_init_context(dev, tx_cid + 1);
4560 offset0 = BNX2_L2CTX_TYPE;
4561 offset1 = BNX2_L2CTX_CMD_TYPE;
4562 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4563 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4565 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4566 cnic_ctx_wr(dev, cid_addr, offset0, val);
4568 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4569 cnic_ctx_wr(dev, cid_addr, offset1, val);
4571 txbd = udev->l2_ring;
4573 buf_map = udev->l2_buf_map;
4574 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4575 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4576 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4578 val = (u64) ring_map >> 32;
4579 cnic_ctx_wr(dev, cid_addr, offset2, val);
4580 txbd->tx_bd_haddr_hi = val;
4582 val = (u64) ring_map & 0xffffffff;
4583 cnic_ctx_wr(dev, cid_addr, offset3, val);
4584 txbd->tx_bd_haddr_lo = val;
4587 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4589 struct cnic_local *cp = dev->cnic_priv;
4590 struct cnic_eth_dev *ethdev = cp->ethdev;
4591 struct cnic_uio_dev *udev = cp->udev;
4592 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4594 struct bnx2_rx_bd *rxbd;
4595 struct status_block *s_blk = cp->status_blk.gen;
4596 dma_addr_t ring_map = udev->l2_ring_map;
4598 sb_id = cp->status_blk_num;
4599 cnic_init_context(dev, 2);
4600 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4601 coal_reg = BNX2_HC_COMMAND;
4602 coal_val = CNIC_RD(dev, coal_reg);
4603 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4604 struct status_block_msix *sblk = cp->status_blk.bnx2;
4606 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4607 coal_reg = BNX2_HC_COALESCE_NOW;
4608 coal_val = 1 << (11 + sb_id);
4611 while (!(*cp->rx_cons_ptr != 0) && i < 10) {
4612 CNIC_WR(dev, coal_reg, coal_val);
4617 cp->rx_cons = *cp->rx_cons_ptr;
4619 cid_addr = GET_CID_ADDR(2);
4620 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4621 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4622 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4625 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4627 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4628 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4630 rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4631 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4633 int n = (i % cp->l2_rx_ring_size) + 1;
4635 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4636 rxbd->rx_bd_len = cp->l2_single_buf_size;
4637 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4638 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4639 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4641 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4642 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4643 rxbd->rx_bd_haddr_hi = val;
4645 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4646 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4647 rxbd->rx_bd_haddr_lo = val;
4649 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4650 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4653 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4655 struct kwqe *wqes[1], l2kwqe;
4657 memset(&l2kwqe, 0, sizeof(l2kwqe));
4659 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4660 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4661 KWQE_OPCODE_SHIFT) | 2;
4662 dev->submit_kwqes(dev, wqes, 1);
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

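/* Bring up the bnx2 offload path: program the iSCSI MAC, size the MQ
 * kernel-bypass block, set up the KWQ/KCQ contexts, ring the CP/COM
 * doorbells, init the L2 rings, and request the IRQ.
 */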
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (CNIC_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (CNIC_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

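/* Write the host context-table entries for the bnx2x device, aligning
 * each DMA mapping to the required context alignment first.
 */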
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
		 offsetof(struct hc_status_block_data_e1x, index_data) +
		 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
		 offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

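/* Fill in the L2 tx BD ring and the tx half of the client-init ramrod
 * data.  Each packet slot uses a chain of 3 BDs: a start BD, a parse
 * BD (E1x or E2 format depending on the chip), and a regular BD.
 */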
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, CNIC_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

		if (BNX2X_CHIP_IS_E2_PLUS(bp))
			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
		else
			pbd_e1x->global_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

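/* Fill in the L2 rx BD and completion-queue rings and the rx half of
 * the client-init ramrod data for the iSCSI L2 client.
 */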
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				CNIC_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = bp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

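/* Point kcq1 (the iSCSI event queue) and, on E2 and later chips, kcq2
 * (the FCoE event queue) at their producer/consumer indices in the
 * status block.
 */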
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

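/* Bring up the bnx2x offload path: allocate the iSCSI/FCoE CID tables,
 * program the iSCSI event queue in CSTORM, publish the global buffer
 * address and TCP advertised-window parameters, and hook up the IRQ.
 */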
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func, ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->func = bp->pf_num;

	func = CNIC_FUNC(cp);
	pfid = bp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
				       cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		 HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

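/* Initialize the L2 rings for either chip class.  On bnx2x this posts
 * a CLIENT_SETUP ramrod and waits up to ~10 ms for its completion
 * before arming the ring doorbell.
 */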
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid >> 4;
		*(cid_ptr + 1) = cid * bp->db_size;
		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
	}
}

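/* Tear down the L2 rings.  On bnx2x this halts the iSCSI L2 client
 * with an ETH_HALT ramrod, waits for the completion, and then frees
 * the connection with a CFC_DEL ramrod.
 */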
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
	memset(rx_ring, 0, CNIC_PAGE_SIZE);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;
	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	/* Read iSCSI config again.  On some bnx2x devices, iSCSI config
	 * can change after firmware is downloaded.
	 */
	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		dev->max_iscsi_conn = 0;

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;
	ethdev->drv_unregister_cnic(dev->netdev);
}

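/* Common bring-up path called on NETDEV_UP: allocate resources, start
 * the chip-specific hardware, open the connection manager, and enable
 * interrupts.  On failure, resources are released via the err1 label.
 */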
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}
	err = cp->start_hw(dev);
	if (err)
		goto err1;
	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);
	cp->enable_int(dev);
	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
	u32 sb_id = cp->status_blk_num;
	u32 idx_off, syn_off;

	cnic_free_irq(dev);

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		idx_off = offsetof(struct hc_status_block_e2, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
	} else {
		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
	}
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
		  idx_off, 0);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

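/* Common tear-down path: wait for the UIO ring shutdown to complete,
 * shut down the rings, and release the chip-specific resources.
 */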
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

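/* Identify bnx2/bnx2x netdevs by their ethtool driver name and attach
 * a CNIC device to them, adding it to the global device list.
 */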
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->indicate_netevent) {
			mutex_unlock(&cnic_lock);
			continue;
		}

		ctx = cp->ulp_handle[if_type];

		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		ulp_ops->indicate_netevent(ctx, event, vlan_id);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				return NOTIFY_DONE;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}
	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}
	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);