/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

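/*
 * Locking summary: cnic_lock (a mutex) serializes writers of cnic_ulp_tbl
 * and of each device's cp->ulp_ops[]; readers use RCU.  cnic_dev_lock (an
 * rwlock) protects cnic_dev_list and cnic_udev_list, with rtnl additionally
 * held across list modifications (see the comment above cnic_dev_list).
 */
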
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

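/*
 * The helpers below never touch the hardware directly.  Register, context
 * memory and L2 ring operations are all funneled through the ethdev's
 * drv_ctl() callback, implemented by the owning bnx2/bnx2x netdev driver;
 * cnic merely fills in a struct drv_ctl_info request.
 */
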
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

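/*
 * Only ISCSI_KEVENT_PATH_REQ messages are retried above; an IF_DOWN event
 * is sent once and its result is ignored by most callers.  A persistent
 * failure generally means no netlink listener (userspace daemon) is
 * currently attached to the iSCSI ULP.
 */
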
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

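/*
 * SK_F_OFFLD_SCHED doubles as a simple bit lock: the msleep() loops in
 * cnic_close_prep() and cnic_abort_prep() spin until any offload scheduled
 * by cnic_offld_prep() has dropped the bit, so a close or abort can never
 * race with an offload that is still in flight.
 */
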
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

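/*
 * Typical usage from a ULP driver (illustrative sketch only; my_ulp_ops,
 * my_init and my_ctx are hypothetical names):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_init,
 *		...
 *	};
 *
 *	cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	dev->register_device(dev, CNIC_ULP_ISCSI, my_ctx);
 *
 * cnic_register_device() is reached through the function pointer that cnic
 * publishes in struct cnic_dev, not by direct linkage.
 */
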
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

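/*
 * Allocation scans round-robin from ->next so recently freed ids are not
 * reused immediately.  The wrap "(id + 1) & (id_tbl->max - 1)" is exact
 * only when id_tbl->max is a power of two; for other sizes ->next merely
 * restarts early, which stays safe because find_next_zero_bit() bounds
 * the scan at id_tbl->max.
 */
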
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

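/*
 * Sizing example: for pages = 3 the page table needs 3 * 8 = 24 bytes (one
 * 64-bit entry per page) and the mask arithmetic above rounds that up to a
 * whole BCM_PAGE_SIZE, so pgtbl_size ends up as one full page.
 */
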
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

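/*
 * Two completion-queue layouts are handled here: with use_pg_tbl == true
 * (bnx2) the hardware walks the KCQ through the separate page table built
 * by cnic_alloc_dma(), while for bnx2x the pages chain themselves, each
 * page's last entry being a bnx2x_bd_chain_next holding the DMA address of
 * the following page -- hence the MAX_KCQE_CNT skip in cnic_bnx2x_next_idx().
 */
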
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				read_unlock(&cnic_dev_lock);
				return -ENOMEM;
			}
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

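/*
 * Userspace (the uio daemon serving the ULPs) mmaps four UIO regions set
 * up above: the BAR 0 registers (mem[0], physical), the status block
 * (mem[1]), the L2 ring (mem[2]) and the L2 buffers (mem[3]), the last
 * three exported as logical memory.
 */
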
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(cp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

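/*
 * The producer index is a free-running 16-bit counter; masking it with
 * MAX_KWQ_IDX maps it onto the ring, and KWQ_PG()/KWQ_IDX() split the ring
 * index into a page number and an entry within that page.  The final
 * CNIC_WR16() doorbell tells the bnx2 firmware how far it may consume.
 */
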
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

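/*
 * A single slow-path element (SPE) is built on the stack: the header packs
 * the ramrod command with the hardware CID, the type field carries the
 * connection type plus PF id, and the data regpair points at the per-cid
 * 16-byte buffer set up by cnic_get_kwqe_16_data().  drv_submit_kwqes_16()
 * reports how many elements were accepted, hence the "ret == 1" test.
 */
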
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == ISCSI_KWQE_CONN_OFFLOAD3_MAX_QUEUE_PTES) {
			if (n >= n_max)
				break;

			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}

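/*
 * Note the deliberate hi/lo swap when seeding curr_pbe from a first PTE
 * (e.g. sq_curr_pbe.lo = req2->sq_first_pte.hi above): the PTEs handed in
 * by the ULP are stored in big-endian order, while the chip context wants
 * little-endian regpairs.
 */
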
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

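/*
 * A CFC delete must not follow connection teardown too quickly: if less
 * than 2*HZ has elapsed since ctx->timestamp, the delete is deferred to
 * cp->delete_task (see CTX_FL_DELETE_WAIT) instead of being issued
 * synchronously above.
 */
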
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}

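/*
 * Both IPv4 and IPv6 addresses are widened to in6_addr above (IPv4 fills
 * only the first dword, the rest stay zero) so a single csum_ipv6_magic()
 * call can precompute the TCP pseudo-header checksum that is handed to the
 * Xstorm firmware for the offloaded connection.
 */
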
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}

2317 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2321 u32 cid = -1, l5_cid;
2322 struct cnic_local *cp = dev->cnic_priv;
2323 struct fcoe_kwqe_conn_offload1 *req1;
2324 struct fcoe_kwqe_conn_offload2 *req2;
2325 struct fcoe_kwqe_conn_offload3 *req3;
2326 struct fcoe_kwqe_conn_offload4 *req4;
2327 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2328 struct cnic_context *ctx;
2329 struct fcoe_context *fctx;
2330 struct regpair ctx_addr;
2331 union l5cm_specific_data l5_data;
2332 struct fcoe_kcqe kcqe;
2333 struct kcqe *cqes[1];
2339 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2340 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2341 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2342 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2346 l5_cid = req1->fcoe_conn_id;
2347 if (l5_cid >= dev->max_fcoe_conn)
2350 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2352 ctx = &cp->ctx_tbl[l5_cid];
2353 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2356 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2363 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2365 u32 hw_cid = BNX2X_HW_CID(cp, cid);
2368 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2369 FCOE_CONNECTION_TYPE);
2370 fctx->xstorm_ag_context.cdu_reserved = val;
2371 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2372 FCOE_CONNECTION_TYPE);
2373 fctx->ustorm_ag_context.cdu_usage = val;
2375 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2376 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2379 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2383 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2384 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2385 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2386 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2387 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2389 cid = BNX2X_HW_CID(cp, cid);
2390 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2391 FCOE_CONNECTION_TYPE, &l5_data);
2393 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2399 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2401 memset(&kcqe, 0, sizeof(kcqe));
2402 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2403 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2404 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2406 cqes[0] = (struct kcqe *) &kcqe;
2407 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
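/*
 * Failure convention for the offload path: allocation errors are not
 * propagated to the caller.  Instead an OFFLOAD_CONN completion
 * carrying CTX_ALLOC_FAILURE is synthesized, so the FCoE ULP always
 * observes an ordinary (if failed) offload completion.
 */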
2411 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2413 struct fcoe_kwqe_conn_enable_disable *req;
2414 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2415 union l5cm_specific_data l5_data;
2418 struct cnic_local *cp = dev->cnic_priv;
2420 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2421 cid = req->context_id;
2422 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2424 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2425 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2428 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2432 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2433 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2434 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2435 FCOE_CONNECTION_TYPE, &l5_data);
2439 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2441 struct fcoe_kwqe_conn_enable_disable *req;
2442 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2443 union l5cm_specific_data l5_data;
2446 struct cnic_local *cp = dev->cnic_priv;
2448 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2449 cid = req->context_id;
2450 l5_cid = req->conn_id;
2451 if (l5_cid >= dev->max_fcoe_conn)
2454 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2456 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2457 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2460 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2464 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2465 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2466 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2467 FCOE_CONNECTION_TYPE, &l5_data);
2471 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2473 struct fcoe_kwqe_conn_destroy *req;
2474 union l5cm_specific_data l5_data;
2477 struct cnic_local *cp = dev->cnic_priv;
2478 struct cnic_context *ctx;
2479 struct fcoe_kcqe kcqe;
2480 struct kcqe *cqes[1];
2482 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2483 cid = req->context_id;
2484 l5_cid = req->conn_id;
2485 if (l5_cid >= dev->max_fcoe_conn)
2488 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2490 ctx = &cp->ctx_tbl[l5_cid];
2492 init_waitqueue_head(&ctx->waitq);
2495 memset(&kcqe, 0, sizeof(kcqe));
2496 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2497 memset(&l5_data, 0, sizeof(l5_data));
2498 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2499 FCOE_CONNECTION_TYPE, &l5_data);
2501 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2503 kcqe.completion_status = 0;
2506 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2507 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2509 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2510 kcqe.fcoe_conn_id = req->conn_id;
2511 kcqe.fcoe_conn_context_id = cid;
2513 cqes[0] = (struct kcqe *) &kcqe;
2514 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
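/*
 * Teardown is two-phased: the TERMINATE ramrod is submitted and waited
 * on (bounded by CNIC_RAMROD_TMO), then the context is parked with
 * CTX_FL_DELETE_WAIT so that delete_task can issue the final CFC
 * delete roughly two seconds later.  The KCQE sent up here reports
 * only the outcome of the first phase.
 */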
2518 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2520 struct cnic_local *cp = dev->cnic_priv;
2523 for (i = start_cid; i < cp->max_cid_space; i++) {
2524 struct cnic_context *ctx = &cp->ctx_tbl[i];
2527 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2530 for (j = 0; j < 5; j++) {
2531 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2536 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2537 netdev_warn(dev->netdev, "CID %x not deleted\n",
2542 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2544 struct fcoe_kwqe_destroy *req;
2545 union l5cm_specific_data l5_data;
2546 struct cnic_local *cp = dev->cnic_priv;
2550 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2552 req = (struct fcoe_kwqe_destroy *) kwqe;
2553 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2555 memset(&l5_data, 0, sizeof(l5_data));
2556 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2557 FCOE_CONNECTION_TYPE, &l5_data);
2561 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2563 struct cnic_local *cp = dev->cnic_priv;
2565 struct kcqe *cqes[1];
2567 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2568 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2572 cid = kwqe->kwqe_info0;
2573 memset(&kcqe, 0, sizeof(kcqe));
2575 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2578 ulp_type = CNIC_ULP_FCOE;
2579 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2580 struct fcoe_kwqe_conn_enable_disable *req;
2582 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2583 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2584 cid = req->context_id;
2585 l5_cid = req->conn_id;
2586 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2587 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2591 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2592 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2593 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2594 kcqe.kcqe_info2 = cid;
2595 kcqe.kcqe_info0 = l5_cid;
2597 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2598 ulp_type = CNIC_ULP_ISCSI;
2599 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2600 cid = kwqe->kwqe_info1;
2602 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2603 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2604 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2605 kcqe.kcqe_info2 = cid;
2606 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2608 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2609 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2611 ulp_type = CNIC_ULP_L4;
2612 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2613 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2614 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2615 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2616 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2617 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2621 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2622 KCQE_FLAGS_LAYER_MASK_L4;
2623 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2625 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2631 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
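/*
 * On the opcode fix-ups above: FCoE and L4 completion opcodes bear no
 * fixed relationship to their request opcodes and are mapped case by
 * case, while the iSCSI completion opcodes sit a constant 0x10 above
 * the request opcodes -- hence the bare (opcode + 0x10).
 */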
2634 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2635 struct kwqe *wqes[], u32 num_wqes)
2641 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2642 return -EAGAIN; /* bnx2x is down */
2644 for (i = 0; i < num_wqes; ) {
2646 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2650 case ISCSI_KWQE_OPCODE_INIT1:
2651 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2653 case ISCSI_KWQE_OPCODE_INIT2:
2654 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2656 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2657 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2658 num_wqes - i, &work);
2660 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2661 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2663 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2664 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2666 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2667 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2670 case L4_KWQE_OPCODE_VALUE_CLOSE:
2671 ret = cnic_bnx2x_close(dev, kwqe);
2673 case L4_KWQE_OPCODE_VALUE_RESET:
2674 ret = cnic_bnx2x_reset(dev, kwqe);
2676 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2677 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2679 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2680 ret = cnic_bnx2x_update_pg(dev, kwqe);
2682 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2687 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2692 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2695 /* Possibly a bnx2x parity error; send a completion
2696 * to the ULP drivers with an error code to speed up
2697 * cleanup and reset recovery.
2699 if (ret == -EIO || ret == -EAGAIN)
2700 cnic_bnx2x_kwqe_err(dev, kwqe);
2707 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2708 struct kwqe *wqes[], u32 num_wqes)
2710 struct cnic_local *cp = dev->cnic_priv;
2715 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2716 return -EAGAIN; /* bnx2x is down */
2718 if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2721 for (i = 0; i < num_wqes; ) {
2723 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2727 case FCOE_KWQE_OPCODE_INIT1:
2728 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2729 num_wqes - i, &work);
2731 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2732 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2733 num_wqes - i, &work);
2735 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2736 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2738 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2739 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2741 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2742 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2744 case FCOE_KWQE_OPCODE_DESTROY:
2745 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2747 case FCOE_KWQE_OPCODE_STAT:
2748 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2752 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2757 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2760 /* Possibly a bnx2x parity error; send a completion
2761 * to the ULP drivers with an error code to speed up
2762 * cleanup and reset recovery.
2764 if (ret == -EIO || ret == -EAGAIN)
2765 cnic_bnx2x_kwqe_err(dev, kwqe);
2772 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2778 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2779 return -EAGAIN; /* bnx2x is down */
2784 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2785 switch (layer_code) {
2786 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2787 case KWQE_FLAGS_LAYER_MASK_L4:
2788 case KWQE_FLAGS_LAYER_MASK_L2:
2789 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2792 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2793 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2799 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2801 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2802 return KCQE_FLAGS_LAYER_MASK_L4;
2804 return opflag & KCQE_FLAGS_LAYER_MASK;
2807 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2809 struct cnic_local *cp = dev->cnic_priv;
2815 struct cnic_ulp_ops *ulp_ops;
2817 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2818 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2820 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2823 while (j < num_cqes) {
2824 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2826 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2829 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2834 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2835 ulp_type = CNIC_ULP_RDMA;
2836 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2837 ulp_type = CNIC_ULP_ISCSI;
2838 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2839 ulp_type = CNIC_ULP_FCOE;
2840 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2841 ulp_type = CNIC_ULP_L4;
2842 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2845 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2851 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2852 if (likely(ulp_ops)) {
2853 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2854 cp->completed_kcq + i, j);
2863 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2866 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2868 struct cnic_local *cp = dev->cnic_priv;
2869 u16 i, ri, hw_prod, last;
2871 int kcqe_cnt = 0, last_cnt = 0;
2873 i = ri = last = info->sw_prod_idx;
2875 hw_prod = *info->hw_prod_idx_ptr;
2876 hw_prod = info->hw_idx(hw_prod);
2878 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2879 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2880 cp->completed_kcq[kcqe_cnt++] = kcqe;
2881 i = info->next_idx(i);
2882 ri = i & MAX_KCQ_IDX;
2883 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2884 last_cnt = kcqe_cnt;
2889 info->sw_prod_idx = last;
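/*
 * Index arithmetic in cnic_get_kcqes(): i is a free-running 16-bit
 * sequence number and ri = i & MAX_KCQ_IDX is its slot in the ring;
 * KCQ_PG()/KCQ_IDX() split that slot into a page and an offset within
 * the page.  An entry with KCQE_FLAGS_NEXT set is continued by the
 * next entry, so last/last_cnt trail the scan and the batch is cut at
 * the last entry that finishes a complete KCQE.
 */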
2893 static int cnic_l2_completion(struct cnic_local *cp)
2895 u16 hw_cons, sw_cons;
2896 struct cnic_uio_dev *udev = cp->udev;
2897 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2898 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2902 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2905 hw_cons = *cp->rx_cons_ptr;
2906 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2909 sw_cons = cp->rx_cons;
2910 while (sw_cons != hw_cons) {
2913 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2914 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2915 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2916 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2917 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2918 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2919 cmd == RAMROD_CMD_ID_ETH_HALT)
2922 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2927 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2929 u16 rx_cons, tx_cons;
2932 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2935 rx_cons = *cp->rx_cons_ptr;
2936 tx_cons = *cp->tx_cons_ptr;
2937 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2938 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2939 comp = cnic_l2_completion(cp);
2941 cp->tx_cons = tx_cons;
2942 cp->rx_cons = rx_cons;
2945 uio_event_notify(&cp->udev->cnic_uinfo);
2948 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2951 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2953 struct cnic_local *cp = dev->cnic_priv;
2954 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2957 /* status block index must be read before reading other fields */
2958 rmb();
2959 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2961 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2963 service_kcqes(dev, kcqe_cnt);
2965 /* Tell compiler that status_blk fields can change. */
2966 barrier();
2967 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2968 /* status block index must be read first */
2969 rmb();
2970 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2973 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2975 cnic_chk_pkt_rings(cp);
2980 static int cnic_service_bnx2(void *data, void *status_blk)
2982 struct cnic_dev *dev = data;
2984 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2985 struct status_block *sblk = status_blk;
2987 return sblk->status_idx;
2990 return cnic_service_bnx2_queues(dev);
2993 static void cnic_service_bnx2_msix(unsigned long data)
2995 struct cnic_dev *dev = (struct cnic_dev *) data;
2996 struct cnic_local *cp = dev->cnic_priv;
2998 cp->last_status_idx = cnic_service_bnx2_queues(dev);
3000 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3001 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3004 static void cnic_doirq(struct cnic_dev *dev)
3006 struct cnic_local *cp = dev->cnic_priv;
3008 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3009 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3011 prefetch(cp->status_blk.gen);
3012 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3014 tasklet_schedule(&cp->cnic_irq_task);
3018 static irqreturn_t cnic_irq(int irq, void *dev_instance)
3020 struct cnic_dev *dev = dev_instance;
3021 struct cnic_local *cp = dev->cnic_priv;
3031 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3032 u16 index, u8 op, u8 update)
3034 struct cnic_local *cp = dev->cnic_priv;
3035 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
3036 COMMAND_REG_INT_ACK);
3037 struct igu_ack_register igu_ack;
3039 igu_ack.status_block_index = index;
3040 igu_ack.sb_id_and_flags =
3041 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3042 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3043 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3044 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3046 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3049 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3050 u16 index, u8 op, u8 update)
3052 struct igu_regular cmd_data;
3053 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3055 cmd_data.sb_id_and_flags =
3056 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3057 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3058 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3059 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3062 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
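/*
 * The two ack primitives above are used in pairs: the *_msix handlers
 * below issue IGU_INT_DISABLE with update == 0 to mask the vector
 * while the tasklet drains the KCQ, and the *_arm handlers re-enable
 * it with IGU_INT_ENABLE and update == 1 once the new consumer index
 * is known.
 */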
3065 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3067 struct cnic_local *cp = dev->cnic_priv;
3069 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3070 IGU_INT_DISABLE, 0);
3073 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3075 struct cnic_local *cp = dev->cnic_priv;
3077 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3078 IGU_INT_DISABLE, 0);
3081 static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3083 struct cnic_local *cp = dev->cnic_priv;
3085 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3089 static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3091 struct cnic_local *cp = dev->cnic_priv;
3093 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3097 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3099 u32 last_status = *info->status_idx_ptr;
3102 /* status block index must be read before reading the KCQ */
3103 rmb();
3104 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3106 service_kcqes(dev, kcqe_cnt);
3108 /* Tell compiler that sblk fields can change. */
3109 barrier();
3111 last_status = *info->status_idx_ptr;
3112 /* status block index must be read before reading the KCQ */
3113 rmb();
3118 static void cnic_service_bnx2x_bh(unsigned long data)
3120 struct cnic_dev *dev = (struct cnic_dev *) data;
3121 struct cnic_local *cp = dev->cnic_priv;
3122 u32 status_idx, new_status_idx;
3124 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3128 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3130 CNIC_WR16(dev, cp->kcq1.io_addr,
3131 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3133 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
3134 cp->arm_int(dev, status_idx);
3138 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3140 if (new_status_idx != status_idx)
3143 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3146 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3147 status_idx, IGU_INT_ENABLE, 1);
3153 static int cnic_service_bnx2x(void *data, void *status_blk)
3155 struct cnic_dev *dev = data;
3156 struct cnic_local *cp = dev->cnic_priv;
3158 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3161 cnic_chk_pkt_rings(cp);
3166 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3168 struct cnic_ulp_ops *ulp_ops;
3170 if (if_type == CNIC_ULP_ISCSI)
3171 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3173 mutex_lock(&cnic_lock);
3174 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3175 lockdep_is_held(&cnic_lock));
3177 mutex_unlock(&cnic_lock);
3180 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3181 mutex_unlock(&cnic_lock);
3183 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3184 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3186 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3189 static void cnic_ulp_stop(struct cnic_dev *dev)
3191 struct cnic_local *cp = dev->cnic_priv;
3194 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3195 cnic_ulp_stop_one(cp, if_type);
3198 static void cnic_ulp_start(struct cnic_dev *dev)
3200 struct cnic_local *cp = dev->cnic_priv;
3203 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3204 struct cnic_ulp_ops *ulp_ops;
3206 mutex_lock(&cnic_lock);
3207 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3208 lockdep_is_held(&cnic_lock));
3209 if (!ulp_ops || !ulp_ops->cnic_start) {
3210 mutex_unlock(&cnic_lock);
3213 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3214 mutex_unlock(&cnic_lock);
3216 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3217 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3219 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
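/*
 * The ULP_F_CALL_PENDING protocol used in the stop/start paths above:
 * the bit is set while cnic_lock is held, the ulp_ops callback is then
 * invoked with the mutex dropped, and the bit is cleared afterwards.
 * This lets the unregister path poll for callbacks still in flight
 * without holding cnic_lock across a ULP callback.
 */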
3223 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3225 struct cnic_local *cp = dev->cnic_priv;
3226 struct cnic_ulp_ops *ulp_ops;
3229 mutex_lock(&cnic_lock);
3230 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3231 if (ulp_ops && ulp_ops->cnic_get_stats)
3232 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3235 mutex_unlock(&cnic_lock);
3239 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3241 struct cnic_dev *dev = data;
3242 int ulp_type = CNIC_ULP_ISCSI;
3244 switch (info->cmd) {
3245 case CNIC_CTL_STOP_CMD:
3253 case CNIC_CTL_START_CMD:
3256 if (!cnic_start_hw(dev))
3257 cnic_ulp_start(dev);
3261 case CNIC_CTL_STOP_ISCSI_CMD: {
3262 struct cnic_local *cp = dev->cnic_priv;
3263 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3264 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3267 case CNIC_CTL_COMPLETION_CMD: {
3268 struct cnic_ctl_completion *comp = &info->data.comp;
3269 u32 cid = BNX2X_SW_CID(comp->cid);
3271 struct cnic_local *cp = dev->cnic_priv;
3273 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3276 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3277 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3279 if (unlikely(comp->error)) {
3280 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3281 netdev_err(dev->netdev,
3282 "CID %x CFC delete comp error %x\n",
3287 wake_up(&ctx->waitq);
3291 case CNIC_CTL_FCOE_STATS_GET_CMD:
3292 ulp_type = CNIC_ULP_FCOE;
3294 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3296 cnic_copy_ulp_stats(dev, ulp_type);
3306 static void cnic_ulp_init(struct cnic_dev *dev)
3309 struct cnic_local *cp = dev->cnic_priv;
3311 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3312 struct cnic_ulp_ops *ulp_ops;
3314 mutex_lock(&cnic_lock);
3315 ulp_ops = cnic_ulp_tbl_prot(i);
3316 if (!ulp_ops || !ulp_ops->cnic_init) {
3317 mutex_unlock(&cnic_lock);
3321 mutex_unlock(&cnic_lock);
3323 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3324 ulp_ops->cnic_init(dev);
3330 static void cnic_ulp_exit(struct cnic_dev *dev)
3333 struct cnic_local *cp = dev->cnic_priv;
3335 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3336 struct cnic_ulp_ops *ulp_ops;
3338 mutex_lock(&cnic_lock);
3339 ulp_ops = cnic_ulp_tbl_prot(i);
3340 if (!ulp_ops || !ulp_ops->cnic_exit) {
3341 mutex_unlock(&cnic_lock);
3345 mutex_unlock(&cnic_lock);
3347 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3348 ulp_ops->cnic_exit(dev);
3354 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3356 struct cnic_dev *dev = csk->dev;
3357 struct l4_kwq_offload_pg *l4kwqe;
3358 struct kwqe *wqes[1];
3360 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3361 memset(l4kwqe, 0, sizeof(*l4kwqe));
3362 wqes[0] = (struct kwqe *) l4kwqe;
3364 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3366 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3367 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3369 l4kwqe->da0 = csk->ha[0];
3370 l4kwqe->da1 = csk->ha[1];
3371 l4kwqe->da2 = csk->ha[2];
3372 l4kwqe->da3 = csk->ha[3];
3373 l4kwqe->da4 = csk->ha[4];
3374 l4kwqe->da5 = csk->ha[5];
3376 l4kwqe->sa0 = dev->mac_addr[0];
3377 l4kwqe->sa1 = dev->mac_addr[1];
3378 l4kwqe->sa2 = dev->mac_addr[2];
3379 l4kwqe->sa3 = dev->mac_addr[3];
3380 l4kwqe->sa4 = dev->mac_addr[4];
3381 l4kwqe->sa5 = dev->mac_addr[5];
3383 l4kwqe->etype = ETH_P_IP;
3384 l4kwqe->ipid_start = DEF_IPID_START;
3385 l4kwqe->host_opaque = csk->l5_cid;
3388 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3389 l4kwqe->vlan_tag = csk->vlan_id;
3390 l4kwqe->l2hdr_nbytes += 4;
3393 return dev->submit_kwqes(dev, wqes, 1);
3396 static int cnic_cm_update_pg(struct cnic_sock *csk)
3398 struct cnic_dev *dev = csk->dev;
3399 struct l4_kwq_update_pg *l4kwqe;
3400 struct kwqe *wqes[1];
3402 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3403 memset(l4kwqe, 0, sizeof(*l4kwqe));
3404 wqes[0] = (struct kwqe *) l4kwqe;
3406 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3408 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3409 l4kwqe->pg_cid = csk->pg_cid;
3411 l4kwqe->da0 = csk->ha[0];
3412 l4kwqe->da1 = csk->ha[1];
3413 l4kwqe->da2 = csk->ha[2];
3414 l4kwqe->da3 = csk->ha[3];
3415 l4kwqe->da4 = csk->ha[4];
3416 l4kwqe->da5 = csk->ha[5];
3418 l4kwqe->pg_host_opaque = csk->l5_cid;
3419 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3421 return dev->submit_kwqes(dev, wqes, 1);
3424 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3426 struct cnic_dev *dev = csk->dev;
3427 struct l4_kwq_upload *l4kwqe;
3428 struct kwqe *wqes[1];
3430 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3431 memset(l4kwqe, 0, sizeof(*l4kwqe));
3432 wqes[0] = (struct kwqe *) l4kwqe;
3434 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3436 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3437 l4kwqe->cid = csk->pg_cid;
3439 return dev->submit_kwqes(dev, wqes, 1);
3442 static int cnic_cm_conn_req(struct cnic_sock *csk)
3444 struct cnic_dev *dev = csk->dev;
3445 struct l4_kwq_connect_req1 *l4kwqe1;
3446 struct l4_kwq_connect_req2 *l4kwqe2;
3447 struct l4_kwq_connect_req3 *l4kwqe3;
3448 struct kwqe *wqes[3];
3452 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3453 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3454 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3455 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3456 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3457 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3459 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3461 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3462 l4kwqe3->ka_timeout = csk->ka_timeout;
3463 l4kwqe3->ka_interval = csk->ka_interval;
3464 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3465 l4kwqe3->tos = csk->tos;
3466 l4kwqe3->ttl = csk->ttl;
3467 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3468 l4kwqe3->pmtu = csk->mtu;
3469 l4kwqe3->rcv_buf = csk->rcv_buf;
3470 l4kwqe3->snd_buf = csk->snd_buf;
3471 l4kwqe3->seed = csk->seed;
3473 wqes[0] = (struct kwqe *) l4kwqe1;
3474 if (test_bit(SK_F_IPV6, &csk->flags)) {
3475 wqes[1] = (struct kwqe *) l4kwqe2;
3476 wqes[2] = (struct kwqe *) l4kwqe3;
3479 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3480 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3482 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3483 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3484 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3485 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3486 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3487 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3488 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3489 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3490 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3491 sizeof(struct tcphdr);
3493 wqes[1] = (struct kwqe *) l4kwqe3;
3494 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3495 sizeof(struct tcphdr);
3498 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3500 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3501 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3502 l4kwqe1->cid = csk->cid;
3503 l4kwqe1->pg_cid = csk->pg_cid;
3504 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3505 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3506 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3507 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3508 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3509 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3510 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3511 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3512 if (csk->tcp_flags & SK_TCP_NAGLE)
3513 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3514 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3515 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3516 if (csk->tcp_flags & SK_TCP_SACK)
3517 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3518 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3519 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3521 l4kwqe1->tcp_flags = tcp_flags;
3523 return dev->submit_kwqes(dev, wqes, num_wqes);
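/*
 * Worked example for the MSS derivation above, assuming the default
 * 1500-byte MTU: IPv4 gives 1500 - 20 - 20 = 1460 and IPv6 gives
 * 1500 - 40 - 20 = 1440.  num_wqes is 3 for IPv6 (CONNECT1, 2 and 3
 * are chained) but only 2 for IPv4, where CONNECT2 -- which carries
 * nothing but the extra address words -- is skipped.
 */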
3526 static int cnic_cm_close_req(struct cnic_sock *csk)
3528 struct cnic_dev *dev = csk->dev;
3529 struct l4_kwq_close_req *l4kwqe;
3530 struct kwqe *wqes[1];
3532 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3533 memset(l4kwqe, 0, sizeof(*l4kwqe));
3534 wqes[0] = (struct kwqe *) l4kwqe;
3536 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3537 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3538 l4kwqe->cid = csk->cid;
3540 return dev->submit_kwqes(dev, wqes, 1);
3543 static int cnic_cm_abort_req(struct cnic_sock *csk)
3545 struct cnic_dev *dev = csk->dev;
3546 struct l4_kwq_reset_req *l4kwqe;
3547 struct kwqe *wqes[1];
3549 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3550 memset(l4kwqe, 0, sizeof(*l4kwqe));
3551 wqes[0] = (struct kwqe *) l4kwqe;
3553 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3554 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3555 l4kwqe->cid = csk->cid;
3557 return dev->submit_kwqes(dev, wqes, 1);
3560 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3561 u32 l5_cid, struct cnic_sock **csk, void *context)
3563 struct cnic_local *cp = dev->cnic_priv;
3564 struct cnic_sock *csk1;
3566 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3570 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3572 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3576 csk1 = &cp->csk_tbl[l5_cid];
3577 if (atomic_read(&csk1->ref_count))
3580 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3585 csk1->l5_cid = l5_cid;
3586 csk1->ulp_type = ulp_type;
3587 csk1->context = context;
3589 csk1->ka_timeout = DEF_KA_TIMEOUT;
3590 csk1->ka_interval = DEF_KA_INTERVAL;
3591 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3592 csk1->tos = DEF_TOS;
3593 csk1->ttl = DEF_TTL;
3594 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3595 csk1->rcv_buf = DEF_RCV_BUF;
3596 csk1->snd_buf = DEF_SND_BUF;
3597 csk1->seed = DEF_SEED;
3603 static void cnic_cm_cleanup(struct cnic_sock *csk)
3605 if (csk->src_port) {
3606 struct cnic_dev *dev = csk->dev;
3607 struct cnic_local *cp = dev->cnic_priv;
3609 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3614 static void cnic_close_conn(struct cnic_sock *csk)
3616 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3617 cnic_cm_upload_pg(csk);
3618 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3620 cnic_cm_cleanup(csk);
3623 static int cnic_cm_destroy(struct cnic_sock *csk)
3625 if (!cnic_in_use(csk))
3629 clear_bit(SK_F_INUSE, &csk->flags);
3630 smp_mb__after_clear_bit();
3631 while (atomic_read(&csk->ref_count) != 1)
3633 cnic_cm_cleanup(csk);
3640 static inline u16 cnic_get_vlan(struct net_device *dev,
3641 struct net_device **vlan_dev)
3643 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3644 *vlan_dev = vlan_dev_real_dev(dev);
3645 return vlan_dev_vlan_id(dev);
3651 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3652 struct dst_entry **dst)
3654 #if defined(CONFIG_INET)
3657 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3664 return -ENETUNREACH;
3668 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3669 struct dst_entry **dst)
3671 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3674 memset(&fl6, 0, sizeof(fl6));
3675 fl6.daddr = dst_addr->sin6_addr;
3676 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3677 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3679 *dst = ip6_route_output(&init_net, NULL, &fl6);
3680 if ((*dst)->error) {
3683 return -ENETUNREACH;
3688 return -ENETUNREACH;
3691 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3694 struct cnic_dev *dev = NULL;
3695 struct dst_entry *dst;
3696 struct net_device *netdev = NULL;
3697 int err = -ENETUNREACH;
3699 if (dst_addr->sin_family == AF_INET)
3700 err = cnic_get_v4_route(dst_addr, &dst);
3701 else if (dst_addr->sin_family == AF_INET6) {
3702 struct sockaddr_in6 *dst_addr6 =
3703 (struct sockaddr_in6 *) dst_addr;
3705 err = cnic_get_v6_route(dst_addr6, &dst);
3715 cnic_get_vlan(dst->dev, &netdev);
3717 dev = cnic_from_netdev(netdev);
3726 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3728 struct cnic_dev *dev = csk->dev;
3729 struct cnic_local *cp = dev->cnic_priv;
3731 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3734 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3736 struct cnic_dev *dev = csk->dev;
3737 struct cnic_local *cp = dev->cnic_priv;
3739 struct dst_entry *dst = NULL;
3740 struct net_device *realdev;
3744 if (saddr->local.v6.sin6_family == AF_INET6 &&
3745 saddr->remote.v6.sin6_family == AF_INET6)
3747 else if (saddr->local.v4.sin_family == AF_INET &&
3748 saddr->remote.v4.sin_family == AF_INET)
3753 clear_bit(SK_F_IPV6, &csk->flags);
3756 set_bit(SK_F_IPV6, &csk->flags);
3757 cnic_get_v6_route(&saddr->remote.v6, &dst);
3759 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3760 sizeof(struct in6_addr));
3761 csk->dst_port = saddr->remote.v6.sin6_port;
3762 local_port = saddr->local.v6.sin6_port;
3765 cnic_get_v4_route(&saddr->remote.v4, &dst);
3767 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3768 csk->dst_port = saddr->remote.v4.sin_port;
3769 local_port = saddr->local.v4.sin_port;
3773 csk->mtu = dev->netdev->mtu;
3774 if (dst && dst->dev) {
3775 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3776 if (realdev == dev->netdev) {
3777 csk->vlan_id = vlan;
3778 csk->mtu = dst_mtu(dst);
3782 port_id = be16_to_cpu(local_port);
3783 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3784 port_id < CNIC_LOCAL_PORT_MAX) {
3785 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3791 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3792 if (port_id == -1) {
3796 local_port = cpu_to_be16(port_id);
3798 csk->src_port = local_port;
3805 static void cnic_init_csk_state(struct cnic_sock *csk)
3808 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3809 clear_bit(SK_F_CLOSING, &csk->flags);
3812 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3814 struct cnic_local *cp = csk->dev->cnic_priv;
3817 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3820 if (!cnic_in_use(csk))
3823 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3826 cnic_init_csk_state(csk);
3828 err = cnic_get_route(csk, saddr);
3832 err = cnic_resolve_addr(csk, saddr);
3837 clear_bit(SK_F_CONNECT_START, &csk->flags);
3841 static int cnic_cm_abort(struct cnic_sock *csk)
3843 struct cnic_local *cp = csk->dev->cnic_priv;
3844 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3846 if (!cnic_in_use(csk))
3849 if (cnic_abort_prep(csk))
3850 return cnic_cm_abort_req(csk);
3852 /* Getting here means that we haven't started connect, or
3853 * connect was not successful.
3856 cp->close_conn(csk, opcode);
3857 if (csk->state != opcode)
3863 static int cnic_cm_close(struct cnic_sock *csk)
3865 if (!cnic_in_use(csk))
3868 if (cnic_close_prep(csk)) {
3869 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3870 return cnic_cm_close_req(csk);
3877 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3880 struct cnic_ulp_ops *ulp_ops;
3881 int ulp_type = csk->ulp_type;
3884 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3886 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3887 ulp_ops->cm_connect_complete(csk);
3888 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3889 ulp_ops->cm_close_complete(csk);
3890 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3891 ulp_ops->cm_remote_abort(csk);
3892 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3893 ulp_ops->cm_abort_complete(csk);
3894 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3895 ulp_ops->cm_remote_close(csk);
3900 static int cnic_cm_set_pg(struct cnic_sock *csk)
3902 if (cnic_offld_prep(csk)) {
3903 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3904 cnic_cm_update_pg(csk);
3906 cnic_cm_offload_pg(csk);
3911 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3913 struct cnic_local *cp = dev->cnic_priv;
3914 u32 l5_cid = kcqe->pg_host_opaque;
3915 u8 opcode = kcqe->op_code;
3916 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3919 if (!cnic_in_use(csk))
3922 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3923 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3926 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3927 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3928 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3929 cnic_cm_upcall(cp, csk,
3930 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3934 csk->pg_cid = kcqe->pg_cid;
3935 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3936 cnic_cm_conn_req(csk);
3942 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3944 struct cnic_local *cp = dev->cnic_priv;
3945 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3946 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3947 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3949 ctx->timestamp = jiffies;
3951 wake_up(&ctx->waitq);
3954 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3956 struct cnic_local *cp = dev->cnic_priv;
3957 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3958 u8 opcode = l4kcqe->op_code;
3960 struct cnic_sock *csk;
3962 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3963 cnic_process_fcoe_term_conn(dev, kcqe);
3966 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3967 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3968 cnic_cm_process_offld_pg(dev, l4kcqe);
3972 l5_cid = l4kcqe->conn_id;
3974 l5_cid = l4kcqe->cid;
3975 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3978 csk = &cp->csk_tbl[l5_cid];
3981 if (!cnic_in_use(csk)) {
3987 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3988 if (l4kcqe->status != 0) {
3989 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3990 cnic_cm_upcall(cp, csk,
3991 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3994 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3995 if (l4kcqe->status == 0)
3996 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3997 else if (l4kcqe->status ==
3998 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
3999 set_bit(SK_F_HW_ERR, &csk->flags);
4001 smp_mb__before_clear_bit();
4002 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4003 cnic_cm_upcall(cp, csk, opcode);
4006 case L5CM_RAMROD_CMD_ID_CLOSE:
4007 if (l4kcqe->status != 0) {
4008 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x\n",
4009 l4kcqe->status);
4010 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4015 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4016 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4017 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4018 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4019 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4020 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4021 set_bit(SK_F_HW_ERR, &csk->flags);
4023 cp->close_conn(csk, opcode);
4026 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4027 /* after we already sent CLOSE_REQ */
4028 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4029 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4030 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4031 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4033 cnic_cm_upcall(cp, csk, opcode);
4039 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4041 struct cnic_dev *dev = data;
4044 for (i = 0; i < num; i++)
4045 cnic_cm_process_kcqe(dev, kcqe[i]);
4048 static struct cnic_ulp_ops cm_ulp_ops = {
4049 .indicate_kcqes = cnic_cm_indicate_kcqe,
4052 static void cnic_cm_free_mem(struct cnic_dev *dev)
4054 struct cnic_local *cp = dev->cnic_priv;
4058 cnic_free_id_tbl(&cp->csk_port_tbl);
4061 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4063 struct cnic_local *cp = dev->cnic_priv;
4066 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
4071 port_id = random32();
4072 port_id %= CNIC_LOCAL_PORT_RANGE;
4073 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4074 CNIC_LOCAL_PORT_MIN, port_id)) {
4075 cnic_cm_free_mem(dev);
4081 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4083 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4084 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4085 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4086 csk->state = opcode;
4089 /* 1. If event opcode matches the expected event in csk->state
4090 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4091 * event.
4092 * 3. If the expected event is 0, meaning the connection was never
4093 * established, we accept the opcode from cm_abort.
4095 if (opcode == csk->state || csk->state == 0 ||
4096 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4097 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4098 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4099 if (csk->state == 0)
4100 csk->state = opcode;
4107 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4109 struct cnic_dev *dev = csk->dev;
4110 struct cnic_local *cp = dev->cnic_priv;
4112 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4113 cnic_cm_upcall(cp, csk, opcode);
4117 clear_bit(SK_F_CONNECT_START, &csk->flags);
4118 cnic_close_conn(csk);
4119 csk->state = opcode;
4120 cnic_cm_upcall(cp, csk, opcode);
4123 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4127 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4132 cnic_ctx_wr(dev, 45, 0, seed);
4136 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4138 struct cnic_dev *dev = csk->dev;
4139 struct cnic_local *cp = dev->cnic_priv;
4140 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4141 union l5cm_specific_data l5_data;
4143 int close_complete = 0;
4146 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4147 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4148 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4149 if (cnic_ready_to_close(csk, opcode)) {
4150 if (test_bit(SK_F_HW_ERR, &csk->flags))
4152 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4153 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4158 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4159 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4161 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4166 memset(&l5_data, 0, sizeof(l5_data));
4168 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4170 } else if (close_complete) {
4171 ctx->timestamp = jiffies;
4172 cnic_close_conn(csk);
4173 cnic_cm_upcall(cp, csk, csk->state);
4177 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4179 struct cnic_local *cp = dev->cnic_priv;
4184 if (!netif_running(dev->netdev))
4187 cnic_bnx2x_delete_wait(dev, 0);
4189 cancel_delayed_work(&cp->delete_task);
4190 flush_workqueue(cnic_wq);
4192 if (atomic_read(&cp->iscsi_conn) != 0)
4193 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4194 atomic_read(&cp->iscsi_conn));
4197 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4199 struct cnic_local *cp = dev->cnic_priv;
4200 u32 pfid = cp->pfid;
4201 u32 port = CNIC_PORT(cp);
4203 cnic_init_bnx2x_mac(dev);
4204 cnic_bnx2x_set_tcp_timestamp(dev, 1);
4206 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4207 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4209 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4210 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4211 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4212 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4215 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4216 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4217 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4218 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4219 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4220 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4221 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4222 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4224 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4229 static void cnic_delete_task(struct work_struct *work)
4231 struct cnic_local *cp;
4232 struct cnic_dev *dev;
4234 int need_resched = 0;
4236 cp = container_of(work, struct cnic_local, delete_task.work);
4239 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4240 struct drv_ctl_info info;
4242 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4244 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4245 cp->ethdev->drv_ctl(dev->netdev, &info);
4248 for (i = 0; i < cp->max_cid_space; i++) {
4249 struct cnic_context *ctx = &cp->ctx_tbl[i];
4252 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4253 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4256 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4261 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4264 err = cnic_bnx2x_destroy_ramrod(dev, i);
4266 cnic_free_bnx2x_conn_resc(dev, i);
4268 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4269 atomic_dec(&cp->iscsi_conn);
4271 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4276 queue_delayed_work(cnic_wq, &cp->delete_task,
4277 msecs_to_jiffies(10));
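/*
 * delete_task pacing: contexts younger than two seconds are left
 * alone, and whenever one is skipped the work requeues itself at a
 * 10 ms period until every CTX_FL_DELETE_WAIT context has been
 * destroyed.
 */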
4281 static int cnic_cm_open(struct cnic_dev *dev)
4283 struct cnic_local *cp = dev->cnic_priv;
4286 err = cnic_cm_alloc_mem(dev);
4290 err = cp->start_cm(dev);
4295 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4297 dev->cm_create = cnic_cm_create;
4298 dev->cm_destroy = cnic_cm_destroy;
4299 dev->cm_connect = cnic_cm_connect;
4300 dev->cm_abort = cnic_cm_abort;
4301 dev->cm_close = cnic_cm_close;
4302 dev->cm_select_dev = cnic_cm_select_dev;
4304 cp->ulp_handle[CNIC_ULP_L4] = dev;
4305 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4309 cnic_cm_free_mem(dev);
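/*
 * A minimal sketch of how a ULP is expected to drive the cm_* entry
 * points published by cnic_cm_open() -- illustrative only, with error
 * handling and the asynchronous upcalls elided:
 *
 *	struct cnic_sock *csk;
 *	struct cnic_sockaddr saddr;	(filled in by the ULP)
 *
 *	dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx);
 *	dev->cm_connect(csk, &saddr);
 *	... wait for the cm_connect_complete() upcall ...
 *	dev->cm_close(csk);
 *	dev->cm_destroy(csk);
 */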
4313 static int cnic_cm_shutdown(struct cnic_dev *dev)
4315 struct cnic_local *cp = dev->cnic_priv;
4321 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4322 struct cnic_sock *csk = &cp->csk_tbl[i];
4324 clear_bit(SK_F_INUSE, &csk->flags);
4325 cnic_cm_cleanup(csk);
4327 cnic_cm_free_mem(dev);
4332 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4337 cid_addr = GET_CID_ADDR(cid);
4339 for (i = 0; i < CTX_SIZE; i += 4)
4340 cnic_ctx_wr(dev, cid_addr, i, 0);
4343 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4345 struct cnic_local *cp = dev->cnic_priv;
4347 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4349 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4352 for (i = 0; i < cp->ctx_blks; i++) {
4354 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4357 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4359 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4360 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4361 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4362 (u64) cp->ctx_arr[i].mapping >> 32);
4363 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4364 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4365 for (j = 0; j < 10; j++) {
4367 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4368 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4372 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4380 static void cnic_free_irq(struct cnic_dev *dev)
4382 struct cnic_local *cp = dev->cnic_priv;
4383 struct cnic_eth_dev *ethdev = cp->ethdev;
4385 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4386 cp->disable_int_sync(dev);
4387 tasklet_kill(&cp->cnic_irq_task);
4388 free_irq(ethdev->irq_arr[0].vector, dev);
4392 static int cnic_request_irq(struct cnic_dev *dev)
4394 struct cnic_local *cp = dev->cnic_priv;
4395 struct cnic_eth_dev *ethdev = cp->ethdev;
4398 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4400 tasklet_disable(&cp->cnic_irq_task);
4405 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4407 struct cnic_local *cp = dev->cnic_priv;
4408 struct cnic_eth_dev *ethdev = cp->ethdev;
4410 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4412 int sblk_num = cp->status_blk_num;
4413 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4414 BNX2_HC_SB_CONFIG_1;
4416 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4418 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4419 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4420 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4422 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4423 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4424 (unsigned long) dev);
4425 err = cnic_request_irq(dev);
4429 while (cp->status_blk.bnx2->status_completion_producer_index &&
4431 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4432 1 << (11 + sblk_num));
4437 if (cp->status_blk.bnx2->status_completion_producer_index) {
4443 struct status_block *sblk = cp->status_blk.gen;
4444 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4447 while (sblk->status_completion_producer_index && i < 10) {
4448 CNIC_WR(dev, BNX2_HC_COMMAND,
4449 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4454 if (sblk->status_completion_producer_index)
4461 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4465 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4467 struct cnic_local *cp = dev->cnic_priv;
4468 struct cnic_eth_dev *ethdev = cp->ethdev;
4470 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4473 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4474 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4477 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4479 struct cnic_local *cp = dev->cnic_priv;
4480 struct cnic_eth_dev *ethdev = cp->ethdev;
4482 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4485 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4486 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4487 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4488 synchronize_irq(ethdev->irq_arr[0].vector);
4491 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4493 struct cnic_local *cp = dev->cnic_priv;
4494 struct cnic_eth_dev *ethdev = cp->ethdev;
4495 struct cnic_uio_dev *udev = cp->udev;
4496 u32 cid_addr, tx_cid, sb_id;
4497 u32 val, offset0, offset1, offset2, offset3;
4500 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4501 struct status_block *s_blk = cp->status_blk.gen;
4503 sb_id = cp->status_blk_num;
4505 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4506 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4507 struct status_block_msix *sblk = cp->status_blk.bnx2;
4509 tx_cid = TX_TSS_CID + sb_id - 1;
4510 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4512 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4514 cp->tx_cons = *cp->tx_cons_ptr;
4516 cid_addr = GET_CID_ADDR(tx_cid);
4517 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4518 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4520 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4521 cnic_ctx_wr(dev, cid_addr2, i, 0);
4523 offset0 = BNX2_L2CTX_TYPE_XI;
4524 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4525 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4526 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4528 cnic_init_context(dev, tx_cid);
4529 cnic_init_context(dev, tx_cid + 1);
4531 offset0 = BNX2_L2CTX_TYPE;
4532 offset1 = BNX2_L2CTX_CMD_TYPE;
4533 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4534 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4536 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4537 cnic_ctx_wr(dev, cid_addr, offset0, val);
4539 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4540 cnic_ctx_wr(dev, cid_addr, offset1, val);
4542 txbd = udev->l2_ring;
4544 buf_map = udev->l2_buf_map;
4545 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4546 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4547 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4549 val = (u64) ring_map >> 32;
4550 cnic_ctx_wr(dev, cid_addr, offset2, val);
4551 txbd->tx_bd_haddr_hi = val;
4553 val = (u64) ring_map & 0xffffffff;
4554 cnic_ctx_wr(dev, cid_addr, offset3, val);
4555 txbd->tx_bd_haddr_lo = val;
4558 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4560 struct cnic_local *cp = dev->cnic_priv;
4561 struct cnic_eth_dev *ethdev = cp->ethdev;
4562 struct cnic_uio_dev *udev = cp->udev;
4563 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4566 struct status_block *s_blk = cp->status_blk.gen;
4567 dma_addr_t ring_map = udev->l2_ring_map;
4569 sb_id = cp->status_blk_num;
4570 cnic_init_context(dev, 2);
4571 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4572 coal_reg = BNX2_HC_COMMAND;
4573 coal_val = CNIC_RD(dev, coal_reg);
4574 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4575 struct status_block_msix *sblk = cp->status_blk.bnx2;
4577 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4578 coal_reg = BNX2_HC_COALESCE_NOW;
4579 coal_val = 1 << (11 + sb_id);
4582 while (*cp->rx_cons_ptr == 0 && i < 10) {
4583 CNIC_WR(dev, coal_reg, coal_val);
4588 cp->rx_cons = *cp->rx_cons_ptr;
4590 cid_addr = GET_CID_ADDR(2);
4591 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4592 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4593 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4596 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4598 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4599 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4601 rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4602 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4604 int n = (i % cp->l2_rx_ring_size) + 1;
4606 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4607 rxbd->rx_bd_len = cp->l2_single_buf_size;
4608 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4609 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4610 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4612 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4613 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4614 rxbd->rx_bd_haddr_hi = val;
4616 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4617 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4618 rxbd->rx_bd_haddr_lo = val;
4620 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4621 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
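/*
 * Buffer layout note: slot 0 of the UIO buffer area belongs to the TX
 * ring (cnic_init_bnx2_tx_ring() uses l2_buf_map directly), which is
 * why n starts at 1 here.  The l2_rx_ring_size receive buffers are
 * then spread round-robin over all MAX_RX_DESC_CNT descriptors, so
 * the descriptor ring may be larger than the buffer pool.
 */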
4624 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4626 struct kwqe *wqes[1], l2kwqe;
4628 memset(&l2kwqe, 0, sizeof(l2kwqe));
4630 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4631 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4632 KWQE_OPCODE_SHIFT) | 2;
4633 dev->submit_kwqes(dev, wqes, 1);
4636 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4638 struct cnic_local *cp = dev->cnic_priv;
4641 val = cp->func << 2;
4643 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4645 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4646 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4647 dev->mac_addr[0] = (u8) (val >> 8);
4648 dev->mac_addr[1] = (u8) val;
4650 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4652 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4653 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4654 dev->mac_addr[2] = (u8) (val >> 24);
4655 dev->mac_addr[3] = (u8) (val >> 16);
4656 dev->mac_addr[4] = (u8) (val >> 8);
4657 dev->mac_addr[5] = (u8) val;
4659 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4661 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4662 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4663 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4665 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4666 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4667 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4670 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4672 struct cnic_local *cp = dev->cnic_priv;
4673 struct cnic_eth_dev *ethdev = cp->ethdev;
4674 struct status_block *sblk = cp->status_blk.gen;
4675 u32 val, kcq_cid_addr, kwq_cid_addr;
4678 cnic_set_bnx2_mac(dev);
4680 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4681 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4682 if (BCM_PAGE_BITS > 12)
4683 val |= (12 - 8) << 4;
4685 val |= (BCM_PAGE_BITS - 8) << 4;
4687 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4689 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4690 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4691 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4693 err = cnic_setup_5709_context(dev, 1);
4694 if (err)
4695 return err;
4697 cnic_init_context(dev, KWQ_CID);
4698 cnic_init_context(dev, KCQ_CID);
4700 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4701 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4703 cp->max_kwq_idx = MAX_KWQ_IDX;
4704 cp->kwq_prod_idx = 0;
4705 cp->kwq_con_idx = 0;
4706 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4708 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4709 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4710 else
4711 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4713 /* Initialize the kernel work queue context. */
4714 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4715 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4716 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4718 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4719 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4721 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4722 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4724 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4725 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4727 val = (u32) cp->kwq_info.pgtbl_map;
4728 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4730 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4731 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4733 cp->kcq1.sw_prod_idx = 0;
4734 cp->kcq1.hw_prod_idx_ptr =
4735 &sblk->status_completion_producer_index;
4737 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4739 /* Initialize the kernel complete queue context. */
4740 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4741 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4742 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4744 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4745 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4747 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4748 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4750 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4751 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4753 val = (u32) cp->kcq1.dma.pgtbl_map;
4754 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4757 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4758 struct status_block_msix *msblk = cp->status_blk.bnx2;
4759 u32 sb_id = cp->status_blk_num;
4760 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4762 cp->kcq1.hw_prod_idx_ptr =
4763 &msblk->status_completion_producer_index;
4764 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4765 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4766 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4767 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4768 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4769 }
4771 /* Enable Command Scheduler notification when we write to the
4772 * host producer index of the kernel contexts. */
4773 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4775 /* Enable Command Scheduler notification when we write to either
4776 * the Send Queue or Receive Queue producer indexes of the kernel
4777 * bypass contexts. */
4778 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4779 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4781 /* Notify COM when the driver posts an application buffer. */
4782 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4784 /* Set the CP and COM doorbells. These two processors poll the
4785 * doorbell for a non-zero value before running. This must be done
4786 * after setting up the kernel queue contexts. */
4787 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4788 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4790 cnic_init_bnx2_tx_ring(dev);
4791 cnic_init_bnx2_rx_ring(dev);
4793 err = cnic_init_bnx2_irq(dev);
4794 if (err) {
4795 netdev_err(dev->netdev, "cnic_init_bnx2_irq failed\n");
4796 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4797 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4798 return err;
4799 }
4801 return 0;
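/* Write the DMA address of each context block, aligned as required by
 * the chip, into the context table starting at the offset supplied by
 * the bnx2x driver.
 */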
4804 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4806 struct cnic_local *cp = dev->cnic_priv;
4807 struct cnic_eth_dev *ethdev = cp->ethdev;
4808 u32 start_offset = ethdev->ctx_tbl_offset;
4809 int i;
4811 for (i = 0; i < cp->ctx_blks; i++) {
4812 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4813 dma_addr_t map = ctx->mapping;
4815 if (cp->ctx_align) {
4816 unsigned long mask = cp->ctx_align - 1;
4818 map = (map + mask) & ~mask;
4819 }
4821 cnic_ctx_tbl_wr(dev, start_offset + i, map);
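/* bnx2x events are serviced in a tasklet; the IRQ itself is only
 * requested when the bnx2x driver is running with MSI-X.
 */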
4825 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4827 struct cnic_local *cp = dev->cnic_priv;
4828 struct cnic_eth_dev *ethdev = cp->ethdev;
4829 int err = 0;
4831 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4832 (unsigned long) dev);
4833 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4834 err = cnic_request_irq(dev);
4836 return err;
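/* Set or clear the host-coalescing "enabled" flag of one status block
 * index in CSTORM internal memory.
 */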
4839 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4840 u16 sb_id, u8 sb_index,
4841 u8 disable)
4844 u32 addr = BAR_CSTRORM_INTMEM +
4845 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4846 offsetof(struct hc_status_block_data_e1x, index_data) +
4847 sizeof(struct hc_index_data)*sb_index +
4848 offsetof(struct hc_index_data, flags);
4849 u16 flags = CNIC_RD16(dev, addr);
4851 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4852 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4853 HC_INDEX_DATA_HC_ENABLED);
4854 CNIC_WR16(dev, addr, flags);
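/* Program a timeout for the iSCSI EQ consumer index and turn its
 * host-coalescing "enabled" flag back on.
 */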
4857 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4859 struct cnic_local *cp = dev->cnic_priv;
4860 u8 sb_id = cp->status_blk_num;
4862 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4863 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4864 offsetof(struct hc_status_block_data_e1x, index_data) +
4865 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4866 offsetof(struct hc_index_data, timeout), 64 / 4);
4867 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4870 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
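/* Build the UIO tx ring in page 0 of the l2_ring: each 3-BD slot
 * points into the shared l2_buf area, and the tx half of the
 * CLIENT_SETUP ramrod data is filled in from it.
 */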
4874 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4875 struct client_init_ramrod_data *data)
4877 struct cnic_local *cp = dev->cnic_priv;
4878 struct cnic_uio_dev *udev = cp->udev;
4879 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4880 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4881 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4882 int i;
4883 u32 cli = cp->ethdev->iscsi_l2_client_id;
4884 u32 val;
4886 memset(txbd, 0, BCM_PAGE_SIZE);
4888 buf_map = udev->l2_buf_map;
4889 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4890 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4891 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4893 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4894 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4895 reg_bd->addr_hi = start_bd->addr_hi;
4896 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4897 start_bd->nbytes = cpu_to_le16(0x10);
4898 start_bd->nbd = cpu_to_le16(3);
4899 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4900 start_bd->general_data = (UNICAST_ADDRESS <<
4901 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4902 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4903 }
4906 val = (u64) ring_map >> 32;
4907 txbd->next_bd.addr_hi = cpu_to_le32(val);
4909 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4911 val = (u64) ring_map & 0xffffffff;
4912 txbd->next_bd.addr_lo = cpu_to_le32(val);
4914 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4916 /* Other ramrod params */
4917 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4918 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4920 /* reset xstorm per client statistics */
4921 if (cli < MAX_STAT_COUNTER_ID) {
4922 data->general.statistics_zero_flg = 1;
4923 data->general.statistics_en_flg = 1;
4924 data->general.statistics_counter_id = cli;
4925 }
4927 cp->tx_cons_ptr =
4928 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
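/* Build the UIO rx ring: page 1 of the l2_ring holds the rx BDs and
 * page 2 the rx completion queue; both next-page pointers chain each
 * single-page ring back to itself.
 */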
4931 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4932 struct client_init_ramrod_data *data)
4934 struct cnic_local *cp = dev->cnic_priv;
4935 struct cnic_uio_dev *udev = cp->udev;
4936 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4937 BCM_PAGE_SIZE);
4938 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4939 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
4940 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4941 int i;
4942 u32 cli = cp->ethdev->iscsi_l2_client_id;
4943 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4944 u32 val;
4945 dma_addr_t ring_map = udev->l2_ring_map;
4948 data->general.client_id = cli;
4949 data->general.activate_flg = 1;
4950 data->general.sp_client_id = cli;
4951 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4952 data->general.func_id = cp->pfid;
4954 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4955 dma_addr_t buf_map;
4956 int n = (i % cp->l2_rx_ring_size) + 1;
4958 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4959 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4960 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4961 }
4963 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4964 rxbd->addr_hi = cpu_to_le32(val);
4965 data->rx.bd_page_base.hi = cpu_to_le32(val);
4967 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4968 rxbd->addr_lo = cpu_to_le32(val);
4969 data->rx.bd_page_base.lo = cpu_to_le32(val);
4971 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4972 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4973 rxcqe->addr_hi = cpu_to_le32(val);
4974 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4976 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4977 rxcqe->addr_lo = cpu_to_le32(val);
4978 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4980 /* Other ramrod params */
4981 data->rx.client_qzone_id = cl_qzone_id;
4982 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4983 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4985 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4987 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
4988 data->rx.outer_vlan_removal_enable_flg = 1;
4989 data->rx.silent_vlan_removal_flg = 1;
4990 data->rx.silent_vlan_value = 0;
4991 data->rx.silent_vlan_mask = 0xffff;
4993 cp->rx_cons_ptr =
4994 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4995 cp->rx_cons = *cp->rx_cons_ptr;
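/* Point kcq1 (iSCSI) and, on E2 and later chips, kcq2 (FCoE) at their
 * EQ producer offsets in internal memory and at the status block
 * indices appropriate for this chip family.
 */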
4998 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5000 struct cnic_local *cp = dev->cnic_priv;
5001 u32 pfid = cp->pfid;
5003 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
5004 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
5005 cp->kcq1.sw_prod_idx = 0;
5007 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5008 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5010 cp->kcq1.hw_prod_idx_ptr =
5011 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5012 cp->kcq1.status_idx_ptr =
5013 &sb->sb.running_index[SM_RX_ID];
5014 } else {
5015 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
5017 cp->kcq1.hw_prod_idx_ptr =
5018 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
5019 cp->kcq1.status_idx_ptr =
5020 &sb->sb.running_index[SM_RX_ID];
5021 }
5023 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5024 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
5026 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
5027 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
5028 cp->kcq2.sw_prod_idx = 0;
5029 cp->kcq2.hw_prod_idx_ptr =
5030 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
5031 cp->kcq2.status_idx_ptr =
5032 &sb->sb.running_index[SM_RX_ID];
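/* Start the bnx2x path: discover the PF number and port mode,
 * allocate the iSCSI and FCoE CID tables, and program the iSCSI EQ
 * and global buffer addresses into the storm memories.
 */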
5036 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5038 struct cnic_local *cp = dev->cnic_priv;
5039 struct cnic_eth_dev *ethdev = cp->ethdev;
5040 int func = CNIC_FUNC(cp), ret;
5041 u32 pfid;
5043 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5044 cp->port_mode = CHIP_PORT_MODE_NONE;
5046 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5047 u32 val;
5049 pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
5050 cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
5051 ME_REG_ABS_PF_NUM_SHIFT);
5052 func = CNIC_FUNC(cp);
5054 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
5055 if (!(val & 1))
5056 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
5057 else
5058 val = (val >> 1) & 1;
5060 if (val) {
5061 cp->port_mode = CHIP_4_PORT_MODE;
5062 cp->pfid = func >> 1;
5063 } else {
5064 cp->port_mode = CHIP_2_PORT_MODE;
5065 cp->pfid = func & 0x6;
5066 }
5067 } else {
5068 cp->pfid = func;
5069 }
5071 pfid = cp->pfid;
5072 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
5073 cp->iscsi_start_cid, 0);
5075 if (ret)
5076 return -ENOMEM;
5078 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5079 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
5080 cp->fcoe_start_cid, 0);
5082 if (ret)
5083 return -ENOMEM;
5084 }
5086 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5088 cnic_init_bnx2x_kcq(dev);
5091 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5092 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5093 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5094 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5095 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5096 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5097 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5098 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5099 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5100 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5101 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5102 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5103 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5104 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5105 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5106 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5107 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5108 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5109 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5110 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5111 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5112 HC_INDEX_ISCSI_EQ_CONS);
5114 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5115 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5116 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5117 CNIC_WR(dev, BAR_USTRORM_INTMEM +
5118 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5119 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5121 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5122 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5124 cnic_setup_bnx2x_context(dev);
5126 ret = cnic_init_bnx2x_irq(dev);
5127 if (ret)
5128 return ret;
5130 return 0;
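/* Create the UIO L2 rings. On bnx2 this programs the chip directly;
 * on bnx2x a CLIENT_SETUP ramrod is submitted and we poll briefly for
 * its completion before enabling the ring.
 */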
5133 static void cnic_init_rings(struct cnic_dev *dev)
5135 struct cnic_local *cp = dev->cnic_priv;
5136 struct cnic_uio_dev *udev = cp->udev;
5138 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5139 return;
5141 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5142 cnic_init_bnx2_tx_ring(dev);
5143 cnic_init_bnx2_rx_ring(dev);
5144 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5145 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5146 u32 cli = cp->ethdev->iscsi_l2_client_id;
5147 u32 cid = cp->ethdev->iscsi_l2_cid;
5148 u32 cl_qzone_id;
5149 struct client_init_ramrod_data *data;
5150 union l5cm_specific_data l5_data;
5151 struct ustorm_eth_rx_producers rx_prods = {0};
5152 u32 off, i, *cid_ptr;
5154 rx_prods.bd_prod = 0;
5155 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5156 barrier();
5158 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
5160 off = BAR_USTRORM_INTMEM +
5161 (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
5162 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5163 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
5165 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5166 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5168 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5170 data = udev->l2_buf;
5171 cid_ptr = udev->l2_buf + 12;
5173 memset(data, 0, sizeof(*data));
5175 cnic_init_bnx2x_tx_ring(dev, data);
5176 cnic_init_bnx2x_rx_ring(dev, data);
5178 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5179 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5181 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5183 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5184 cid, ETH_CONNECTION_TYPE, &l5_data);
5186 i = 0;
5187 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5188 ++i < 10)
5189 msleep(1);
5191 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5192 netdev_err(dev->netdev,
5193 "iSCSI CLIENT_SETUP did not complete\n");
5194 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5195 cnic_ring_ctl(dev, cid, cli, 1);
5196 *cid_ptr = cid;
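/* Tear down the UIO L2 rings. On bnx2x the client is stopped with an
 * ETH_HALT ramrod followed by a CFC_DEL, and the rx ring page is
 * cleared for the next open.
 */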
5200 static void cnic_shutdown_rings(struct cnic_dev *dev)
5202 struct cnic_local *cp = dev->cnic_priv;
5203 struct cnic_uio_dev *udev = cp->udev;
5204 void *rx_ring;
5206 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5207 return;
5209 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5210 cnic_shutdown_bnx2_rx_ring(dev);
5211 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5212 u32 cli = cp->ethdev->iscsi_l2_client_id;
5213 u32 cid = cp->ethdev->iscsi_l2_cid;
5214 union l5cm_specific_data l5_data;
5215 int i;
5217 cnic_ring_ctl(dev, cid, cli, 0);
5219 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5221 l5_data.phy_address.lo = cli;
5222 l5_data.phy_address.hi = 0;
5223 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5224 cid, ETH_CONNECTION_TYPE, &l5_data);
5225 i = 0;
5226 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5227 ++i < 10)
5228 msleep(1);
5230 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5231 netdev_err(dev->netdev,
5232 "iSCSI CLIENT_HALT did not complete\n");
5233 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5235 memset(&l5_data, 0, sizeof(l5_data));
5236 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5237 cid, NONE_CONNECTION_TYPE, &l5_data);
5238 msleep(10);
5239 }
5240 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5241 rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
5242 memset(rx_ring, 0, BCM_PAGE_SIZE);
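/* Register our cnic_ops with the underlying bnx2/bnx2x driver. */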
5245 static int cnic_register_netdev(struct cnic_dev *dev)
5247 struct cnic_local *cp = dev->cnic_priv;
5248 struct cnic_eth_dev *ethdev = cp->ethdev;
5249 int err;
5251 if (!ethdev)
5252 return -ENODEV;
5254 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5255 return 0;
5257 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5258 if (err)
5259 netdev_err(dev->netdev, "register_cnic failed\n");
5261 return err;
5264 static void cnic_unregister_netdev(struct cnic_dev *dev)
5266 struct cnic_local *cp = dev->cnic_priv;
5267 struct cnic_eth_dev *ethdev = cp->ethdev;
5269 if (!ethdev)
5270 return;
5272 ethdev->drv_unregister_cnic(dev->netdev);
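/* Common bring-up path: grab the PCI device, allocate per-chip
 * resources, start the hardware and the connection manager, then
 * enable interrupts. Failures unwind through err1.
 */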
5275 static int cnic_start_hw(struct cnic_dev *dev)
5277 struct cnic_local *cp = dev->cnic_priv;
5278 struct cnic_eth_dev *ethdev = cp->ethdev;
5281 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5282 return -EALREADY;
5284 dev->regview = ethdev->io_base;
5285 pci_dev_get(dev->pcidev);
5286 cp->func = PCI_FUNC(dev->pcidev->devfn);
5287 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5288 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5290 err = cp->alloc_resc(dev);
5291 if (err) {
5292 netdev_err(dev->netdev, "allocate resource failure\n");
5293 goto err1;
5294 }
5296 err = cp->start_hw(dev);
5297 if (err)
5298 goto err1;
5300 err = cnic_cm_open(dev);
5301 if (err)
5302 goto err1;
5304 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5306 cp->enable_int(dev);
5308 return 0;
5310 err1:
5311 cp->free_resc(dev);
5312 pci_dev_put(dev->pcidev);
5313 return err;
5316 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5318 cnic_disable_bnx2_int_sync(dev);
5320 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5321 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5323 cnic_init_context(dev, KWQ_CID);
5324 cnic_init_context(dev, KCQ_CID);
5326 cnic_setup_5709_context(dev, 0);
5327 cnic_free_irq(dev);
5329 cnic_free_resc(dev);
5333 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5335 struct cnic_local *cp = dev->cnic_priv;
5337 cnic_free_irq(dev);
5338 *cp->kcq1.hw_prod_idx_ptr = 0;
5339 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5340 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5341 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5342 cnic_free_resc(dev);
5345 static void cnic_stop_hw(struct cnic_dev *dev)
5347 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5348 struct cnic_local *cp = dev->cnic_priv;
5349 int i = 0;
5351 /* Need to wait for the ring shutdown event to complete
5352 * before clearing the CNIC_UP flag.
5354 while (cp->udev->uio_dev != -1 && i < 15) {
5355 msleep(100);
5356 i++;
5357 }
5358 cnic_shutdown_rings(dev);
5360 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5361 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5363 cnic_cm_shutdown(dev);
5365 pci_dev_put(dev->pcidev);
5369 static void cnic_free_dev(struct cnic_dev *dev)
5371 int i = 0;
5373 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5374 msleep(100);
5375 i++;
5376 }
5377 if (atomic_read(&dev->ref_count) != 0)
5378 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5380 netdev_info(dev->netdev, "Removed CNIC device\n");
5381 dev_put(dev->netdev);
5382 kfree(dev);
5385 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5386 struct pci_dev *pdev)
5388 struct cnic_dev *cdev;
5389 struct cnic_local *cp;
5390 int alloc_size;
5392 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5394 cdev = kzalloc(alloc_size, GFP_KERNEL);
5395 if (cdev == NULL) {
5396 netdev_err(dev, "allocate dev struct failure\n");
5397 return NULL;
5398 }
5401 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5402 cdev->register_device = cnic_register_device;
5403 cdev->unregister_device = cnic_unregister_device;
5404 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5406 cp = cdev->cnic_priv;
5408 cp->l2_single_buf_size = 0x400;
5409 cp->l2_rx_ring_size = 3;
5411 spin_lock_init(&cp->cnic_ulp_lock);
5413 netdev_info(dev, "Added CNIC device\n");
5415 return cdev;
5418 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5420 struct pci_dev *pdev;
5421 struct cnic_dev *cdev;
5422 struct cnic_local *cp;
5423 struct cnic_eth_dev *ethdev = NULL;
5424 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5426 probe = symbol_get(bnx2_cnic_probe);
5427 if (probe) {
5428 ethdev = (*probe)(dev);
5429 symbol_put(bnx2_cnic_probe);
5430 }
5431 if (!ethdev)
5432 return NULL;
5434 pdev = ethdev->pdev;
5435 if (!pdev)
5436 return NULL;
5438 dev_hold(dev);
5439 pci_dev_get(pdev);
5440 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5441 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5442 (pdev->revision < 0x10)) {
5443 pci_dev_put(pdev);
5444 goto cnic_err;
5445 }
5446 pci_dev_put(pdev);
5448 cdev = cnic_alloc_dev(dev, pdev);
5449 if (cdev == NULL)
5450 goto cnic_err;
5452 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5453 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5455 cp = cdev->cnic_priv;
5456 cp->ethdev = ethdev;
5457 cdev->pcidev = pdev;
5458 cp->chip_id = ethdev->chip_id;
5460 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5462 cp->cnic_ops = &cnic_bnx2_ops;
5463 cp->start_hw = cnic_start_bnx2_hw;
5464 cp->stop_hw = cnic_stop_bnx2_hw;
5465 cp->setup_pgtbl = cnic_setup_page_tbl;
5466 cp->alloc_resc = cnic_alloc_bnx2_resc;
5467 cp->free_resc = cnic_free_resc;
5468 cp->start_cm = cnic_cm_init_bnx2_hw;
5469 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5470 cp->enable_int = cnic_enable_bnx2_int;
5471 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5472 cp->close_conn = cnic_close_bnx2_conn;
5474 return cdev;
5476 cnic_err:
5477 dev_put(dev);
5478 return NULL;
5480 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5482 struct pci_dev *pdev;
5483 struct cnic_dev *cdev;
5484 struct cnic_local *cp;
5485 struct cnic_eth_dev *ethdev = NULL;
5486 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5488 probe = symbol_get(bnx2x_cnic_probe);
5489 if (probe) {
5490 ethdev = (*probe)(dev);
5491 symbol_put(bnx2x_cnic_probe);
5492 }
5493 if (!ethdev)
5494 return NULL;
5496 pdev = ethdev->pdev;
5497 if (!pdev)
5498 return NULL;
5500 dev_hold(dev);
5501 cdev = cnic_alloc_dev(dev, pdev);
5502 if (cdev == NULL) {
5503 dev_put(dev);
5504 return NULL;
5505 }
5507 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5508 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5510 cp = cdev->cnic_priv;
5511 cp->ethdev = ethdev;
5512 cdev->pcidev = pdev;
5513 cp->chip_id = ethdev->chip_id;
5515 cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5517 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5518 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5519 if (CNIC_SUPPORTS_FCOE(cp))
5520 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5522 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5523 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5525 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5527 cp->cnic_ops = &cnic_bnx2x_ops;
5528 cp->start_hw = cnic_start_bnx2x_hw;
5529 cp->stop_hw = cnic_stop_bnx2x_hw;
5530 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5531 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5532 cp->free_resc = cnic_free_resc;
5533 cp->start_cm = cnic_cm_init_bnx2x_hw;
5534 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5535 cp->enable_int = cnic_enable_bnx2x_int;
5536 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5537 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5538 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5539 cp->arm_int = cnic_arm_bnx2x_e2_msix;
5541 cp->ack_int = cnic_ack_bnx2x_msix;
5542 cp->arm_int = cnic_arm_bnx2x_msix;
5544 cp->close_conn = cnic_close_bnx2x_conn;
5546 return cdev;
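/* Identify bnx2/bnx2x netdevs by their ethtool driver name and create
 * a CNIC device on top of them.
 */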
5548 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5550 struct ethtool_drvinfo drvinfo;
5551 struct cnic_dev *cdev = NULL;
5553 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5554 memset(&drvinfo, 0, sizeof(drvinfo));
5555 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5557 if (!strcmp(drvinfo.driver, "bnx2"))
5558 cdev = init_bnx2_cnic(dev);
5559 if (!strcmp(drvinfo.driver, "bnx2x"))
5560 cdev = init_bnx2x_cnic(dev);
5561 if (cdev) {
5562 write_lock(&cnic_dev_lock);
5563 list_add(&cdev->list, &cnic_dev_list);
5564 write_unlock(&cnic_dev_lock);
5565 }
5567 return cdev;
5570 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5571 u16 vlan_id)
5573 int if_type;
5575 rcu_read_lock();
5576 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5577 struct cnic_ulp_ops *ulp_ops;
5580 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5581 if (!ulp_ops || !ulp_ops->indicate_netevent)
5582 continue;
5584 ctx = cp->ulp_handle[if_type];
5586 ulp_ops->indicate_netevent(ctx, event, vlan_id);
5588 rcu_read_unlock();
5591 /* netdev event handler */
5592 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5595 struct net_device *netdev = ptr;
5596 struct cnic_dev *dev;
5597 int new_dev = 0;
5599 dev = cnic_from_netdev(netdev);
5601 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
5602 /* Check for the hot-plug device */
5603 dev = is_cnic_dev(netdev);
5604 if (dev) {
5605 new_dev = 1;
5606 cnic_hold(dev);
5607 }
5608 }
5609 if (dev) {
5610 struct cnic_local *cp = dev->cnic_priv;
5612 if (new_dev)
5613 cnic_ulp_init(dev);
5614 else if (event == NETDEV_UNREGISTER)
5615 cnic_ulp_exit(dev);
5617 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
5618 if (cnic_register_netdev(dev) != 0) {
5619 cnic_put(dev);
5620 goto done;
5621 }
5622 if (!cnic_start_hw(dev))
5623 cnic_ulp_start(dev);
5624 }
5626 cnic_rcv_netevent(cp, event, 0);
5628 if (event == NETDEV_GOING_DOWN) {
5631 cnic_unregister_netdev(dev);
5632 } else if (event == NETDEV_UNREGISTER) {
5633 write_lock(&cnic_dev_lock);
5634 list_del_init(&dev->list);
5635 write_unlock(&cnic_dev_lock);
5637 cnic_put(dev);
5638 cnic_free_dev(dev);
5639 goto done;
5640 }
5641 cnic_put(dev);
5642 } else {
5643 struct net_device *realdev;
5644 u16 vid;
5646 vid = cnic_get_vlan(netdev, &realdev);
5647 if (realdev) {
5648 dev = cnic_from_netdev(realdev);
5649 if (dev) {
5650 vid |= VLAN_TAG_PRESENT;
5651 cnic_rcv_netevent(dev->cnic_priv, event, vid);
5652 cnic_put(dev);
5653 }
5654 }
5655 }
5657 done:
5658 return NOTIFY_DONE;
5660 static struct notifier_block cnic_netdev_notifier = {
5661 .notifier_call = cnic_netdev_event
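/* Module-unload helper: stop and free every remaining CNIC device and
 * UIO device.
 */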
5664 static void cnic_release(void)
5666 struct cnic_dev *dev;
5667 struct cnic_uio_dev *udev;
5669 while (!list_empty(&cnic_dev_list)) {
5670 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5671 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5672 cnic_ulp_stop(dev);
5673 cnic_stop_hw(dev);
5674 }
5677 cnic_unregister_netdev(dev);
5678 list_del_init(&dev->list);
5679 cnic_free_dev(dev);
5680 }
5681 while (!list_empty(&cnic_udev_list)) {
5682 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5683 list);
5684 cnic_free_uio(udev);
5688 static int __init cnic_init(void)
5690 int rc = 0;
5692 pr_info("%s", version);
5694 rc = register_netdevice_notifier(&cnic_netdev_notifier);
5695 if (rc)
5696 return rc;
5700 cnic_wq = create_singlethread_workqueue("cnic_wq");
5701 if (!cnic_wq) {
5702 rc = -ENOMEM;
5703 unregister_netdevice_notifier(&cnic_netdev_notifier);
5704 return rc;
5705 }
5707 return 0;
5710 static void __exit cnic_exit(void)
5712 unregister_netdevice_notifier(&cnic_netdev_notifier);
5713 cnic_release();
5714 destroy_workqueue(cnic_wq);
5717 module_init(cnic_init);
5718 module_exit(cnic_exit);