/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}
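/*
 * Illustrative sketch (not part of the original source): the locking
 * rules implied above.  cnic_ulp_tbl is RCU-protected and only updated
 * under cnic_lock, so a lockless reader does:
 *
 *	rcu_read_lock();
 *	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
 *	if (ulp_ops)
 *		ulp_ops->iscsi_nl_send_msg(...);
 *	rcu_read_unlock();
 *
 * while a writer holding cnic_lock may use cnic_ulp_tbl_prot() and
 * rcu_assign_pointer() directly, as cnic_register_driver() does below.
 */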
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
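/*
 * Usage sketch for the ID table helpers above (illustrative, not from
 * the original source).  IDs are bitmap slots offset by id_tbl->start,
 * so a typical caller, like the connection-resource code below, does:
 *
 *	u32 cid = cnic_alloc_new_id(&cp->cid_tbl);
 *	if (cid == -1)
 *		return -ENOMEM;		(table exhausted)
 *	...
 *	cnic_free_id(&cp->cid_tbl, cid);
 */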
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
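/*
 * Worked example (illustrative, not from the original source): for a
 * page mapped at DMA address 0x0000000123456000, cnic_setup_page_tbl()
 * emits the high word first (0x00000001, then 0x23456000) for a chip
 * that reads the table as big endian, while cnic_setup_page_tbl_le()
 * emits the low word first (0x23456000, then 0x00000001).  Which
 * variant a device uses is selected via cp->setup_pgtbl elsewhere in
 * the driver.
 */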
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
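/*
 * Worked example (illustrative, not from the original source): suppose
 * max_kwq_idx == 127, kwq_prod_idx == 5 and kwq_con_idx == 126, i.e.
 * the producer index has wrapped.  Then (5 - 126) & 127 == 7 entries
 * are in flight, and cnic_kwq_avail() returns 127 - 7 == 120 free
 * slots; the mask is what makes the subtraction wrap-safe.
 */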
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
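/*
 * Illustrative example: cnic_get_kwqe_16_data() splits the 64-bit DMA
 * address of the per-connection scratch buffer into the lo/hi pair the
 * chip expects; map == 0x0000000123456780 yields
 * phy_address.lo == 0x23456780 and phy_address.hi == 0x00000001.
 */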
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
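/*
 * Usage sketch (illustrative, not from the original source): the SPQ
 * commands below all follow the same pattern - stage the ramrod
 * parameters in the connection's kwqe_data page, then post one 16-byte
 * work queue element that points at it:
 *
 *	union l5cm_specific_data l5_data;
 *	void *data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
 *
 *	memcpy(data, kwqe, sizeof(struct kwqe));
 *	ret = cnic_submit_kwqe_16(dev, cmd, cid, type, &l5_data);
 */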
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}
	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
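/*
 * Worked example (illustrative, not from the original source): with
 * ctx_align == 0x1000 and a block mapped at 0x12345600, mask == 0xfff
 * and align_off == 0x1000 - 0x600 == 0xa00, so the first context used
 * starts at the aligned address 0x12346000.  cnic_alloc_bnx2x_context()
 * above grew each block by one ctx_align whenever a mapping came back
 * unaligned, so the shifted contexts still fit inside the allocation.
 */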
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
	fcoe_init->eq_next_page_addr.lo =
		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
	fcoe_init->eq_next_page_addr.hi =
		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
2318 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2320 struct fcoe_kwqe_conn_enable_disable *req;
2321 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2322 union l5cm_specific_data l5_data;
2325 struct cnic_local *cp = dev->cnic_priv;
2327 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2328 cid = req->context_id;
2329 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2331 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2332 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2335 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2339 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2340 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2341 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2342 FCOE_CONNECTION_TYPE, &l5_data);
2346 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2348 struct fcoe_kwqe_conn_enable_disable *req;
2349 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2350 union l5cm_specific_data l5_data;
2353 struct cnic_local *cp = dev->cnic_priv;
2355 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2356 cid = req->context_id;
2357 l5_cid = req->conn_id;
2358 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2361 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2363 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2364 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2367 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2371 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2372 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2373 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2374 FCOE_CONNECTION_TYPE, &l5_data);
2378 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2380 struct fcoe_kwqe_conn_destroy *req;
2381 union l5cm_specific_data l5_data;
2384 struct cnic_local *cp = dev->cnic_priv;
2385 struct cnic_context *ctx;
2386 struct fcoe_kcqe kcqe;
2387 struct kcqe *cqes[1];
2389 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2390 cid = req->context_id;
2391 l5_cid = req->conn_id;
2392 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2395 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2397 ctx = &cp->ctx_tbl[l5_cid];
2399 init_waitqueue_head(&ctx->waitq);
2402 memset(&l5_data, 0, sizeof(l5_data));
2403 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2404 FCOE_CONNECTION_TYPE, &l5_data);
2406 wait_event(ctx->waitq, ctx->wait_cond);
2407 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2408 queue_delayed_work(cnic_wq, &cp->delete_task,
2409 msecs_to_jiffies(2000));
2412 memset(&kcqe, 0, sizeof(kcqe));
2413 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2414 kcqe.fcoe_conn_id = req->conn_id;
2415 kcqe.fcoe_conn_context_id = cid;
2417 cqes[0] = (struct kcqe *) &kcqe;
2418 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2422 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2424 struct fcoe_kwqe_destroy *req;
2425 union l5cm_specific_data l5_data;
2426 struct cnic_local *cp = dev->cnic_priv;
2430 req = (struct fcoe_kwqe_destroy *) kwqe;
2431 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2433 memset(&l5_data, 0, sizeof(l5_data));
2434 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
2435 FCOE_CONNECTION_TYPE, &l5_data);
2439 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2440 struct kwqe *wqes[], u32 num_wqes)
2446 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */
2449 for (i = 0; i < num_wqes; ) {
2451 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2455 case ISCSI_KWQE_OPCODE_INIT1:
2456 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2458 case ISCSI_KWQE_OPCODE_INIT2:
2459 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2461 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2462 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2463 num_wqes - i, &work);
2465 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2466 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2468 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2469 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2471 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2472 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2475 case L4_KWQE_OPCODE_VALUE_CLOSE:
2476 ret = cnic_bnx2x_close(dev, kwqe);
2478 case L4_KWQE_OPCODE_VALUE_RESET:
2479 ret = cnic_bnx2x_reset(dev, kwqe);
2481 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2482 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2484 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2485 ret = cnic_bnx2x_update_pg(dev, kwqe);
2487 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2492 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2497 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2504 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2505 struct kwqe *wqes[], u32 num_wqes)
2507 struct cnic_local *cp = dev->cnic_priv;
2512 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */
2515 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
2518 for (i = 0; i < num_wqes; ) {
2520 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2524 case FCOE_KWQE_OPCODE_INIT1:
2525 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2526 num_wqes - i, &work);
2528 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2529 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2530 num_wqes - i, &work);
2532 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2533 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2535 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2536 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2538 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2539 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2541 case FCOE_KWQE_OPCODE_DESTROY:
2542 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2544 case FCOE_KWQE_OPCODE_STAT:
2545 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2549 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2554 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2561 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2567 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2568 return -EAGAIN; /* bnx2x is down */
2573 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2574 switch (layer_code) {
2575 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2576 case KWQE_FLAGS_LAYER_MASK_L4:
2577 case KWQE_FLAGS_LAYER_MASK_L2:
2578 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2581 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2582 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2588 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2590 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2591 return KCQE_FLAGS_LAYER_MASK_L4;
2593 return opflag & KCQE_FLAGS_LAYER_MASK;
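/* FCoE TERMINATE_CONN ramrod completions are deliberately reported
 * under the L4 layer mask so that cnic_cm_process_kcqe() can route
 * them to cnic_process_fcoe_term_conn() and wake the waiter in
 * cnic_bnx2x_fcoe_destroy(); every other KCQE dispatches on the layer
 * bits carried in its op_flag.
 */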
2596 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2598 struct cnic_local *cp = dev->cnic_priv;
2604 struct cnic_ulp_ops *ulp_ops;
2606 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2607 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2609 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2612 while (j < num_cqes) {
2613 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2615 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2618 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2623 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2624 ulp_type = CNIC_ULP_RDMA;
2625 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2626 ulp_type = CNIC_ULP_ISCSI;
2627 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2628 ulp_type = CNIC_ULP_FCOE;
2629 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2630 ulp_type = CNIC_ULP_L4;
2631 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2634 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2640 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2641 if (likely(ulp_ops)) {
2642 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2643 cp->completed_kcq + i, j);
2652 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
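/* Ramrod completions consume slow-path queue (SPQ) credits in the
 * bnx2x firmware; once the KCQEs have been handed to the ULPs, the
 * cnic_spq_completion() call above returns those credits to the
 * ethernet driver.
 */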
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
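/* On bnx2x the last entry of each KCQ page is reserved as a next-page
 * pointer, so software indices must skip one slot per page: for
 * example, cnic_bnx2x_next_idx(MAX_KCQE_CNT - 1) returns
 * MAX_KCQE_CNT + 1, stepping over the reserved entry.  bnx2 rings
 * have no reserved entry, so its helpers are a plain increment and
 * the identity.
 */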
2681 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2683 struct cnic_local *cp = dev->cnic_priv;
2684 u16 i, ri, hw_prod, last;
2686 int kcqe_cnt = 0, last_cnt = 0;
2688 i = ri = last = info->sw_prod_idx;
2690 hw_prod = *info->hw_prod_idx_ptr;
2691 hw_prod = cp->hw_idx(hw_prod);
2693 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2694 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2695 cp->completed_kcq[kcqe_cnt++] = kcqe;
2696 i = cp->next_idx(i);
2697 ri = i & MAX_KCQ_IDX;
2698 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2699 last_cnt = kcqe_cnt;
2704 info->sw_prod_idx = last;
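/* A KCQE with KCQE_FLAGS_NEXT set is continued by the entry that
 * follows it, so the loop above only commits last/last_cnt on entries
 * that close a group; a trailing partial group is left in the ring
 * and picked up on a later pass once the rest of it has arrived.
 */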
2708 static int cnic_l2_completion(struct cnic_local *cp)
2710 u16 hw_cons, sw_cons;
2711 struct cnic_uio_dev *udev = cp->udev;
2712 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2713 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2717 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2720 hw_cons = *cp->rx_cons_ptr;
2721 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2724 sw_cons = cp->rx_cons;
2725 while (sw_cons != hw_cons) {
2728 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2729 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2730 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2731 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2732 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2733 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2734 cmd == RAMROD_CMD_ID_ETH_HALT)
2737 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2742 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2744 u16 rx_cons, tx_cons;
2747 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2750 rx_cons = *cp->rx_cons_ptr;
2751 tx_cons = *cp->tx_cons_ptr;
2752 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2753 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2754 comp = cnic_l2_completion(cp);
2756 cp->tx_cons = tx_cons;
2757 cp->rx_cons = rx_cons;
2760 uio_event_notify(&cp->udev->cnic_uinfo);
2763 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2766 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2768 struct cnic_local *cp = dev->cnic_priv;
2769 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2772 /* status block index must be read before reading other fields */
2774 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2776 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2778 service_kcqes(dev, kcqe_cnt);
2780 /* Tell compiler that status_blk fields can change. */
2782 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2783 /* status block index must be read first */
2785 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2788 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2790 cnic_chk_pkt_rings(cp);
2795 static int cnic_service_bnx2(void *data, void *status_blk)
2797 struct cnic_dev *dev = data;
2799 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2800 struct status_block *sblk = status_blk;
2802 return sblk->status_idx;
2805 return cnic_service_bnx2_queues(dev);
2808 static void cnic_service_bnx2_msix(unsigned long data)
2810 struct cnic_dev *dev = (struct cnic_dev *) data;
2811 struct cnic_local *cp = dev->cnic_priv;
2813 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2815 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2816 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2819 static void cnic_doirq(struct cnic_dev *dev)
2821 struct cnic_local *cp = dev->cnic_priv;
2823 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2824 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2826 prefetch(cp->status_blk.gen);
2827 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2829 tasklet_schedule(&cp->cnic_irq_task);
2833 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2835 struct cnic_dev *dev = dev_instance;
2836 struct cnic_local *cp = dev->cnic_priv;
2846 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2847 u16 index, u8 op, u8 update)
2849 struct cnic_local *cp = dev->cnic_priv;
2850 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2851 COMMAND_REG_INT_ACK);
2852 struct igu_ack_register igu_ack;
2854 igu_ack.status_block_index = index;
2855 igu_ack.sb_id_and_flags =
2856 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2857 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2858 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2859 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2861 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2864 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2865 u16 index, u8 op, u8 update)
2867 struct igu_regular cmd_data;
2868 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2870 cmd_data.sb_id_and_flags =
2871 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2872 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2873 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2874 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2877 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2880 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2882 struct cnic_local *cp = dev->cnic_priv;
2884 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2885 IGU_INT_DISABLE, 0);
2888 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2890 struct cnic_local *cp = dev->cnic_priv;
2892 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2893 IGU_INT_DISABLE, 0);
2896 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2898 u32 last_status = *info->status_idx_ptr;
2901 /* status block index must be read before reading the KCQ */
2903 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2905 service_kcqes(dev, kcqe_cnt);
2907 /* Tell compiler that sblk fields can change. */
2910 last_status = *info->status_idx_ptr;
2911 /* status block index must be read before reading the KCQ */
2917 static void cnic_service_bnx2x_bh(unsigned long data)
2919 struct cnic_dev *dev = (struct cnic_dev *) data;
2920 struct cnic_local *cp = dev->cnic_priv;
2921 u32 status_idx, new_status_idx;
2923 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2927 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2929 CNIC_WR16(dev, cp->kcq1.io_addr,
2930 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2932 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2933 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2934 status_idx, IGU_INT_ENABLE, 1);
2938 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2940 if (new_status_idx != status_idx)
2943 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2946 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2947 status_idx, IGU_INT_ENABLE, 1);
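/* On E2 (57712) chips a second completion queue, kcq2, carries the
 * FCoE events (its pages were programmed as the FCoE EQ in
 * cnic_bnx2x_fcoe_init1()).  The status index is re-read after
 * servicing it so that events arriving mid-handler are not lost
 * before the final IGU ack re-enables the interrupt.
 */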
2953 static int cnic_service_bnx2x(void *data, void *status_blk)
2955 struct cnic_dev *dev = data;
2956 struct cnic_local *cp = dev->cnic_priv;
2958 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2961 cnic_chk_pkt_rings(cp);
2966 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2968 struct cnic_ulp_ops *ulp_ops;
2970 if (if_type == CNIC_ULP_ISCSI)
2971 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2973 mutex_lock(&cnic_lock);
2974 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2975 lockdep_is_held(&cnic_lock));
2977 mutex_unlock(&cnic_lock);
2980 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2981 mutex_unlock(&cnic_lock);
2983 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2984 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2986 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2989 static void cnic_ulp_stop(struct cnic_dev *dev)
2991 struct cnic_local *cp = dev->cnic_priv;
2994 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
2995 cnic_ulp_stop_one(cp, if_type);
2998 static void cnic_ulp_start(struct cnic_dev *dev)
3000 struct cnic_local *cp = dev->cnic_priv;
3003 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3004 struct cnic_ulp_ops *ulp_ops;
3006 mutex_lock(&cnic_lock);
3007 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3008 lockdep_is_held(&cnic_lock));
3009 if (!ulp_ops || !ulp_ops->cnic_start) {
3010 mutex_unlock(&cnic_lock);
3013 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3014 mutex_unlock(&cnic_lock);
3016 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3017 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3019 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3023 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3025 struct cnic_dev *dev = data;
3027 switch (info->cmd) {
3028 case CNIC_CTL_STOP_CMD:
3036 case CNIC_CTL_START_CMD:
3039 if (!cnic_start_hw(dev))
3040 cnic_ulp_start(dev);
3044 case CNIC_CTL_STOP_ISCSI_CMD: {
3045 struct cnic_local *cp = dev->cnic_priv;
3046 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3047 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3050 case CNIC_CTL_COMPLETION_CMD: {
3051 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3053 struct cnic_local *cp = dev->cnic_priv;
3055 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3056 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3059 wake_up(&ctx->waitq);
3069 static void cnic_ulp_init(struct cnic_dev *dev)
3072 struct cnic_local *cp = dev->cnic_priv;
3074 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3075 struct cnic_ulp_ops *ulp_ops;
3077 mutex_lock(&cnic_lock);
3078 ulp_ops = cnic_ulp_tbl_prot(i);
3079 if (!ulp_ops || !ulp_ops->cnic_init) {
3080 mutex_unlock(&cnic_lock);
3084 mutex_unlock(&cnic_lock);
3086 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3087 ulp_ops->cnic_init(dev);
3093 static void cnic_ulp_exit(struct cnic_dev *dev)
3096 struct cnic_local *cp = dev->cnic_priv;
3098 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3099 struct cnic_ulp_ops *ulp_ops;
3101 mutex_lock(&cnic_lock);
3102 ulp_ops = cnic_ulp_tbl_prot(i);
3103 if (!ulp_ops || !ulp_ops->cnic_exit) {
3104 mutex_unlock(&cnic_lock);
3108 mutex_unlock(&cnic_lock);
3110 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3111 ulp_ops->cnic_exit(dev);
3117 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3119 struct cnic_dev *dev = csk->dev;
3120 struct l4_kwq_offload_pg *l4kwqe;
3121 struct kwqe *wqes[1];
3123 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3124 memset(l4kwqe, 0, sizeof(*l4kwqe));
3125 wqes[0] = (struct kwqe *) l4kwqe;
3127 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3129 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3130 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3132 l4kwqe->da0 = csk->ha[0];
3133 l4kwqe->da1 = csk->ha[1];
3134 l4kwqe->da2 = csk->ha[2];
3135 l4kwqe->da3 = csk->ha[3];
3136 l4kwqe->da4 = csk->ha[4];
3137 l4kwqe->da5 = csk->ha[5];
3139 l4kwqe->sa0 = dev->mac_addr[0];
3140 l4kwqe->sa1 = dev->mac_addr[1];
3141 l4kwqe->sa2 = dev->mac_addr[2];
3142 l4kwqe->sa3 = dev->mac_addr[3];
3143 l4kwqe->sa4 = dev->mac_addr[4];
3144 l4kwqe->sa5 = dev->mac_addr[5];
3146 l4kwqe->etype = ETH_P_IP;
3147 l4kwqe->ipid_start = DEF_IPID_START;
3148 l4kwqe->host_opaque = csk->l5_cid;
3151 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3152 l4kwqe->vlan_tag = csk->vlan_id;
3153 l4kwqe->l2hdr_nbytes += 4;
3156 return dev->submit_kwqes(dev, wqes, 1);
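/* With a VLAN-tagged socket the L2 header grows by the 4-byte 802.1Q
 * tag, which is why l2hdr_nbytes is bumped from ETH_HLEN (14) to 18
 * above.
 */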
3159 static int cnic_cm_update_pg(struct cnic_sock *csk)
3161 struct cnic_dev *dev = csk->dev;
3162 struct l4_kwq_update_pg *l4kwqe;
3163 struct kwqe *wqes[1];
3165 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3166 memset(l4kwqe, 0, sizeof(*l4kwqe));
3167 wqes[0] = (struct kwqe *) l4kwqe;
3169 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3171 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3172 l4kwqe->pg_cid = csk->pg_cid;
3174 l4kwqe->da0 = csk->ha[0];
3175 l4kwqe->da1 = csk->ha[1];
3176 l4kwqe->da2 = csk->ha[2];
3177 l4kwqe->da3 = csk->ha[3];
3178 l4kwqe->da4 = csk->ha[4];
3179 l4kwqe->da5 = csk->ha[5];
3181 l4kwqe->pg_host_opaque = csk->l5_cid;
3182 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3184 return dev->submit_kwqes(dev, wqes, 1);
3187 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3189 struct cnic_dev *dev = csk->dev;
3190 struct l4_kwq_upload *l4kwqe;
3191 struct kwqe *wqes[1];
3193 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3194 memset(l4kwqe, 0, sizeof(*l4kwqe));
3195 wqes[0] = (struct kwqe *) l4kwqe;
3197 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3199 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3200 l4kwqe->cid = csk->pg_cid;
3202 return dev->submit_kwqes(dev, wqes, 1);
3205 static int cnic_cm_conn_req(struct cnic_sock *csk)
3207 struct cnic_dev *dev = csk->dev;
3208 struct l4_kwq_connect_req1 *l4kwqe1;
3209 struct l4_kwq_connect_req2 *l4kwqe2;
3210 struct l4_kwq_connect_req3 *l4kwqe3;
3211 struct kwqe *wqes[3];
3215 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3216 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3217 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3218 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3219 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3220 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3222 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3224 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3225 l4kwqe3->ka_timeout = csk->ka_timeout;
3226 l4kwqe3->ka_interval = csk->ka_interval;
3227 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3228 l4kwqe3->tos = csk->tos;
3229 l4kwqe3->ttl = csk->ttl;
3230 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3231 l4kwqe3->pmtu = csk->mtu;
3232 l4kwqe3->rcv_buf = csk->rcv_buf;
3233 l4kwqe3->snd_buf = csk->snd_buf;
3234 l4kwqe3->seed = csk->seed;
3236 wqes[0] = (struct kwqe *) l4kwqe1;
3237 if (test_bit(SK_F_IPV6, &csk->flags)) {
3238 wqes[1] = (struct kwqe *) l4kwqe2;
3239 wqes[2] = (struct kwqe *) l4kwqe3;
3242 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3243 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3245 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3246 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3247 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3248 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3249 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3250 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3251 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3252 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3253 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3254 sizeof(struct tcphdr);
3256 wqes[1] = (struct kwqe *) l4kwqe3;
3257 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3258 sizeof(struct tcphdr);
3261 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3263 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3264 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3265 l4kwqe1->cid = csk->cid;
3266 l4kwqe1->pg_cid = csk->pg_cid;
3267 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3268 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3269 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3270 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3271 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3272 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3273 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3274 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3275 if (csk->tcp_flags & SK_TCP_NAGLE)
3276 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3277 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3278 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3279 if (csk->tcp_flags & SK_TCP_SACK)
3280 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3281 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3282 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3284 l4kwqe1->tcp_flags = tcp_flags;
3286 return dev->submit_kwqes(dev, wqes, num_wqes);
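/* The advertised MSS above is the path MTU minus the fixed IP and TCP
 * header sizes.  For example, with a 1500-byte MTU this gives
 * 1500 - 20 - 20 = 1460 for IPv4 and 1500 - 40 - 20 = 1440 for IPv6
 * (header options are not accounted for here).
 */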
3289 static int cnic_cm_close_req(struct cnic_sock *csk)
3291 struct cnic_dev *dev = csk->dev;
3292 struct l4_kwq_close_req *l4kwqe;
3293 struct kwqe *wqes[1];
3295 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3296 memset(l4kwqe, 0, sizeof(*l4kwqe));
3297 wqes[0] = (struct kwqe *) l4kwqe;
3299 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3300 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3301 l4kwqe->cid = csk->cid;
3303 return dev->submit_kwqes(dev, wqes, 1);
3306 static int cnic_cm_abort_req(struct cnic_sock *csk)
3308 struct cnic_dev *dev = csk->dev;
3309 struct l4_kwq_reset_req *l4kwqe;
3310 struct kwqe *wqes[1];
3312 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3313 memset(l4kwqe, 0, sizeof(*l4kwqe));
3314 wqes[0] = (struct kwqe *) l4kwqe;
3316 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3317 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3318 l4kwqe->cid = csk->cid;
3320 return dev->submit_kwqes(dev, wqes, 1);
3323 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3324 u32 l5_cid, struct cnic_sock **csk, void *context)
3326 struct cnic_local *cp = dev->cnic_priv;
3327 struct cnic_sock *csk1;
3329 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3333 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3335 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3339 csk1 = &cp->csk_tbl[l5_cid];
3340 if (atomic_read(&csk1->ref_count))
3343 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3348 csk1->l5_cid = l5_cid;
3349 csk1->ulp_type = ulp_type;
3350 csk1->context = context;
3352 csk1->ka_timeout = DEF_KA_TIMEOUT;
3353 csk1->ka_interval = DEF_KA_INTERVAL;
3354 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3355 csk1->tos = DEF_TOS;
3356 csk1->ttl = DEF_TTL;
3357 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3358 csk1->rcv_buf = DEF_RCV_BUF;
3359 csk1->snd_buf = DEF_SND_BUF;
3360 csk1->seed = DEF_SEED;
3366 static void cnic_cm_cleanup(struct cnic_sock *csk)
3368 if (csk->src_port) {
3369 struct cnic_dev *dev = csk->dev;
3370 struct cnic_local *cp = dev->cnic_priv;
3372 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3377 static void cnic_close_conn(struct cnic_sock *csk)
3379 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3380 cnic_cm_upload_pg(csk);
3381 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3383 cnic_cm_cleanup(csk);
3386 static int cnic_cm_destroy(struct cnic_sock *csk)
3388 if (!cnic_in_use(csk))
3392 clear_bit(SK_F_INUSE, &csk->flags);
3393 smp_mb__after_clear_bit();
3394 while (atomic_read(&csk->ref_count) != 1)
3396 cnic_cm_cleanup(csk);
3403 static inline u16 cnic_get_vlan(struct net_device *dev,
3404 struct net_device **vlan_dev)
3406 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3407 *vlan_dev = vlan_dev_real_dev(dev);
3408 return vlan_dev_vlan_id(dev);
3414 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3415 struct dst_entry **dst)
3417 #if defined(CONFIG_INET)
3420 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3427 return -ENETUNREACH;
3431 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3432 struct dst_entry **dst)
3434 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3437 memset(&fl6, 0, sizeof(fl6));
3438 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3439 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3440 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3442 *dst = ip6_route_output(&init_net, NULL, &fl6);
3447 return -ENETUNREACH;
3450 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3453 struct cnic_dev *dev = NULL;
3454 struct dst_entry *dst;
3455 struct net_device *netdev = NULL;
3456 int err = -ENETUNREACH;
3458 if (dst_addr->sin_family == AF_INET)
3459 err = cnic_get_v4_route(dst_addr, &dst);
3460 else if (dst_addr->sin_family == AF_INET6) {
3461 struct sockaddr_in6 *dst_addr6 =
3462 (struct sockaddr_in6 *) dst_addr;
3464 err = cnic_get_v6_route(dst_addr6, &dst);
3474 cnic_get_vlan(dst->dev, &netdev);
3476 dev = cnic_from_netdev(netdev);
3485 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3487 struct cnic_dev *dev = csk->dev;
3488 struct cnic_local *cp = dev->cnic_priv;
3490 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3493 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3495 struct cnic_dev *dev = csk->dev;
3496 struct cnic_local *cp = dev->cnic_priv;
3498 struct dst_entry *dst = NULL;
3499 struct net_device *realdev;
3503 if (saddr->local.v6.sin6_family == AF_INET6 &&
3504 saddr->remote.v6.sin6_family == AF_INET6)
3506 else if (saddr->local.v4.sin_family == AF_INET &&
3507 saddr->remote.v4.sin_family == AF_INET)
3512 clear_bit(SK_F_IPV6, &csk->flags);
3515 set_bit(SK_F_IPV6, &csk->flags);
3516 cnic_get_v6_route(&saddr->remote.v6, &dst);
3518 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3519 sizeof(struct in6_addr));
3520 csk->dst_port = saddr->remote.v6.sin6_port;
3521 local_port = saddr->local.v6.sin6_port;
3524 cnic_get_v4_route(&saddr->remote.v4, &dst);
3526 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3527 csk->dst_port = saddr->remote.v4.sin_port;
3528 local_port = saddr->local.v4.sin_port;
3532 csk->mtu = dev->netdev->mtu;
3533 if (dst && dst->dev) {
3534 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3535 if (realdev == dev->netdev) {
3536 csk->vlan_id = vlan;
3537 csk->mtu = dst_mtu(dst);
3541 port_id = be16_to_cpu(local_port);
3542 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3543 port_id < CNIC_LOCAL_PORT_MAX) {
3544 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3550 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3551 if (port_id == -1) {
3555 local_port = cpu_to_be16(port_id);
3557 csk->src_port = local_port;
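/* Source port selection above: a caller-supplied port inside the
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) window is reserved
 * directly in the id table, while a zero port makes the driver pull
 * the next free id from the table instead.
 */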
3564 static void cnic_init_csk_state(struct cnic_sock *csk)
3567 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3568 clear_bit(SK_F_CLOSING, &csk->flags);
3571 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3573 struct cnic_local *cp = csk->dev->cnic_priv;
3576 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3579 if (!cnic_in_use(csk))
3582 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3585 cnic_init_csk_state(csk);
3587 err = cnic_get_route(csk, saddr);
3591 err = cnic_resolve_addr(csk, saddr);
3596 clear_bit(SK_F_CONNECT_START, &csk->flags);
3600 static int cnic_cm_abort(struct cnic_sock *csk)
3602 struct cnic_local *cp = csk->dev->cnic_priv;
3603 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3605 if (!cnic_in_use(csk))
3608 if (cnic_abort_prep(csk))
3609 return cnic_cm_abort_req(csk);
	/* Getting here means that we haven't started connect, or the
	 * connect was not successful.
	 */
3615 cp->close_conn(csk, opcode);
3616 if (csk->state != opcode)
3622 static int cnic_cm_close(struct cnic_sock *csk)
3624 if (!cnic_in_use(csk))
3627 if (cnic_close_prep(csk)) {
3628 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3629 return cnic_cm_close_req(csk);
3636 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3639 struct cnic_ulp_ops *ulp_ops;
3640 int ulp_type = csk->ulp_type;
3643 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3645 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3646 ulp_ops->cm_connect_complete(csk);
3647 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3648 ulp_ops->cm_close_complete(csk);
3649 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3650 ulp_ops->cm_remote_abort(csk);
3651 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3652 ulp_ops->cm_abort_complete(csk);
3653 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3654 ulp_ops->cm_remote_close(csk);
3659 static int cnic_cm_set_pg(struct cnic_sock *csk)
3661 if (cnic_offld_prep(csk)) {
3662 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3663 cnic_cm_update_pg(csk);
3665 cnic_cm_offload_pg(csk);
3670 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3672 struct cnic_local *cp = dev->cnic_priv;
3673 u32 l5_cid = kcqe->pg_host_opaque;
3674 u8 opcode = kcqe->op_code;
3675 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3678 if (!cnic_in_use(csk))
3681 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3682 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3685 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3686 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3687 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3688 cnic_cm_upcall(cp, csk,
3689 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3693 csk->pg_cid = kcqe->pg_cid;
3694 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3695 cnic_cm_conn_req(csk);
3701 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3703 struct cnic_local *cp = dev->cnic_priv;
3704 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3705 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3706 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3708 ctx->timestamp = jiffies;
3710 wake_up(&ctx->waitq);
3713 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3715 struct cnic_local *cp = dev->cnic_priv;
3716 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3717 u8 opcode = l4kcqe->op_code;
3719 struct cnic_sock *csk;
3721 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3722 cnic_process_fcoe_term_conn(dev, kcqe);
3725 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3726 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3727 cnic_cm_process_offld_pg(dev, l4kcqe);
3731 l5_cid = l4kcqe->conn_id;
3733 l5_cid = l4kcqe->cid;
3734 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3737 csk = &cp->csk_tbl[l5_cid];
3740 if (!cnic_in_use(csk)) {
3746 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3747 if (l4kcqe->status != 0) {
3748 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3749 cnic_cm_upcall(cp, csk,
3750 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3753 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3754 if (l4kcqe->status == 0)
3755 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3757 smp_mb__before_clear_bit();
3758 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3759 cnic_cm_upcall(cp, csk, opcode);
3762 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3763 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3764 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3765 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3766 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3767 cp->close_conn(csk, opcode);
3770 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3771 /* after we already sent CLOSE_REQ */
3772 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3773 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3774 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3775 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3777 cnic_cm_upcall(cp, csk, opcode);
3783 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3785 struct cnic_dev *dev = data;
3788 for (i = 0; i < num; i++)
3789 cnic_cm_process_kcqe(dev, kcqe[i]);
3792 static struct cnic_ulp_ops cm_ulp_ops = {
3793 .indicate_kcqes = cnic_cm_indicate_kcqe,
3796 static void cnic_cm_free_mem(struct cnic_dev *dev)
3798 struct cnic_local *cp = dev->cnic_priv;
3802 cnic_free_id_tbl(&cp->csk_port_tbl);
3805 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3807 struct cnic_local *cp = dev->cnic_priv;
3810 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3815 get_random_bytes(&port_id, sizeof(port_id));
3816 port_id %= CNIC_LOCAL_PORT_RANGE;
3817 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3818 CNIC_LOCAL_PORT_MIN, port_id)) {
3819 cnic_cm_free_mem(dev);
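/* The port id table is seeded at a random offset within
 * CNIC_LOCAL_PORT_RANGE, so successive driver loads do not hand out
 * the same source ports in the same order.
 */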
3825 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3827 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3828 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3829 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3830 csk->state = opcode;
	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was
	 *    never established, we accept the opcode from cm_abort.
	 */
3839 if (opcode == csk->state || csk->state == 0 ||
3840 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
3841 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3842 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3843 if (csk->state == 0)
3844 csk->state = opcode;
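/* cnic_ready_to_close() is the single gate into the closing state:
 * SK_F_CLOSING is set at most once via test_and_set_bit(), and
 * csk->state records which close/reset completion is expected to
 * finish the teardown.
 */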
3851 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3853 struct cnic_dev *dev = csk->dev;
3854 struct cnic_local *cp = dev->cnic_priv;
3856 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3857 cnic_cm_upcall(cp, csk, opcode);
3861 clear_bit(SK_F_CONNECT_START, &csk->flags);
3862 cnic_close_conn(csk);
3863 csk->state = opcode;
3864 cnic_cm_upcall(cp, csk, opcode);
3867 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	get_random_bytes(&seed, 4);
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
3880 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3882 struct cnic_dev *dev = csk->dev;
3883 struct cnic_local *cp = dev->cnic_priv;
3884 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3885 union l5cm_specific_data l5_data;
3887 int close_complete = 0;
3890 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3891 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3892 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3893 if (cnic_ready_to_close(csk, opcode)) {
3894 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3895 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3900 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3901 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3903 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3908 memset(&l5_data, 0, sizeof(l5_data));
3910 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3912 } else if (close_complete) {
3913 ctx->timestamp = jiffies;
3914 cnic_close_conn(csk);
3915 cnic_cm_upcall(cp, csk, csk->state);
3919 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3921 struct cnic_local *cp = dev->cnic_priv;
3927 if (!netif_running(dev->netdev))
3930 for (i = 0; i < cp->max_cid_space; i++) {
3931 struct cnic_context *ctx = &cp->ctx_tbl[i];
3933 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3936 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3937 netdev_warn(dev->netdev, "CID %x not deleted\n",
3941 cancel_delayed_work(&cp->delete_task);
3942 flush_workqueue(cnic_wq);
3944 if (atomic_read(&cp->iscsi_conn) != 0)
3945 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3946 atomic_read(&cp->iscsi_conn));
3949 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3951 struct cnic_local *cp = dev->cnic_priv;
3952 u32 pfid = cp->pfid;
3953 u32 port = CNIC_PORT(cp);
3955 cnic_init_bnx2x_mac(dev);
3956 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3958 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3959 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3961 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3962 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3963 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3964 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3967 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3968 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3969 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3970 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3971 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3972 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3973 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3974 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3976 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
3981 static void cnic_delete_task(struct work_struct *work)
3983 struct cnic_local *cp;
3984 struct cnic_dev *dev;
3986 int need_resched = 0;
3988 cp = container_of(work, struct cnic_local, delete_task.work);
3991 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
3992 struct drv_ctl_info info;
3994 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
3996 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
3997 cp->ethdev->drv_ctl(dev->netdev, &info);
4000 for (i = 0; i < cp->max_cid_space; i++) {
4001 struct cnic_context *ctx = &cp->ctx_tbl[i];
4003 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4004 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4007 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4012 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4015 cnic_bnx2x_destroy_ramrod(dev, i);
4017 cnic_free_bnx2x_conn_resc(dev, i);
4018 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4019 atomic_dec(&cp->iscsi_conn);
4021 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4025 queue_delayed_work(cnic_wq, &cp->delete_task,
4026 msecs_to_jiffies(10));
4030 static int cnic_cm_open(struct cnic_dev *dev)
4032 struct cnic_local *cp = dev->cnic_priv;
4035 err = cnic_cm_alloc_mem(dev);
4039 err = cp->start_cm(dev);
4044 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4046 dev->cm_create = cnic_cm_create;
4047 dev->cm_destroy = cnic_cm_destroy;
4048 dev->cm_connect = cnic_cm_connect;
4049 dev->cm_abort = cnic_cm_abort;
4050 dev->cm_close = cnic_cm_close;
4051 dev->cm_select_dev = cnic_cm_select_dev;
4053 cp->ulp_handle[CNIC_ULP_L4] = dev;
4054 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4058 cnic_cm_free_mem(dev);
4062 static int cnic_cm_shutdown(struct cnic_dev *dev)
4064 struct cnic_local *cp = dev->cnic_priv;
4072 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4073 struct cnic_sock *csk = &cp->csk_tbl[i];
4075 clear_bit(SK_F_INUSE, &csk->flags);
4076 cnic_cm_cleanup(csk);
4078 cnic_cm_free_mem(dev);
4083 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4088 cid_addr = GET_CID_ADDR(cid);
4090 for (i = 0; i < CTX_SIZE; i += 4)
4091 cnic_ctx_wr(dev, cid_addr, i, 0);
4094 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4096 struct cnic_local *cp = dev->cnic_priv;
4098 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4100 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4103 for (i = 0; i < cp->ctx_blks; i++) {
4105 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4108 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4110 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4111 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4112 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4113 (u64) cp->ctx_arr[i].mapping >> 32);
4114 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4115 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4116 for (j = 0; j < 10; j++) {
4118 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4119 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4123 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
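/* Each page-table entry write is posted through
 * BNX2_CTX_HOST_PAGE_TBL_CTRL and then polled (up to 10 reads above)
 * until the chip clears the WRITE_REQ bit; if the bit never clears,
 * the setup is abandoned with an error for this context block.
 */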
4131 static void cnic_free_irq(struct cnic_dev *dev)
4133 struct cnic_local *cp = dev->cnic_priv;
4134 struct cnic_eth_dev *ethdev = cp->ethdev;
4136 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4137 cp->disable_int_sync(dev);
4138 tasklet_kill(&cp->cnic_irq_task);
4139 free_irq(ethdev->irq_arr[0].vector, dev);
4143 static int cnic_request_irq(struct cnic_dev *dev)
4145 struct cnic_local *cp = dev->cnic_priv;
4146 struct cnic_eth_dev *ethdev = cp->ethdev;
4149 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4151 tasklet_disable(&cp->cnic_irq_task);
4156 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4158 struct cnic_local *cp = dev->cnic_priv;
4159 struct cnic_eth_dev *ethdev = cp->ethdev;
4161 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4163 int sblk_num = cp->status_blk_num;
4164 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4165 BNX2_HC_SB_CONFIG_1;
4167 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4169 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4170 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4171 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4173 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4174 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4175 (unsigned long) dev);
4176 err = cnic_request_irq(dev);
4180 while (cp->status_blk.bnx2->status_completion_producer_index &&
4182 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4183 1 << (11 + sblk_num));
4188 if (cp->status_blk.bnx2->status_completion_producer_index) {
4194 struct status_block *sblk = cp->status_blk.gen;
4195 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4198 while (sblk->status_completion_producer_index && i < 10) {
4199 CNIC_WR(dev, BNX2_HC_COMMAND,
4200 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4205 if (sblk->status_completion_producer_index)
4212 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4216 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4218 struct cnic_local *cp = dev->cnic_priv;
4219 struct cnic_eth_dev *ethdev = cp->ethdev;
4221 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4224 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4225 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
{
	u32 max_conn;

	max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
	dev->max_iscsi_conn = max_conn;
}
4236 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4238 struct cnic_local *cp = dev->cnic_priv;
4239 struct cnic_eth_dev *ethdev = cp->ethdev;
4241 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4244 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4245 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4246 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4247 synchronize_irq(ethdev->irq_arr[0].vector);
4250 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4252 struct cnic_local *cp = dev->cnic_priv;
4253 struct cnic_eth_dev *ethdev = cp->ethdev;
4254 struct cnic_uio_dev *udev = cp->udev;
4255 u32 cid_addr, tx_cid, sb_id;
4256 u32 val, offset0, offset1, offset2, offset3;
4259 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4260 struct status_block *s_blk = cp->status_blk.gen;
4262 sb_id = cp->status_blk_num;
4264 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4265 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4266 struct status_block_msix *sblk = cp->status_blk.bnx2;
4268 tx_cid = TX_TSS_CID + sb_id - 1;
4269 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4271 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4273 cp->tx_cons = *cp->tx_cons_ptr;
4275 cid_addr = GET_CID_ADDR(tx_cid);
4276 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4277 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4279 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4280 cnic_ctx_wr(dev, cid_addr2, i, 0);
4282 offset0 = BNX2_L2CTX_TYPE_XI;
4283 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4284 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4285 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4287 cnic_init_context(dev, tx_cid);
4288 cnic_init_context(dev, tx_cid + 1);
4290 offset0 = BNX2_L2CTX_TYPE;
4291 offset1 = BNX2_L2CTX_CMD_TYPE;
4292 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4293 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4295 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4296 cnic_ctx_wr(dev, cid_addr, offset0, val);
4298 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4299 cnic_ctx_wr(dev, cid_addr, offset1, val);
4301 txbd = (struct tx_bd *) udev->l2_ring;
4303 buf_map = udev->l2_buf_map;
4304 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4305 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4306 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4308 val = (u64) ring_map >> 32;
4309 cnic_ctx_wr(dev, cid_addr, offset2, val);
4310 txbd->tx_bd_haddr_hi = val;
4312 val = (u64) ring_map & 0xffffffff;
4313 cnic_ctx_wr(dev, cid_addr, offset3, val);
4314 txbd->tx_bd_haddr_lo = val;
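/* After the fill loop, txbd points at the final descriptor of the
 * page, which acts as a chain entry: its haddr_hi/lo are written with
 * the ring's own base address so the hardware wraps back to the first
 * BD.
 */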
4317 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4319 struct cnic_local *cp = dev->cnic_priv;
4320 struct cnic_eth_dev *ethdev = cp->ethdev;
4321 struct cnic_uio_dev *udev = cp->udev;
4322 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4325 struct status_block *s_blk = cp->status_blk.gen;
4326 dma_addr_t ring_map = udev->l2_ring_map;
4328 sb_id = cp->status_blk_num;
4329 cnic_init_context(dev, 2);
4330 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4331 coal_reg = BNX2_HC_COMMAND;
4332 coal_val = CNIC_RD(dev, coal_reg);
4333 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4334 struct status_block_msix *sblk = cp->status_blk.bnx2;
4336 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4337 coal_reg = BNX2_HC_COALESCE_NOW;
4338 coal_val = 1 << (11 + sb_id);
	while (*cp->rx_cons_ptr == 0 && i < 10) {
4342 CNIC_WR(dev, coal_reg, coal_val);
4347 cp->rx_cons = *cp->rx_cons_ptr;
4349 cid_addr = GET_CID_ADDR(2);
4350 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4351 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4352 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4355 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4357 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4358 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4360 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
4361 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4363 int n = (i % cp->l2_rx_ring_size) + 1;
4365 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4366 rxbd->rx_bd_len = cp->l2_single_buf_size;
4367 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4368 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4369 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4371 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4372 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4373 rxbd->rx_bd_haddr_hi = val;
4375 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4376 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4377 rxbd->rx_bd_haddr_lo = val;
4379 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4380 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4383 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4385 struct kwqe *wqes[1], l2kwqe;
4387 memset(&l2kwqe, 0, sizeof(l2kwqe));
4389 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4390 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4391 KWQE_OPCODE_SHIFT) | 2;
4392 dev->submit_kwqes(dev, wqes, 1);
4395 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4397 struct cnic_local *cp = dev->cnic_priv;
4400 val = cp->func << 2;
4402 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4404 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4405 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4406 dev->mac_addr[0] = (u8) (val >> 8);
4407 dev->mac_addr[1] = (u8) val;
4409 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4411 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4412 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4413 dev->mac_addr[2] = (u8) (val >> 24);
4414 dev->mac_addr[3] = (u8) (val >> 16);
4415 dev->mac_addr[4] = (u8) (val >> 8);
4416 dev->mac_addr[5] = (u8) val;
4418 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4420 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4421 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4422 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4424 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4425 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4426 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
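/* The iSCSI MAC address lives in shared memory as two 32-bit words:
 * the UPPER word holds bytes 0-1 in its low 16 bits and the LOWER
 * word holds bytes 2-5.  A worked example with hypothetical values
 * UPPER = 0x0000001b and LOWER = 0x0a1234ff:
 *
 *	mac[0] = 0x00, mac[1] = 0x1b	(UPPER >> 8, UPPER)
 *	mac[2] = 0x0a, mac[3] = 0x12	(LOWER >> 24, LOWER >> 16)
 *	mac[4] = 0x34, mac[5] = 0xff	(LOWER >> 8, LOWER)
 */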
4429 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4431 struct cnic_local *cp = dev->cnic_priv;
4432 struct cnic_eth_dev *ethdev = cp->ethdev;
4433 struct status_block *sblk = cp->status_blk.gen;
4434 u32 val, kcq_cid_addr, kwq_cid_addr;
4437 cnic_set_bnx2_mac(dev);
4439 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4440 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4441 if (BCM_PAGE_BITS > 12)
4442 val |= (12 - 8) << 4;
4444 val |= (BCM_PAGE_BITS - 8) << 4;
4446 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4448 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4449 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4450 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4452 err = cnic_setup_5709_context(dev, 1);
4456 cnic_init_context(dev, KWQ_CID);
4457 cnic_init_context(dev, KCQ_CID);
4459 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4460 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4462 cp->max_kwq_idx = MAX_KWQ_IDX;
4463 cp->kwq_prod_idx = 0;
4464 cp->kwq_con_idx = 0;
4465 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4467 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4468 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4470 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4472 /* Initialize the kernel work queue context. */
4473 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4474 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4475 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4477 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4478 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4480 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4481 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4483 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4484 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4486 val = (u32) cp->kwq_info.pgtbl_map;
4487 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4489 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4490 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4492 cp->kcq1.sw_prod_idx = 0;
4493 cp->kcq1.hw_prod_idx_ptr =
4494 (u16 *) &sblk->status_completion_producer_index;
4496 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4498 /* Initialize the kernel complete queue context. */
4499 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4500 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4501 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4503 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4504 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4506 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4507 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4509 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4510 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4512 val = (u32) cp->kcq1.dma.pgtbl_map;
4513 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4516 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4517 struct status_block_msix *msblk = cp->status_blk.bnx2;
4518 u32 sb_id = cp->status_blk_num;
4519 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4521 cp->kcq1.hw_prod_idx_ptr =
4522 (u16 *) &msblk->status_completion_producer_index;
4523 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4524 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4525 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4526 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4527 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
4532 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4534 /* Enable Command Scheduler notification when we write to either
4535 * the Send Queue or Receive Queue producer indexes of the kernel
4536 * bypass contexts. */
4537 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4538 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
	/* Notify COM when the driver posts an application buffer. */
4541 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
4546 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4547 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4549 cnic_init_bnx2_tx_ring(dev);
4550 cnic_init_bnx2_rx_ring(dev);
4552 err = cnic_init_bnx2_irq(dev);
4554 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4555 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4556 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4560 cnic_get_bnx2_iscsi_info(dev);
4565 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4567 struct cnic_local *cp = dev->cnic_priv;
4568 struct cnic_eth_dev *ethdev = cp->ethdev;
4569 u32 start_offset = ethdev->ctx_tbl_offset;
4572 for (i = 0; i < cp->ctx_blks; i++) {
4573 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4574 dma_addr_t map = ctx->mapping;
4576 if (cp->ctx_align) {
4577 unsigned long mask = cp->ctx_align - 1;
4579 map = (map + mask) & ~mask;
4582 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4586 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4588 struct cnic_local *cp = dev->cnic_priv;
4589 struct cnic_eth_dev *ethdev = cp->ethdev;
4592 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4593 (unsigned long) dev);
4594 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4595 err = cnic_request_irq(dev);
4600 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4601 u16 sb_id, u8 sb_index,
4605 u32 addr = BAR_CSTRORM_INTMEM +
4606 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4607 offsetof(struct hc_status_block_data_e1x, index_data) +
4608 sizeof(struct hc_index_data)*sb_index +
4609 offsetof(struct hc_index_data, flags);
4610 u16 flags = CNIC_RD16(dev, addr);
4612 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4613 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4614 HC_INDEX_DATA_HC_ENABLED);
4615 CNIC_WR16(dev, addr, flags);
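/* The helper above does a read-modify-write of the 16-bit flags word
 * in the status block data: HC_INDEX_DATA_HC_ENABLED is cleared and
 * then set from the inverted 'disable' argument, leaving the other
 * flag bits untouched.
 */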
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 12);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

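/* Build the iSCSI L2 TX ring inside the UIO-mapped l2_ring page and fill
 * in the TX portion of the CLIENT_SETUP ramrod data.  Each 3-BD group
 * points at a 16-byte slot of l2_buf, and the final BD of the page is a
 * next-page pointer aimed back at the same page, closing the ring.
 */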
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int port = CNIC_PORT(cp);
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_XSTRORM_INTMEM +
		      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

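/* Build the matching RX BD ring (page 1 of l2_ring) and completion queue
 * (page 2), and fill in the general and RX portions of the ramrod data.
 * RX buffers cycle through l2_rx_ring_size slots of l2_buf, and both
 * rings end with next-page pointers back to their own pages.
 */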
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	int port = CNIC_PORT(cp);
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.statistics_en_flg = 1;
	data->general.statistics_counter_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
	data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);

	data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->rx.outer_vlan_removal_enable_flg = 1;

	/* reset tstorm and ustorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		val = BAR_TSTRORM_INTMEM +
		      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);

		val = BAR_USTRORM_INTMEM +
		      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
		for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
			CNIC_WR(dev, val + i * 4, 0);
	}

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

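/* Attach the kernel completion queues to their hardware producers: kcq1
 * (iSCSI) always, and kcq2 (FCoE) only on E2 parts, each pointing at the
 * proper producer location in storm memory and at the status block
 * indices updated by the chip.
 */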
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

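/* One-time bnx2x bring-up: derive the PF id from the function number and
 * (on E2) the 2-port/4-port mode, create the iSCSI and FCoE CID tables,
 * then program the iSCSI EQ, the connection-buffer PBL and the global
 * buffer into storm internal memory before requesting the IRQ.
 */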
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret, i;
	u32 pfid;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val)
			cp->pfid = func >> 1;
		else
			cp->pfid = func & 0x6;
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
					BNX2X_FCOE_NUM_CONNECTIONS,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

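/* Bring up the iSCSI L2 rings.  bnx2 needs only direct ring setup; bnx2x
 * submits a CLIENT_SETUP ramrod and polls CNIC_LCL_FL_L2_WAIT for up to
 * ~10 ms (10 x msleep(1)) for its completion before returning the SPQ
 * credit and arming the ring with cnic_ring_ctl().
 */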
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
	}
}

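/* Tear the L2 rings back down.  For bnx2x this is a three-step sequence:
 * stop the ring doorbell, send ETH_HALT and wait for its completion,
 * then send COMMON_CFC_DEL to free the connection, with a 10 ms grace
 * sleep instead of a completion wait on the final ramrod.
 */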
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
}

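/* Register this cnic instance with the underlying bnx2/bnx2x driver via
 * its drv_register_cnic() callback.  Idempotent: once the ethdev reports
 * CNIC_DRV_STATE_REGD, the call returns 0 without re-registering.
 */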
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

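/* Reverse of cnic_start_hw().  Waits up to ~1.5 s (15 x 100 ms) for the
 * userspace UIO client to close before shutting down the rings, then
 * unpublishes the L4 ulp_ops under RCU and invokes the chip-specific
 * stop_hw method.
 */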
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return cdev;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

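/* Probe helpers for the two chip families.  symbol_get() pins the bnx2
 * or bnx2x module only for the duration of the *_cnic_probe() call, so
 * cnic itself does not keep the ethernet driver loaded.  On success the
 * new cnic_dev carries the family-specific method table consumed by
 * cnic_start_hw() and cnic_stop_hw().
 */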
static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	cp->next_idx = cnic_bnx2_next_idx;
	cp->hw_idx = cnic_bnx2_hw_idx;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}

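/* Classify a netdev by its ethtool driver name ("bnx2" or "bnx2x"); on a
 * match, create the cnic device and publish it on cnic_dev_list under
 * cnic_dev_lock.
 */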
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

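/* The notifier below drives the whole life cycle: NETDEV_REGISTER and
 * NETDEV_UP create and start devices, NETDEV_GOING_DOWN stops them, and
 * NETDEV_UNREGISTER tears them down.  Every event is also forwarded to
 * the registered ULPs through their indicate_netevent() callbacks.
 */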
/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int if_type;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		rcu_read_lock();
		for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
			struct cnic_ulp_ops *ulp_ops;
			void *ctx;

			ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
			if (!ulp_ops || !ulp_ops->indicate_netevent)
				continue;

			ctx = cp->ulp_handle[if_type];

			ulp_ops->indicate_netevent(ctx, event);
		}
		rcu_read_unlock();

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

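/* Shared cleanup for the module-init error path and module exit: stop
 * and free every remaining cnic device, then release the UIO devices,
 * which live on cnic_udev_list independently of their cnic_dev.
 */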
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

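/* Module entry point.  The netdevice notifier is registered first so
 * that existing bnx2/bnx2x interfaces are discovered through the
 * NETDEV_REGISTER replay performed by register_netdevice_notifier();
 * cnic_wq then serves the driver's deferred work.
 */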
static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);