/*
 * Copyright (C) 2005 - 2015 Avago Technologies
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Costa Mesa, CA 92626
 */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 #include <linux/iscsi_boot_sysfs.h>
30 #include <linux/module.h>
31 #include <linux/bsg-lib.h>
33 #include <scsi/libiscsi.h>
34 #include <scsi/scsi_bsg_iscsi.h>
35 #include <scsi/scsi_netlink.h>
36 #include <scsi/scsi_transport_iscsi.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi.h>
/*
 * Module-load tunables.
 * NOTE(review): be_iopoll_budget and enable_msix are declared unsigned int
 * but registered with module_param(..., int, ...) — signedness mismatch to
 * confirm against the rest of the driver.
 */
47 static unsigned int be_iopoll_budget = 10;
48 static unsigned int be_max_phys_size = 64;
49 static unsigned int enable_msix = 1;
51 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
52 MODULE_VERSION(BUILD_STR);
53 MODULE_AUTHOR("Avago Technologies");
54 MODULE_LICENSE("GPL");
55 module_param(be_iopoll_budget, int, 0);
56 module_param(enable_msix, int, 0);
57 module_param(be_max_phys_size, uint, S_IRUGO);
58 MODULE_PARM_DESC(be_max_phys_size,
59 "Maximum Size (In Kilobytes) of physically contiguous "
60 "memory that can be allocated. Range is 16 - 128");
/*
 * beiscsi_disp_param(_name) - generate the sysfs "show" handler for the
 * per-adapter attribute phba->attr_<_name>: prints its value as "%d\n".
 * NOTE(review): several continuation lines of this macro (the return type,
 * braces and final snprintf argument) are elided in this excerpt.
 */
62 #define beiscsi_disp_param(_name)\
64 beiscsi_##_name##_disp(struct device *dev,\
65 struct device_attribute *attrib, char *buf) \
67 struct Scsi_Host *shost = class_to_shost(dev);\
68 struct beiscsi_hba *phba = iscsi_host_priv(shost); \
69 uint32_t param_val = 0; \
70 param_val = phba->attr_##_name;\
71 return snprintf(buf, PAGE_SIZE, "%d\n",\
/*
 * beiscsi_change_param(_name, _minval, _maxval, _defaval) - generate the
 * runtime update helper for phba->attr_<_name>: accept and store the new
 * value when it lies in [_minval, _maxval], otherwise log the allowed range
 * and leave the attribute unchanged.
 * NOTE(review): return statements and closing brace are elided in this
 * excerpt.
 */
75 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
77 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
79 if (val >= _minval && val <= _maxval) {\
80 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
81 "BA_%d : beiscsi_"#_name" updated "\
82 "from 0x%x ==> 0x%x\n",\
83 phba->attr_##_name, val); \
84 phba->attr_##_name = val;\
87 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
88 "BA_%d beiscsi_"#_name" attribute "\
89 "cannot be updated to 0x%x, "\
90 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
/*
 * beiscsi_store_param(_name) - generate the sysfs "store" handler for the
 * per-adapter attribute phba->attr_<_name>.  Rejects non-numeric input,
 * parses the value with sscanf("%i") and delegates range checking to the
 * matching beiscsi_<_name>_change() helper.
 *
 * Fixes vs. excerpt: the mis-encoded "¶m_val" token is restored to
 * "&param_val", and the elided return paths / braces are reinstated.
 */
#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}
/*
 * beiscsi_init_param(_name, _minval, _maxval, _defval) - generate the
 * module-load initializer for phba->attr_<_name>: accept the module
 * parameter value when in range, otherwise log the allowed range and fall
 * back to _defval.
 * NOTE(review): return statements and closing brace are elided in this
 * excerpt.
 */
113 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \
115 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
117 if (val >= _minval && val <= _maxval) {\
118 phba->attr_##_name = val;\
121 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
122 "BA_%d beiscsi_"#_name" attribute " \
123 "cannot be updated to 0x%x, "\
124 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
125 phba->attr_##_name = _defval;\
/*
 * BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) - stamp out a
 * complete read/write adapter attribute: the module parameter, its
 * description, the show/change/store/init handlers generated by the macros
 * above, and the DEVICE_ATTR() wiring them into sysfs (root-writable).
 */
129 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
130 static uint beiscsi_##_name = _defval;\
131 module_param(beiscsi_##_name, uint, S_IRUGO);\
132 MODULE_PARM_DESC(beiscsi_##_name, _descp);\
133 beiscsi_disp_param(_name)\
134 beiscsi_change_param(_name, _minval, _maxval, _defval)\
135 beiscsi_store_param(_name)\
136 beiscsi_init_param(_name, _minval, _maxval, _defval)\
137 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
138 beiscsi_##_name##_disp, beiscsi_##_name##_store)
/*
 * Logging bit-mask attribute.  When a new log level is added, update the
 * MAX allowed value (currently 0xFF) for log_enable accordingly.
 */
144 BEISCSI_RW_ATTR(log_enable, 0x00,
145 0xFF, 0x00, "Enable logging Bit Mask\n"
146 "\t\t\t\tInitialization Events : 0x01\n"
147 "\t\t\t\tMailbox Events : 0x02\n"
148 "\t\t\t\tMiscellaneous Events : 0x04\n"
149 "\t\t\t\tError Handling : 0x08\n"
150 "\t\t\t\tIO Path Events : 0x10\n"
151 "\t\t\t\tConfiguration Path : 0x20\n"
152 "\t\t\t\tiSCSI Protocol : 0x40\n");
/* Read-only informational sysfs attributes backed by *_disp helpers. */
154 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
155 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
156 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
157 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
158 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
159 beiscsi_active_session_disp, NULL);
160 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
161 beiscsi_free_session_disp, NULL);
162 struct device_attribute *beiscsi_attrs[] = {
163 &dev_attr_beiscsi_log_enable,
164 &dev_attr_beiscsi_drvr_ver,
165 &dev_attr_beiscsi_adapter_family,
166 &dev_attr_beiscsi_fw_ver,
167 &dev_attr_beiscsi_active_session_count,
168 &dev_attr_beiscsi_free_session_count,
169 &dev_attr_beiscsi_phys_port,
/*
 * Human-readable names for completion-queue entry codes, indexed by the
 * CQE code, used only for logging.
 * NOTE(review): the first few entries of this table are elided in this
 * excerpt — compare against the full source before relying on indices.
 */
173 static char const *cqe_desc[] = {
176 "SOL_CMD_KILLED_DATA_DIGEST_ERR",
177 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
178 "CXN_KILLED_BURST_LEN_MISMATCH",
179 "CXN_KILLED_AHS_RCVD",
180 "CXN_KILLED_HDR_DIGEST_ERR",
181 "CXN_KILLED_UNKNOWN_HDR",
182 "CXN_KILLED_STALE_ITT_TTT_RCVD",
183 "CXN_KILLED_INVALID_ITT_TTT_RCVD",
184 "CXN_KILLED_RST_RCVD",
185 "CXN_KILLED_TIMED_OUT",
186 "CXN_KILLED_RST_SENT",
187 "CXN_KILLED_FIN_RCVD",
188 "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
189 "CXN_KILLED_BAD_WRB_INDEX_ERROR",
190 "CXN_KILLED_OVER_RUN_RESIDUAL",
191 "CXN_KILLED_UNDER_RUN_RESIDUAL",
192 "CMD_KILLED_INVALID_STATSN_RCVD",
193 "CMD_KILLED_INVALID_R2T_RCVD",
194 "CMD_CXN_KILLED_LUN_INVALID",
195 "CMD_CXN_KILLED_ICD_INVALID",
196 "CMD_CXN_KILLED_ITT_INVALID",
197 "CMD_CXN_KILLED_SEQ_OUTOFORDER",
198 "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
199 "CXN_INVALIDATE_NOTIFY",
200 "CXN_INVALIDATE_INDEX_NOTIFY",
201 "CMD_INVALIDATED_NOTIFY",
204 "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
206 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
207 "SOL_CMD_KILLED_DIF_ERR",
208 "CXN_KILLED_SYN_RCVD",
209 "CXN_KILLED_IMM_DATA_RCVD"
212 static int beiscsi_slave_configure(struct scsi_device *sdev)
214 blk_queue_max_segment_size(sdev->request_queue, 65536);
/*
 * beiscsi_eh_abort - SCSI error-handler abort for one command.
 * Under the session forward lock, validates that the task still owns the
 * command, marks its WRB invalid, asks the firmware (mgmt_invalidate_icds)
 * to invalidate the ICD, waits for the MCC completion, then hands off to
 * libiscsi's iscsi_eh_abort() for the protocol-level abort.
 * NOTE(review): this excerpt elides several lines (early-return bodies,
 * num_invalidate assignment, closing braces) — verify against full source.
 */
218 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
220 struct iscsi_cls_session *cls_session;
221 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
222 struct beiscsi_io_task *aborted_io_task;
223 struct iscsi_conn *conn;
224 struct beiscsi_conn *beiscsi_conn;
225 struct beiscsi_hba *phba;
226 struct iscsi_session *session;
227 struct invalidate_command_table *inv_tbl;
228 struct be_dma_mem nonemb_cmd;
229 unsigned int cid, tag, num_invalidate;
232 cls_session = starget_to_session(scsi_target(sc->device));
233 session = cls_session->dd_data;
/* Task already completed or never started: nothing for the HW to abort. */
235 spin_lock_bh(&session->frwd_lock);
236 if (!aborted_task || !aborted_task->sc) {
238 spin_unlock_bh(&session->frwd_lock);
242 aborted_io_task = aborted_task->dd_data;
243 if (!aborted_io_task->scsi_cmnd) {
244 /* raced or invalid command */
245 spin_unlock_bh(&session->frwd_lock);
248 spin_unlock_bh(&session->frwd_lock);
249 /* Invalidate WRB Posted for this Task */
250 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
251 aborted_io_task->pwrb_handle->pwrb,
254 conn = aborted_task->conn;
255 beiscsi_conn = conn->dd_data;
256 phba = beiscsi_conn->phba;
258 /* invalidate iocb */
259 cid = beiscsi_conn->beiscsi_conn_cid;
260 inv_tbl = phba->inv_tbl;
261 memset(inv_tbl, 0x0, sizeof(*inv_tbl));
263 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
/* DMA buffer for the non-embedded firmware invalidate command. */
265 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
266 sizeof(struct invalidate_commands_params_in),
268 if (nonemb_cmd.va == NULL) {
269 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
270 "BM_%d : Failed to allocate memory for"
271 "mgmt_invalidate_icds\n");
274 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
276 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
279 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
280 "BM_%d : mgmt_invalidate_icds could not be"
282 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
283 nonemb_cmd.va, nonemb_cmd.dma);
/* Wait for the MCC completion; the helper frees nonemb_cmd on timeout. */
288 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
290 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
291 nonemb_cmd.va, nonemb_cmd.dma);
293 return iscsi_eh_abort(sc);
/*
 * beiscsi_eh_device_reset - SCSI error-handler LUN reset.
 * Walks every active task on the lead connection, collects those targeting
 * the same LUN as @sc into phba->inv_tbl, marks their WRBs invalid, issues
 * one firmware mgmt_invalidate_icds for the batch, then defers to
 * libiscsi's iscsi_eh_device_reset().
 * NOTE(review): this excerpt elides lines (continue statements, loop close,
 * num_invalidate/inv_tbl increments) — verify against full source.
 */
296 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
298 struct iscsi_task *abrt_task;
299 struct beiscsi_io_task *abrt_io_task;
300 struct iscsi_conn *conn;
301 struct beiscsi_conn *beiscsi_conn;
302 struct beiscsi_hba *phba;
303 struct iscsi_session *session;
304 struct iscsi_cls_session *cls_session;
305 struct invalidate_command_table *inv_tbl;
306 struct be_dma_mem nonemb_cmd;
307 unsigned int cid, tag, i, num_invalidate;
310 /* invalidate iocbs */
311 cls_session = starget_to_session(scsi_target(sc->device));
312 session = cls_session->dd_data;
313 spin_lock_bh(&session->frwd_lock);
/* No logged-in connection: nothing to reset at the firmware level. */
314 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
315 spin_unlock_bh(&session->frwd_lock);
318 conn = session->leadconn;
319 beiscsi_conn = conn->dd_data;
320 phba = beiscsi_conn->phba;
321 cid = beiscsi_conn->beiscsi_conn_cid;
322 inv_tbl = phba->inv_tbl;
323 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
325 for (i = 0; i < conn->session->cmds_max; i++) {
326 abrt_task = conn->session->cmds[i];
327 abrt_io_task = abrt_task->dd_data;
328 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
/* Only tasks aimed at the LUN being reset are invalidated. */
331 if (sc->device->lun != abrt_task->sc->device->lun)
334 /* Invalidate WRB Posted for this Task */
335 AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
336 abrt_io_task->pwrb_handle->pwrb,
340 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
344 spin_unlock_bh(&session->frwd_lock);
345 inv_tbl = phba->inv_tbl;
347 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
348 sizeof(struct invalidate_commands_params_in),
350 if (nonemb_cmd.va == NULL) {
351 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
352 "BM_%d : Failed to allocate memory for"
353 "mgmt_invalidate_icds\n");
356 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
357 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
358 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
361 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
362 "BM_%d : mgmt_invalidate_icds could not be"
364 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
365 nonemb_cmd.va, nonemb_cmd.dma);
369 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
371 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
372 nonemb_cmd.va, nonemb_cmd.dma);
373 return iscsi_eh_device_reset(sc);
/*
 * beiscsi_show_boot_tgt_info - iscsi_boot_sysfs "show" callback for the
 * boot target: formats one field of the cached boot session/connection
 * into @buf, selected by @type.
 * NOTE(review): later cases write through "str" while early cases use
 * "buf" — the elided declaration is presumably "char *str = buf;"; confirm
 * against the full source.  Break statements and the default case are also
 * elided in this excerpt.
 */
376 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
378 struct beiscsi_hba *phba = data;
379 struct mgmt_session_info *boot_sess = &phba->boot_sess;
380 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
385 case ISCSI_BOOT_TGT_NAME:
386 rc = sprintf(buf, "%.*s\n",
387 (int)strlen(boot_sess->target_name),
388 (char *)&boot_sess->target_name);
/* ip_type 0x1 denotes IPv4 here; anything else is printed as IPv6. */
390 case ISCSI_BOOT_TGT_IP_ADDR:
391 if (boot_conn->dest_ipaddr.ip_type == 0x1)
392 rc = sprintf(buf, "%pI4\n",
393 (char *)&boot_conn->dest_ipaddr.addr);
395 rc = sprintf(str, "%pI6\n",
396 (char *)&boot_conn->dest_ipaddr.addr);
398 case ISCSI_BOOT_TGT_PORT:
399 rc = sprintf(str, "%d\n", boot_conn->dest_port);
402 case ISCSI_BOOT_TGT_CHAP_NAME:
403 rc = sprintf(str, "%.*s\n",
404 boot_conn->negotiated_login_options.auth_data.chap.
405 target_chap_name_length,
406 (char *)&boot_conn->negotiated_login_options.
407 auth_data.chap.target_chap_name);
409 case ISCSI_BOOT_TGT_CHAP_SECRET:
410 rc = sprintf(str, "%.*s\n",
411 boot_conn->negotiated_login_options.auth_data.chap.
412 target_secret_length,
413 (char *)&boot_conn->negotiated_login_options.
414 auth_data.chap.target_secret);
416 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
417 rc = sprintf(str, "%.*s\n",
418 boot_conn->negotiated_login_options.auth_data.chap.
419 intr_chap_name_length,
420 (char *)&boot_conn->negotiated_login_options.
421 auth_data.chap.intr_chap_name);
423 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
424 rc = sprintf(str, "%.*s\n",
425 boot_conn->negotiated_login_options.auth_data.chap.
427 (char *)&boot_conn->negotiated_login_options.
428 auth_data.chap.intr_secret);
430 case ISCSI_BOOT_TGT_FLAGS:
431 rc = sprintf(str, "2\n");
433 case ISCSI_BOOT_TGT_NIC_ASSOC:
434 rc = sprintf(str, "0\n");
/*
 * beiscsi_show_boot_ini_info - iscsi_boot_sysfs "show" callback for the
 * initiator: currently reports only the initiator iSCSI name from the
 * cached boot session.
 * NOTE(review): switch header, default case and return are elided here.
 */
443 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
445 struct beiscsi_hba *phba = data;
450 case ISCSI_BOOT_INI_INITIATOR_NAME:
451 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
/*
 * beiscsi_show_boot_eth_info - iscsi_boot_sysfs "show" callback for the
 * boot NIC: flags, index, and MAC address (fetched via
 * beiscsi_get_macaddr()).
 * NOTE(review): switch header, break/default and return are elided here.
 */
460 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
462 struct beiscsi_hba *phba = data;
467 case ISCSI_BOOT_ETH_FLAGS:
468 rc = sprintf(str, "2\n");
470 case ISCSI_BOOT_ETH_INDEX:
471 rc = sprintf(str, "0\n");
473 case ISCSI_BOOT_ETH_MAC:
474 rc = beiscsi_get_macaddr(str, phba);
/*
 * beiscsi_tgt_get_attr_visibility - sysfs mode for boot-target attributes;
 * all listed target fields share one visibility setting (the actual mode
 * assignment is elided in this excerpt).
 */
484 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
489 case ISCSI_BOOT_TGT_NAME:
490 case ISCSI_BOOT_TGT_IP_ADDR:
491 case ISCSI_BOOT_TGT_PORT:
492 case ISCSI_BOOT_TGT_CHAP_NAME:
493 case ISCSI_BOOT_TGT_CHAP_SECRET:
494 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
495 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
496 case ISCSI_BOOT_TGT_NIC_ASSOC:
497 case ISCSI_BOOT_TGT_FLAGS:
/*
 * beiscsi_ini_get_attr_visibility - sysfs mode for initiator attributes
 * (mode assignment and default case elided in this excerpt).
 */
507 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
512 case ISCSI_BOOT_INI_INITIATOR_NAME:
/*
 * beiscsi_eth_get_attr_visibility - sysfs mode for boot-NIC attributes
 * (mode assignment and default case elided in this excerpt).
 */
523 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
528 case ISCSI_BOOT_ETH_FLAGS:
529 case ISCSI_BOOT_ETH_MAC:
530 case ISCSI_BOOT_ETH_INDEX:
/*------------------- PCI Driver operations and data ----------------- */
/*
 * PCI IDs this driver binds to (BE2/BE3 and Lancer/Skyhawk iSCSI
 * functions).  NOTE(review): the terminating { 0 } entry and closing brace
 * are elided in this excerpt; the table must be zero-terminated.
 */
541 static const struct pci_device_id beiscsi_pci_id_table[] = {
542 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
543 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
544 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
545 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
546 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
547 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
550 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
/*
 * SCSI host template: queuecommand and most error handling are delegated
 * to libiscsi; abort and device reset are wrapped locally so the firmware
 * ICDs can be invalidated first.
 */
553 static struct scsi_host_template beiscsi_sht = {
554 .module = THIS_MODULE,
555 .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
556 .proc_name = DRV_NAME,
557 .queuecommand = iscsi_queuecommand,
558 .change_queue_depth = scsi_change_queue_depth,
559 .slave_configure = beiscsi_slave_configure,
560 .target_alloc = iscsi_target_alloc,
561 .eh_abort_handler = beiscsi_eh_abort,
562 .eh_device_reset_handler = beiscsi_eh_device_reset,
563 .eh_target_reset_handler = iscsi_eh_session_reset,
564 .shost_attrs = beiscsi_attrs,
565 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
566 .can_queue = BE2_IO_DEPTH,
568 .max_sectors = BEISCSI_MAX_SECTORS,
569 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
570 .use_clustering = ENABLE_CLUSTERING,
571 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
572 .track_queue_depth = 1,
/* iSCSI transport template, registered at module init and attached to
 * each Scsi_Host in beiscsi_hba_alloc(). */
575 static struct scsi_transport_template *beiscsi_scsi_transport;
/*
 * beiscsi_hba_alloc - allocate a Scsi_Host with embedded beiscsi_hba priv,
 * set host limits, take a reference on the PCI device and stash the hba in
 * its drvdata.  Returns the new hba (error-return lines are elided in
 * this excerpt).
 */
577 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
579 struct beiscsi_hba *phba;
580 struct Scsi_Host *shost;
582 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
584 dev_err(&pcidev->dev,
585 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
588 shost->max_id = BE2_MAX_SESSIONS;
589 shost->max_channel = 0;
590 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
591 shost->max_lun = BEISCSI_NUM_MAX_LUN;
592 shost->transportt = beiscsi_scsi_transport;
593 phba = iscsi_host_priv(shost);
594 memset(phba, 0, sizeof(*phba));
596 phba->pcidev = pci_dev_get(pcidev);
597 pci_set_drvdata(pcidev, phba);
/* 0xFFFFFFFF marks the interface handle as not yet obtained from FW. */
598 phba->interface_handle = 0xFFFFFFFF;
/*
 * beiscsi_unmap_pci_function - undo beiscsi_map_pci_bars(): iounmap the
 * CSR, doorbell and PCI-config BAR mappings (the guarding if-statements
 * are elided in this excerpt).
 */
603 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
606 iounmap(phba->csr_va);
610 iounmap(phba->db_va);
614 iounmap(phba->pci_va);
/*
 * beiscsi_map_pci_bars - ioremap the three regions the driver touches:
 * BAR2 = control/status registers, BAR4 = doorbells (128 KiB), and the
 * PCI-config BAR whose number depends on the adapter generation
 * (BE_GEN2 check visible; the alternate assignment is elided).
 * On any mapping failure, previously mapped BARs are released via
 * beiscsi_unmap_pci_function().
 */
619 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
620 struct pci_dev *pcidev)
625 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
626 pci_resource_len(pcidev, 2));
629 phba->ctrl.csr = addr;
631 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
633 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
636 phba->ctrl.db = addr;
638 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
640 if (phba->generation == BE_GEN2)
645 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
646 pci_resource_len(pcidev, pcicfg_reg));
650 phba->ctrl.pcicfg = addr;
652 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
656 beiscsi_unmap_pci_function(phba);
/*
 * beiscsi_enable_pci - enable the device, claim its regions, enable bus
 * mastering and configure DMA masks (try 64-bit, fall back to 32-bit).
 * Uses goto-based unwind: pci_region_release / pci_dev_disable labels
 * release what was acquired (label lines elided in this excerpt).
 */
660 static int beiscsi_enable_pci(struct pci_dev *pcidev)
664 ret = pci_enable_device(pcidev);
666 dev_err(&pcidev->dev,
667 "beiscsi_enable_pci - enable device failed\n");
671 ret = pci_request_regions(pcidev, DRV_NAME);
673 dev_err(&pcidev->dev,
674 "beiscsi_enable_pci - request region failed\n");
675 goto pci_dev_disable;
678 pci_set_master(pcidev);
679 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
681 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
683 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
684 goto pci_region_release;
686 ret = pci_set_consistent_dma_mask(pcidev,
690 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
692 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
693 goto pci_region_release;
699 pci_release_regions(pcidev);
701 pci_disable_device(pcidev);
/*
 * be_ctrl_init - initialize the controller info block: map PCI BARs,
 * allocate the MCC mailbox DMA buffer with 16 bytes of slack so both the
 * VA and bus address can be aligned to 16, and init the mailbox/MCC locks.
 */
706 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
708 struct be_ctrl_info *ctrl = &phba->ctrl;
709 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
710 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
714 status = beiscsi_map_pci_bars(phba, pdev);
717 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
718 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
719 mbox_mem_alloc->size,
720 &mbox_mem_alloc->dma);
721 if (!mbox_mem_alloc->va) {
722 beiscsi_unmap_pci_function(phba);
/* Aligned view into the over-allocated buffer; HW requires 16-byte
 * alignment for the mailbox. */
726 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
727 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
728 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
729 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
730 spin_lock_init(&ctrl->mbox_lock);
731 spin_lock_init(&phba->ctrl.mcc_lock);
732 spin_lock_init(&phba->ctrl.mcc_cq_lock);
/**
 * beiscsi_get_params - derive driver configuration parameters
 * @phba: driver private structure for this adapter
 *
 * Sums the CID counts of both ULPs, aligns each ULP's ICD start/count to
 * the per-page ICD posting granularity (PAGE_SIZE / (BE2_SGE *
 * sizeof(struct iscsi_sge))), discards ICDs lost to alignment, and fills
 * in phba->params (queue depths, SGEs per IO, default PDU sizes, EQ/CQ
 * sizes, WRBs per connection).
 * NOTE(review): several alignment-arithmetic lines are elided in this
 * excerpt.
 */
741 static void beiscsi_get_params(struct beiscsi_hba *phba)
743 uint32_t total_cid_count = 0;
744 uint32_t total_icd_count = 0;
747 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
748 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
750 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
751 uint32_t align_mask = 0;
752 uint32_t icd_post_per_page = 0;
753 uint32_t icd_count_unavailable = 0;
754 uint32_t icd_start = 0, icd_count = 0;
755 uint32_t icd_start_align = 0, icd_count_align = 0;
757 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
758 icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
759 icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
761 /* Get ICD count that can be posted on each page */
762 icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
763 sizeof(struct iscsi_sge)));
764 align_mask = (icd_post_per_page - 1);
766 /* Check if icd_start is aligned ICD per page posting */
767 if (icd_start % icd_post_per_page) {
768 icd_start_align = ((icd_start +
772 iscsi_icd_start[ulp_num] =
776 icd_count_align = (icd_count & ~align_mask);
778 /* ICD discarded in the process of alignment */
780 icd_count_unavailable = ((icd_start_align -
785 /* Updated ICD count available */
786 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
787 icd_count_unavailable);
789 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
790 "BM_%d : Aligned ICD values\n"
791 "\t ICD Start : %d\n"
792 "\t ICD Count : %d\n"
793 "\t ICD Discarded : %d\n",
795 iscsi_icd_start[ulp_num],
797 iscsi_icd_count[ulp_num],
798 icd_count_unavailable);
803 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
804 phba->params.ios_per_ctrl = (total_icd_count -
806 BE2_TMFS + BE2_NOPOUT_REQ));
807 phba->params.cxns_per_ctrl = total_cid_count;
808 phba->params.asyncpdus_per_ctrl = total_cid_count;
809 phba->params.icds_per_ctrl = total_icd_count;
810 phba->params.num_sge_per_io = BE2_SGE;
811 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
812 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
813 phba->params.eq_timer = 64;
814 phba->params.num_eq_entries = 1024;
815 phba->params.num_cq_entries = 1024;
816 phba->params.wrbs_per_cxn = 256;
/*
 * hwi_ring_eq_db - ring the event-queue doorbell: encode rearm/clear/event
 * flags, the number of EQEs consumed, and the split low/high EQ id bits
 * into one 32-bit value written to DB_EQ_OFFSET.
 */
819 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
820 unsigned int id, unsigned int clr_interrupt,
821 unsigned int num_processed,
822 unsigned char rearm, unsigned char event)
827 val |= 1 << DB_EQ_REARM_SHIFT;
829 val |= 1 << DB_EQ_CLR_SHIFT;
831 val |= 1 << DB_EQ_EVNT_SHIFT;
833 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
834 /* Setting lower order EQ_ID Bits */
835 val |= (id & DB_EQ_RING_ID_LOW_MASK);
837 /* Setting Higher order EQ_ID Bits */
838 val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
839 DB_EQ_RING_ID_HIGH_MASK)
840 << DB_EQ_HIGH_SET_SHIFT);
842 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
/**
 * be_isr_mcc - interrupt handler for the MCC-only MSI-X vector
 * @irq: interrupt number
 * @dev_id: per-vector cookie (original comment says "host adapter
 *          structure"; the assignment lines are elided — confirm it is the
 *          be_eq_obj passed to request_irq in beiscsi_init_irqs)
 *
 * Drains valid EQ entries; any entry whose resource id matches the MCC CQ
 * sets todo_mcc_cq (under isr_lock) so the workqueue processes it, then
 * the EQ doorbell is rung with rearm=1.
 */
850 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
852 struct beiscsi_hba *phba;
853 struct be_eq_entry *eqe = NULL;
854 struct be_queue_info *eq;
855 struct be_queue_info *mcc;
856 unsigned int num_eq_processed;
857 struct be_eq_obj *pbe_eq;
863 mcc = &phba->ctrl.mcc_obj.cq;
864 eqe = queue_tail_node(eq);
866 num_eq_processed = 0;
868 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
870 if (((eqe->dw[offsetof(struct amap_eq_entry,
872 EQE_RESID_MASK) >> 16) == mcc->id) {
873 spin_lock_irqsave(&phba->isr_lock, flags);
874 pbe_eq->todo_mcc_cq = true;
875 spin_unlock_irqrestore(&phba->isr_lock, flags);
/* Consume the entry: clear valid and advance the tail. */
877 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
879 eqe = queue_tail_node(eq);
882 if (pbe_eq->todo_mcc_cq)
883 queue_work(phba->wq, &pbe_eq->work_cqs);
884 if (num_eq_processed)
885 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
/**
 * be_isr_msix - interrupt handler for a per-CPU I/O MSI-X vector
 * @irq: interrupt number
 * @dev_id: per-vector cookie (assignment lines elided in this excerpt;
 *          presumably the be_eq_obj for this vector — confirm)
 *
 * Drains valid EQ entries and schedules blk-iopoll to process the
 * completion queue, then rings the EQ doorbell with rearm=0 (iopoll
 * re-arms when it finishes).
 */
895 static irqreturn_t be_isr_msix(int irq, void *dev_id)
897 struct beiscsi_hba *phba;
898 struct be_eq_entry *eqe = NULL;
899 struct be_queue_info *eq;
900 struct be_queue_info *cq;
901 unsigned int num_eq_processed;
902 struct be_eq_obj *pbe_eq;
907 eqe = queue_tail_node(eq);
910 num_eq_processed = 0;
911 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
913 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
914 blk_iopoll_sched(&pbe_eq->iopoll);
916 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
918 eqe = queue_tail_node(eq);
922 if (num_eq_processed)
923 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
/**
 * be_isr - legacy INTx interrupt handler (single shared vector)
 * @irq: interrupt number
 * @dev_id: per-device cookie (assignment lines elided in this excerpt)
 *
 * Reads CEV_ISR0 for this PCI function to confirm the interrupt is ours,
 * then walks EQ 0: MCC completions set todo_mcc_cq for the workqueue,
 * I/O completions schedule blk-iopoll.  The final doorbell write rearms
 * only when all processed entries were MCC events; otherwise iopoll is
 * responsible for rearming.
 */
933 static irqreturn_t be_isr(int irq, void *dev_id)
935 struct beiscsi_hba *phba;
936 struct hwi_controller *phwi_ctrlr;
937 struct hwi_context_memory *phwi_context;
938 struct be_eq_entry *eqe = NULL;
939 struct be_queue_info *eq;
940 struct be_queue_info *mcc;
941 unsigned long flags, index;
942 unsigned int num_mcceq_processed, num_ioeq_processed;
943 struct be_ctrl_info *ctrl;
944 struct be_eq_obj *pbe_eq;
949 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
950 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
954 phwi_ctrlr = phba->phwi_ctrlr;
955 phwi_context = phwi_ctrlr->phwi_ctxt;
956 pbe_eq = &phwi_context->be_eq[0];
958 eq = &phwi_context->be_eq[0].q;
959 mcc = &phba->ctrl.mcc_obj.cq;
961 eqe = queue_tail_node(eq);
963 num_ioeq_processed = 0;
964 num_mcceq_processed = 0;
965 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
967 if (((eqe->dw[offsetof(struct amap_eq_entry,
969 EQE_RESID_MASK) >> 16) == mcc->id) {
970 spin_lock_irqsave(&phba->isr_lock, flags);
971 pbe_eq->todo_mcc_cq = true;
972 spin_unlock_irqrestore(&phba->isr_lock, flags);
973 num_mcceq_processed++;
975 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
976 blk_iopoll_sched(&pbe_eq->iopoll);
977 num_ioeq_processed++;
979 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
981 eqe = queue_tail_node(eq);
983 if (num_ioeq_processed || num_mcceq_processed) {
984 if (pbe_eq->todo_mcc_cq)
985 queue_work(phba->wq, &pbe_eq->work_cqs);
987 if ((num_mcceq_processed) && (!num_ioeq_processed))
988 hwi_ring_eq_db(phba, eq->id, 0,
989 (num_ioeq_processed +
990 num_mcceq_processed) , 1, 1);
992 hwi_ring_eq_db(phba, eq->id, 0,
993 (num_ioeq_processed +
994 num_mcceq_processed), 0, 1);
/*
 * beiscsi_init_irqs - register interrupt handlers.
 * MSI-X mode: one be_isr_msix per CPU EQ plus a final be_isr_mcc vector,
 * each with its own allocated name buffer; on any failure the
 * free_msix_irqs label unwinds the already-registered vectors and names.
 * Legacy mode: one shared be_isr on pcidev->irq.
 */
1001 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
1003 struct pci_dev *pcidev = phba->pcidev;
1004 struct hwi_controller *phwi_ctrlr;
1005 struct hwi_context_memory *phwi_context;
1006 int ret, msix_vec, i, j;
1008 phwi_ctrlr = phba->phwi_ctrlr;
1009 phwi_context = phwi_ctrlr->phwi_ctxt;
1011 if (phba->msix_enabled) {
1012 for (i = 0; i < phba->num_cpus; i++) {
1013 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
1015 if (!phba->msi_name[i]) {
1017 goto free_msix_irqs;
1020 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
1021 phba->shost->host_no, i);
1022 msix_vec = phba->msix_entries[i].vector;
1023 ret = request_irq(msix_vec, be_isr_msix, 0,
1025 &phwi_context->be_eq[i]);
1027 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1028 "BM_%d : beiscsi_init_irqs-Failed to"
1029 "register msix for i = %d\n",
1031 kfree(phba->msi_name[i]);
1032 goto free_msix_irqs;
/* Last vector (i == num_cpus) is dedicated to MCC events. */
1035 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
1036 if (!phba->msi_name[i]) {
1038 goto free_msix_irqs;
1040 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
1041 phba->shost->host_no);
1042 msix_vec = phba->msix_entries[i].vector;
1043 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
1044 &phwi_context->be_eq[i]);
1046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
1047 "BM_%d : beiscsi_init_irqs-"
1048 "Failed to register beiscsi_msix_mcc\n");
1049 kfree(phba->msi_name[i]);
1050 goto free_msix_irqs;
1054 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
/* NOTE(review): "irq\\n" below prints a literal backslash-n rather than a
 * newline — likely a typo for "\n"; fix separately. */
1057 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1058 "BM_%d : beiscsi_init_irqs-"
1059 "Failed to register irq\\n");
1065 for (j = i - 1; j >= 0; j--) {
1066 kfree(phba->msi_name[j]);
1067 msix_vec = phba->msix_entries[j].vector;
1068 free_irq(msix_vec, &phwi_context->be_eq[j]);
/*
 * hwi_ring_cq_db - ring the completion-queue doorbell: encode the rearm
 * flag, number of CQEs consumed, and the split low/high CQ id bits into
 * one 32-bit value written to DB_CQ_OFFSET.
 */
1073 void hwi_ring_cq_db(struct beiscsi_hba *phba,
1074 unsigned int id, unsigned int num_processed,
1075 unsigned char rearm, unsigned char event)
1080 val |= 1 << DB_CQ_REARM_SHIFT;
1082 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
1084 /* Setting lower order CQ_ID Bits */
1085 val |= (id & DB_CQ_RING_ID_LOW_MASK);
1087 /* Setting Higher order CQ_ID Bits */
1088 val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
1089 DB_CQ_RING_ID_HIGH_MASK)
1090 << DB_CQ_HIGH_SET_SHIFT);
1092 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
/*
 * beiscsi_process_async_pdu - hand an unsolicited PDU from the adapter to
 * libiscsi.  For LOGIN/TEXT responses the hardware ITT is rewritten back
 * to the libiscsi ITT stashed in the login task; unrecognized opcodes are
 * logged.  Finally the PDU is completed under the session back lock.
 * NOTE(review): case bodies for NOOP_IN/ASYNC_EVENT and break statements
 * are elided in this excerpt.
 */
1096 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1097 struct beiscsi_hba *phba,
1098 struct pdu_base *ppdu,
1099 unsigned long pdu_len,
1100 void *pbuffer, unsigned long buf_len)
1102 struct iscsi_conn *conn = beiscsi_conn->conn;
1103 struct iscsi_session *session = conn->session;
1104 struct iscsi_task *task;
1105 struct beiscsi_io_task *io_task;
1106 struct iscsi_hdr *login_hdr;
1108 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1109 PDUBASE_OPCODE_MASK) {
1110 case ISCSI_OP_NOOP_IN:
1114 case ISCSI_OP_ASYNC_EVENT:
1116 case ISCSI_OP_REJECT:
1118 WARN_ON(!(buf_len == 48));
1119 beiscsi_log(phba, KERN_ERR,
1120 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1121 "BM_%d : In ISCSI_OP_REJECT\n");
1123 case ISCSI_OP_LOGIN_RSP:
1124 case ISCSI_OP_TEXT_RSP:
1125 task = conn->login_task;
1126 io_task = task->dd_data;
/* Restore the ITT libiscsi assigned; the wire carries the HW ITT. */
1127 login_hdr = (struct iscsi_hdr *)ppdu;
1128 login_hdr->itt = io_task->libiscsi_itt;
1131 beiscsi_log(phba, KERN_WARNING,
1132 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1133 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1135 dw[offsetof(struct amap_pdu_base, opcode) / 32]
1136 & PDUBASE_OPCODE_MASK));
1140 spin_lock_bh(&session->back_lock);
1141 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1142 spin_unlock_bh(&session->back_lock);
/*
 * alloc_io_sgl_handle - pop the next free IO SGL handle from the circular
 * io_sgl_hndl_base pool, NULL-ing the slot and advancing/wrapping the
 * alloc index.  (The empty-pool else branch and return are elided in this
 * excerpt.)
 */
1146 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1148 struct sgl_handle *psgl_handle;
1150 if (phba->io_sgl_hndl_avbl) {
1151 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1152 "BM_%d : In alloc_io_sgl_handle,"
1153 " io_sgl_alloc_index=%d\n",
1154 phba->io_sgl_alloc_index);
1156 psgl_handle = phba->io_sgl_hndl_base[phba->
1157 io_sgl_alloc_index];
1158 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
1159 phba->io_sgl_hndl_avbl--;
1160 if (phba->io_sgl_alloc_index == (phba->params.
1162 phba->io_sgl_alloc_index = 0;
1164 phba->io_sgl_alloc_index++;
/*
 * free_io_sgl_handle - return an IO SGL handle to the circular pool at
 * io_sgl_free_index, advancing/wrapping the free index.  A non-NULL slot
 * indicates a double free (possible when clean_task runs after a failed
 * xmit_task/alloc_pdu) and is logged, leaving the pool untouched
 * (the early return is elided in this excerpt).
 */
1171 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1173 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1174 "BM_%d : In free_,io_sgl_free_index=%d\n",
1175 phba->io_sgl_free_index);
1177 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
1179 * this can happen if clean_task is called on a task that
1180 * failed in xmit_task or alloc_pdu.
1182 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1183 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
1184 "value there=%p\n", phba->io_sgl_free_index,
1185 phba->io_sgl_hndl_base
1186 [phba->io_sgl_free_index]);
1189 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
1190 phba->io_sgl_hndl_avbl++;
1191 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
1192 phba->io_sgl_free_index = 0;
1194 phba->io_sgl_free_index++;
/**
 * alloc_wrb_handle - allocate a WRB handle for a connection
 * @phba: the hba pointer
 * @cid: connection id to allocate for
 *
 * Pops the next handle from the per-connection circular WRB pool and links
 * it to the following handle via nxt_wrb_index.  Requires at least 2
 * handles available so the "next" pointer stays valid.  Caller holds the
 * session lock until submission to the chip.
 * (The else branch and return are elided in this excerpt.)
 */
1204 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
1206 struct hwi_wrb_context *pwrb_context;
1207 struct hwi_controller *phwi_ctrlr;
1208 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
1209 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
1211 phwi_ctrlr = phba->phwi_ctrlr;
1212 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1213 if (pwrb_context->wrb_handles_available >= 2) {
1214 pwrb_handle = pwrb_context->pwrb_handle_base[
1215 pwrb_context->alloc_index];
1216 pwrb_context->wrb_handles_available--;
1217 if (pwrb_context->alloc_index ==
1218 (phba->params.wrbs_per_cxn - 1))
1219 pwrb_context->alloc_index = 0;
1221 pwrb_context->alloc_index++;
1222 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1223 pwrb_context->alloc_index];
1224 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
/**
 * free_wrb_handle - return a WRB handle to its connection pool
 * @phba: the hba pointer
 * @pwrb_context: the context to free into
 * @pwrb_handle: the wrb_handle to free
 *
 * Stores the handle at free_index, bumps the available count and
 * advances/wraps the free index.  Caller holds the session lock until
 * submission to the chip.
 */
1239 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1240 struct wrb_handle *pwrb_handle)
1242 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1243 pwrb_context->wrb_handles_available++;
1244 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1245 pwrb_context->free_index = 0;
1247 pwrb_context->free_index++;
1249 beiscsi_log(phba, KERN_INFO,
1250 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1251 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
1252 "wrb_handles_available=%d\n",
1253 pwrb_handle, pwrb_context->free_index,
1254 pwrb_context->wrb_handles_available);
/*
 * alloc_mgmt_sgl_handle - pop the next free management (eh) SGL handle
 * from the circular eh_sgl_hndl_base pool.  The pool size is
 * icds_per_ctrl - ios_per_ctrl (the ICDs reserved for management).
 * (The empty-pool branch and return are elided in this excerpt.)
 */
1257 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1259 struct sgl_handle *psgl_handle;
1261 if (phba->eh_sgl_hndl_avbl) {
1262 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1263 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1264 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1265 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1266 phba->eh_sgl_alloc_index,
1267 phba->eh_sgl_alloc_index);
1269 phba->eh_sgl_hndl_avbl--;
1270 if (phba->eh_sgl_alloc_index ==
1271 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1273 phba->eh_sgl_alloc_index = 0;
1275 phba->eh_sgl_alloc_index++;
/*
 * free_mgmt_sgl_handle() - return an SGL handle to the eh (mgmt) pool,
 * warning on a double free; advances the circular eh_sgl_free_index.
 * NOTE(review): listing gaps elide braces, a "return" after the
 * double-free warning (presumably), and the wrap-around "else".
 */
1282 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1285 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1286 "BM_%d : In free_mgmt_sgl_handle,"
1287 "eh_sgl_free_index=%d\n",
1288 phba->eh_sgl_free_index);
/* non-NULL slot means the handle was never allocated out - double free */
1290 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1292 * this can happen if clean_task is called on a task that
1293 * failed in xmit_task or alloc_pdu.
1295 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1296 "BM_%d : Double Free in eh SGL ,"
1297 "eh_sgl_free_index=%d\n",
1298 phba->eh_sgl_free_index);
1301 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1302 phba->eh_sgl_hndl_avbl++;
1303 if (phba->eh_sgl_free_index ==
1304 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1305 phba->eh_sgl_free_index = 0;
1307 phba->eh_sgl_free_index++;
/*
 * be_complete_io() - complete a solicited SCSI I/O from a common_sol_cqe:
 * map the iSCSI response/status into task->sc->result, honour
 * under/overflow residuals, copy sense data on CHECK CONDITION, account
 * read bytes, then DMA-unmap and complete the libiscsi task.
 * NOTE(review): listing gaps (1330-1331, 1335-1338, 1342-1344, ...)
 * elide braces and, presumably, an early-return path when task->sc is
 * NULL guarding the first unmap - confirm against full source.
 */
1311 be_complete_io(struct beiscsi_conn *beiscsi_conn,
1312 struct iscsi_task *task,
1313 struct common_sol_cqe *csol_cqe)
1315 struct beiscsi_io_task *io_task = task->dd_data;
1316 struct be_status_bhs *sts_bhs =
1317 (struct be_status_bhs *)io_task->cmd_bhs;
1318 struct iscsi_conn *conn = beiscsi_conn->conn;
1319 unsigned char *sense;
1320 u32 resid = 0, exp_cmdsn, max_cmdsn;
1321 u8 rsp, status, flags;
/* unpack the chip-independent CQE copy produced by adapter_get_sol_cqe() */
1323 exp_cmdsn = csol_cqe->exp_cmdsn;
1324 max_cmdsn = (csol_cqe->exp_cmdsn +
1325 csol_cqe->cmd_wnd - 1);
1326 rsp = csol_cqe->i_resp;
1327 status = csol_cqe->i_sts;
1328 flags = csol_cqe->i_flags;
1329 resid = csol_cqe->res_cnt;
1332 if (io_task->scsi_cmnd) {
1333 scsi_dma_unmap(io_task->scsi_cmnd);
1334 io_task->scsi_cmnd = NULL;
1339 task->sc->result = (DID_OK << 16) | status;
1340 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1341 task->sc->result = DID_ERROR << 16;
1345 /* bidi not initially supported */
1346 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1347 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1348 task->sc->result = DID_ERROR << 16;
1350 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1351 scsi_set_resid(task->sc, resid);
/* short transfer below midlayer's underflow threshold is an error */
1352 if (!status && (scsi_bufflen(task->sc) - resid <
1353 task->sc->underflow))
1354 task->sc->result = DID_ERROR << 16;
1358 if (status == SAM_STAT_CHECK_CONDITION) {
/* sense data is preceded by a big-endian 16-bit length in the BHS */
1360 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
1362 sense = sts_bhs->sense_info + sizeof(unsigned short);
1363 sense_len = be16_to_cpu(*slen);
1364 memcpy(task->sc->sense_buffer, sense,
1365 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1368 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
1369 conn->rxdata_octets += resid;
1371 scsi_dma_unmap(io_task->scsi_cmnd);
1372 io_task->scsi_cmnd = NULL;
1373 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
/*
 * be_complete_logout() - build an ISCSI_OP_LOGOUT_RSP header from the
 * CQE fields and hand it to libiscsi via __iscsi_complete_pdu().
 * NOTE(review): listing gaps elide braces/blank lines only, as far as
 * the surviving line numbers show.
 */
1377 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1378 struct iscsi_task *task,
1379 struct common_sol_cqe *csol_cqe)
1381 struct iscsi_logout_rsp *hdr;
1382 struct beiscsi_io_task *io_task = task->dd_data;
1383 struct iscsi_conn *conn = beiscsi_conn->conn;
1385 hdr = (struct iscsi_logout_rsp *)task->hdr;
1386 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
1389 hdr->flags = csol_cqe->i_flags;
1390 hdr->response = csol_cqe->i_resp;
/* window upper edge = exp_cmdsn + cmd_wnd - 1, in wire byte order */
1391 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1392 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1393 csol_cqe->cmd_wnd - 1);
/* logout response carries no data segment */
1395 hdr->dlength[0] = 0;
1396 hdr->dlength[1] = 0;
1397 hdr->dlength[2] = 0;
1399 hdr->itt = io_task->libiscsi_itt;
1400 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * be_complete_tmf() - build an ISCSI_OP_SCSI_TMFUNC_RSP header from the
 * CQE fields and complete the PDU through libiscsi.
 */
1404 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1405 struct iscsi_task *task,
1406 struct common_sol_cqe *csol_cqe)
1408 struct iscsi_tm_rsp *hdr;
1409 struct iscsi_conn *conn = beiscsi_conn->conn;
1410 struct beiscsi_io_task *io_task = task->dd_data;
1412 hdr = (struct iscsi_tm_rsp *)task->hdr;
1413 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
1414 hdr->flags = csol_cqe->i_flags;
1415 hdr->response = csol_cqe->i_resp;
/* window upper edge = exp_cmdsn + cmd_wnd - 1, in wire byte order */
1416 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1417 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1418 csol_cqe->cmd_wnd - 1);
1420 hdr->itt = io_task->libiscsi_itt;
1421 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * hwi_complete_drvr_msgs() - handle a driver-message CQE: extract the
 * WRB index and CID (chip-generation dependent field layout), locate the
 * owning task through the WRB handle, clear its WRB and drop the task
 * reference.
 * NOTE(review): listing gaps elide the field-name arguments of the
 * AMAP_GET_BITS calls and the else-branch structure (1438, 1440-1441,
 * 1443, 1445-1447).
 */
1425 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1426 struct beiscsi_hba *phba, struct sol_cqe *psol)
1428 struct hwi_wrb_context *pwrb_context;
1429 struct wrb_handle *pwrb_handle = NULL;
1430 struct hwi_controller *phwi_ctrlr;
1431 struct iscsi_task *task;
1432 struct beiscsi_io_task *io_task;
1433 uint16_t wrb_index, cid, cri_index;
1435 phwi_ctrlr = phba->phwi_ctrlr;
/* BE2/BE3 and newer chips use different CQE layouts (v1 vs v2 amap) */
1436 if (is_chip_be2_be3r(phba)) {
1437 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1439 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
1442 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1444 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
1448 cri_index = BE_GET_CRI_FROM_CID(cid);
1449 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1450 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1451 task = pwrb_handle->pio_handle;
1453 io_task = task->dd_data;
1454 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1455 iscsi_put_task(task);
/*
 * be_complete_nopin_resp() - build an ISCSI_OP_NOOP_IN header from the
 * CQE fields and complete the PDU through libiscsi.
 */
1459 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1460 struct iscsi_task *task,
1461 struct common_sol_cqe *csol_cqe)
1463 struct iscsi_nopin *hdr;
1464 struct iscsi_conn *conn = beiscsi_conn->conn;
1465 struct beiscsi_io_task *io_task = task->dd_data;
1467 hdr = (struct iscsi_nopin *)task->hdr;
1468 hdr->flags = csol_cqe->i_flags;
1469 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1470 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1471 csol_cqe->cmd_wnd - 1);
1473 hdr->opcode = ISCSI_OP_NOOP_IN;
1474 hdr->itt = io_task->libiscsi_itt;
1475 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
/*
 * adapter_get_sol_cqe() - copy a raw solicited CQE into the
 * chip-independent struct common_sol_cqe. BE2/BE3 use the v1 amap
 * layout; newer chips use v2, where status/response are conditionally
 * valid and the under/overflow flags are synthesized into i_flags.
 * NOTE(review): listing gaps elide most AMAP field-name arguments and
 * the else/brace lines; comments below reflect only the visible fields.
 */
1478 static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1479 struct sol_cqe *psol,
1480 struct common_sol_cqe *csol_cqe)
1482 if (is_chip_be2_be3r(phba)) {
1483 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1484 i_exp_cmd_sn, psol);
1485 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1487 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1489 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1491 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1493 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1495 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1497 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1499 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1502 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1503 i_exp_cmd_sn, psol);
1504 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1506 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1508 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1510 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1512 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
/* v2: status/response fields are guarded by validity bits (elided names) */
1514 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1516 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1519 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
/* v2: under/overflow indicators are folded into the common i_flags */
1521 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1523 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
1525 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1527 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
/*
 * hwi_complete_cmd() - dispatch a solicited CQE to the proper completion
 * handler based on the WRB type recorded at submit time (I/O, logout,
 * TMF, login, NOP-in), under the session back_lock.
 * NOTE(review): listing gaps elide the switch statement line, break
 * statements, and parts of the default-case log arguments.
 */
1532 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1533 struct beiscsi_hba *phba, struct sol_cqe *psol)
1535 struct hwi_wrb_context *pwrb_context;
1536 struct wrb_handle *pwrb_handle;
1537 struct iscsi_wrb *pwrb = NULL;
1538 struct hwi_controller *phwi_ctrlr;
1539 struct iscsi_task *task;
1541 struct iscsi_conn *conn = beiscsi_conn->conn;
1542 struct iscsi_session *session = conn->session;
1543 struct common_sol_cqe csol_cqe = {0};
1544 uint16_t cri_index = 0;
1546 phwi_ctrlr = phba->phwi_ctrlr;
1548 /* Copy the elements to a common structure */
1549 adapter_get_sol_cqe(phba, psol, &csol_cqe);
1551 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1552 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1554 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1555 csol_cqe.wrb_index];
1557 task = pwrb_handle->pio_handle;
1558 pwrb = pwrb_handle->pwrb;
1559 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
/* completions below touch libiscsi task state - serialize on back_lock */
1561 spin_lock_bh(&session->back_lock);
1564 case HWH_TYPE_IO_RD:
/* a NOP-out WRB can complete as a NOP-in response */
1565 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1567 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1569 be_complete_io(beiscsi_conn, task, &csol_cqe);
1572 case HWH_TYPE_LOGOUT:
1573 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1574 be_complete_logout(beiscsi_conn, task, &csol_cqe);
1576 be_complete_tmf(beiscsi_conn, task, &csol_cqe);
/* login completions are expected on the unsolicited path, not here */
1579 case HWH_TYPE_LOGIN:
1580 beiscsi_log(phba, KERN_ERR,
1581 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1582 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1583 " hwi_complete_cmd- Solicited path\n");
1587 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
1591 beiscsi_log(phba, KERN_WARNING,
1592 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1593 "BM_%d : In hwi_complete_cmd, unknown type = %d"
1594 "wrb_index 0x%x CID 0x%x\n", type,
1600 spin_unlock_bh(&session->back_lock);
/*
 * hwi_get_async_busy_list() - select the header or data busy list for a
 * given async-PDU ring slot.
 * NOTE(review): the "if (is_header)" line and the header list member name
 * are elided by the listing gaps (1606-1607, 1609-1610).
 */
1603 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1604 *pasync_ctx, unsigned int is_header,
1605 unsigned int host_write_ptr)
1608 return &pasync_ctx->async_entry[host_write_ptr].
1611 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
/*
 * hwi_get_async_handle() - locate the async_pdu_handle matching a
 * default-PDU CQE: decode data-placement length and ring index
 * (chip-dependent layout), reconstruct the buffer's physical address,
 * then search the header/data busy list for the handle with that
 * address. Fills *pcq_index (assignment lines elided) and tags the
 * handle with its CRI, header flag and received length.
 * NOTE(review): gaps elide AMAP field names, is_header assignments in
 * the switch arms, a NULL-return default arm, and closing braces.
 */
1614 static struct async_pdu_handle *
1615 hwi_get_async_handle(struct beiscsi_hba *phba,
1616 struct beiscsi_conn *beiscsi_conn,
1617 struct hwi_async_pdu_context *pasync_ctx,
1618 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1620 struct be_bus_address phys_addr;
1621 struct list_head *pbusy_list;
1622 struct async_pdu_handle *pasync_handle = NULL;
1623 unsigned char is_header = 0;
1624 unsigned int index, dpl;
1626 if (is_chip_be2_be3r(phba)) {
1627 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1629 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1632 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1634 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
/* CQE carries end address of the placement; subtract dpl to get start */
1638 phys_addr.u.a32.address_lo =
1639 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1640 db_addr_lo) / 32] - dpl);
1641 phys_addr.u.a32.address_hi =
1642 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1645 phys_addr.u.a64.address =
1646 *((unsigned long long *)(&phys_addr.u.a64.address));
1648 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1649 & PDUCQE_CODE_MASK) {
1650 case UNSOL_HDR_NOTIFY:
1653 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1656 case UNSOL_DATA_NOTIFY:
1657 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1662 beiscsi_log(phba, KERN_WARNING,
1663 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1664 "BM_%d : Unexpected code=%d\n",
1665 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1666 code) / 32] & PDUCQE_CODE_MASK);
/* handle is identified by matching DMA address on the busy list */
1670 WARN_ON(list_empty(pbusy_list));
1671 list_for_each_entry(pasync_handle, pbusy_list, link) {
1672 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
1676 WARN_ON(!pasync_handle);
1678 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
1679 beiscsi_conn->beiscsi_conn_cid);
1680 pasync_handle->is_header = is_header;
1681 pasync_handle->buffer_len = dpl;
1684 return pasync_handle;
/*
 * hwi_update_async_writables() - advance the endpoint read pointer for
 * the header or data async ring up to cq_index, counting consumed
 * entries into the writables counter so they can be reposted.
 * NOTE(review): gaps elide the "if (is_header)"/else lines, pointer
 * increments inside the loop, and the duplicate-notification break path.
 */
1688 hwi_update_async_writables(struct beiscsi_hba *phba,
1689 struct hwi_async_pdu_context *pasync_ctx,
1690 unsigned int is_header, unsigned int cq_index)
1692 struct list_head *pbusy_list;
1693 struct async_pdu_handle *pasync_handle;
1694 unsigned int num_entries, writables = 0;
1695 unsigned int *pep_read_ptr, *pwritables;
1697 num_entries = pasync_ctx->num_entries;
/* pick the per-ring bookkeeping: header ring vs data ring */
1699 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1700 pwritables = &pasync_ctx->async_header.writables;
1702 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1703 pwritables = &pasync_ctx->async_data.writables;
/* walk the ring circularly until the CQE's index is reached */
1706 while ((*pep_read_ptr) != cq_index) {
1708 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1710 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1713 WARN_ON(list_empty(pbusy_list));
1715 if (!list_empty(pbusy_list)) {
1716 pasync_handle = list_entry(pbusy_list->next,
1717 struct async_pdu_handle,
1719 WARN_ON(!pasync_handle);
1720 pasync_handle->consumed = 1;
1727 beiscsi_log(phba, KERN_ERR,
1728 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1729 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1734 *pwritables = *pwritables + writables;
/*
 * hwi_free_async_msg() - return every handle queued on a CRI's wait
 * queue to the appropriate (header or data) free list and reset the
 * wait-queue accounting for that CRI.
 */
1738 static void hwi_free_async_msg(struct beiscsi_hba *phba,
1739 struct hwi_async_pdu_context *pasync_ctx,
1742 struct async_pdu_handle *pasync_handle, *tmp_handle;
1743 struct list_head *plist;
1745 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
/* _safe iteration: each handle is unlinked and moved to a free list */
1746 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1747 list_del(&pasync_handle->link);
1749 if (pasync_handle->is_header) {
1750 list_add_tail(&pasync_handle->link,
1751 &pasync_ctx->async_header.free_list);
1752 pasync_ctx->async_header.free_entries++;
1754 list_add_tail(&pasync_handle->link,
1755 &pasync_ctx->async_data.free_list);
1756 pasync_ctx->async_data.free_entries++;
1760 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1761 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1762 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
/*
 * hwi_get_ring_address() - return the SGE slot at host_write_ptr in the
 * header or data async ring.
 * NOTE(review): the "if (is_header)"/else lines are elided by the
 * listing gaps (1771, 1773).
 */
1765 static struct phys_addr *
1766 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1767 unsigned int is_header, unsigned int host_write_ptr)
1769 struct phys_addr *pasync_sge = NULL;
1772 pasync_sge = pasync_ctx->async_header.ring_base;
1774 pasync_sge = pasync_ctx->async_data.ring_base;
1776 return pasync_sge + host_write_ptr;
/*
 * hwi_post_async_buffers() - repost free async-PDU buffers (header or
 * data ring of the given ULP) to the adapter: move handles from the
 * free list to the busy list, write their addresses into the ring SGEs,
 * update the accounting and kick the default-PDU doorbell.
 * NOTE(review): gaps elide the is_header if/else lines, loop-local
 * declarations (i, doorbell), pointer-advance statements and braces;
 * comments reflect only what is visible.
 */
1779 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1780 unsigned int is_header, uint8_t ulp_num)
1782 struct hwi_controller *phwi_ctrlr;
1783 struct hwi_async_pdu_context *pasync_ctx;
1784 struct async_pdu_handle *pasync_handle;
1785 struct list_head *pfree_link, *pbusy_list;
1786 struct phys_addr *pasync_sge;
1787 unsigned int ring_id, num_entries;
1788 unsigned int host_write_num, doorbell_offset;
1789 unsigned int writables;
1793 phwi_ctrlr = phba->phwi_ctrlr;
1794 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1795 num_entries = pasync_ctx->num_entries;
/* header-ring bookkeeping (data-ring branch follows at 1806) */
1798 writables = min(pasync_ctx->async_header.writables,
1799 pasync_ctx->async_header.free_entries);
1800 pfree_link = pasync_ctx->async_header.free_list.next;
1801 host_write_num = pasync_ctx->async_header.host_write_ptr;
1802 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1803 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1806 writables = min(pasync_ctx->async_data.writables,
1807 pasync_ctx->async_data.free_entries);
1808 pfree_link = pasync_ctx->async_data.free_list.next;
1809 host_write_num = pasync_ctx->async_data.host_write_ptr;
1810 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1811 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
/* post in multiples of 8 entries only */
1815 writables = (writables / 8) * 8;
1817 for (i = 0; i < writables; i++) {
1819 hwi_get_async_busy_list(pasync_ctx, is_header,
1822 list_entry(pfree_link, struct async_pdu_handle,
1824 WARN_ON(!pasync_handle);
1825 pasync_handle->consumed = 0;
1827 pfree_link = pfree_link->next;
1829 pasync_sge = hwi_get_ring_address(pasync_ctx,
1830 is_header, host_write_num);
/* hi/lo appear swapped vs a32 names here - matches original source */
1832 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1833 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1835 list_move(&pasync_handle->link, pbusy_list);
1838 host_write_num = host_write_num % num_entries;
1842 pasync_ctx->async_header.host_write_ptr =
1844 pasync_ctx->async_header.free_entries -= writables;
1845 pasync_ctx->async_header.writables -= writables;
1846 pasync_ctx->async_header.busy_entries += writables;
1848 pasync_ctx->async_data.host_write_ptr = host_write_num;
1849 pasync_ctx->async_data.free_entries -= writables;
1850 pasync_ctx->async_data.writables -= writables;
1851 pasync_ctx->async_data.busy_entries += writables;
/* compose and ring the default-PDU doorbell for this ring */
1854 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1855 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1856 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1857 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1858 << DB_DEF_PDU_CQPROC_SHIFT;
1860 iowrite32(doorbell, phba->db_va + doorbell_offset);
/*
 * hwi_flush_default_pdu_buffer() - drop a data-ring default-PDU buffer
 * (e.g. after a digest error): look up its handle, update ring read
 * state if the CQE was not yet accounted, free the CRI's wait queue and
 * repost buffers. BUG_ON enforces this is only used for data buffers.
 */
1864 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1865 struct beiscsi_conn *beiscsi_conn,
1866 struct i_t_dpdu_cqe *pdpdu_cqe)
1868 struct hwi_controller *phwi_ctrlr;
1869 struct hwi_async_pdu_context *pasync_ctx;
1870 struct async_pdu_handle *pasync_handle = NULL;
1871 unsigned int cq_index = -1;
1872 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1873 beiscsi_conn->beiscsi_conn_cid);
1875 phwi_ctrlr = phba->phwi_ctrlr;
1876 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1877 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1880 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1881 pdpdu_cqe, &cq_index);
/* flushing applies to the data ring only */
1882 BUG_ON(pasync_handle->is_header != 0);
1883 if (pasync_handle->consumed == 0)
1884 hwi_update_async_writables(phba, pasync_ctx,
1885 pasync_handle->is_header, cq_index);
1887 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1888 hwi_post_async_buffers(phba, pasync_handle->is_header,
1889 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
/*
 * hwi_fwd_async_msg() - assemble a complete unsolicited PDU from the
 * handles queued on a CRI's wait queue (first handle is the header,
 * subsequent data buffers are coalesced into the first data buffer),
 * pass it to beiscsi_process_async_pdu() and release the handles.
 * NOTE(review): gaps elide the header/data discrimination (if lines),
 * num_buf/offset updates, the pdu-base declaration and the return.
 */
1894 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1895 struct beiscsi_hba *phba,
1896 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1898 struct list_head *plist;
1899 struct async_pdu_handle *pasync_handle;
1901 unsigned int hdr_len = 0, buf_len = 0;
1902 unsigned int status, index = 0, offset = 0;
1903 void *pfirst_buffer = NULL;
1904 unsigned int num_buf = 0;
1906 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1908 list_for_each_entry(pasync_handle, plist, link) {
1910 phdr = pasync_handle->pbuffer;
1911 hdr_len = pasync_handle->buffer_len;
1913 buf_len = pasync_handle->buffer_len;
1915 pfirst_buffer = pasync_handle->pbuffer;
/* data fragments after the first are copied contiguously */
1918 memcpy(pfirst_buffer + offset,
1919 pasync_handle->pbuffer, buf_len);
1925 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1926 phdr, hdr_len, pfirst_buffer,
1929 hwi_free_async_msg(phba, pasync_ctx, cri);
/*
 * hwi_gather_async_pdu() - accumulate header and data notifications for
 * an unsolicited PDU on the CRI's wait queue. A header arriving while a
 * previous header is pending flushes the stale queue; the expected data
 * length is decoded from the PDU base header, and the PDU is forwarded
 * once bytes_received reaches bytes_needed (or immediately for
 * zero-length PDUs).
 * NOTE(review): gaps elide several else/brace lines, bytes_received
 * accumulation and the trailing return; comments cover visible code.
 */
1934 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1935 struct beiscsi_hba *phba,
1936 struct async_pdu_handle *pasync_handle)
1938 struct hwi_async_pdu_context *pasync_ctx;
1939 struct hwi_controller *phwi_ctrlr;
1940 unsigned int bytes_needed = 0, status = 0;
1941 unsigned short cri = pasync_handle->cri;
1942 struct pdu_base *ppdu;
1944 phwi_ctrlr = phba->phwi_ctrlr;
1945 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1946 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1947 BE_GET_CRI_FROM_CID(beiscsi_conn->
1948 beiscsi_conn_cid)));
1950 list_del(&pasync_handle->link);
1951 if (pasync_handle->is_header) {
1952 pasync_ctx->async_header.busy_entries--;
/* new header while one is pending: discard the stale partial PDU */
1953 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1954 hwi_free_async_msg(phba, pasync_ctx, cri);
1958 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1959 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1960 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1961 (unsigned short)pasync_handle->buffer_len;
1962 list_add_tail(&pasync_handle->link,
1963 &pasync_ctx->async_entry[cri].wait_queue.list);
1965 ppdu = pasync_handle->pbuffer;
/* reassemble 24-bit data length from hi/lo fields of the PDU base */
1966 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1967 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1968 0xFFFF0000) | ((be16_to_cpu((ppdu->
1969 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1970 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1973 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1976 if (bytes_needed == 0)
1977 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1981 pasync_ctx->async_data.busy_entries--;
1982 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1983 list_add_tail(&pasync_handle->link,
1984 &pasync_ctx->async_entry[cri].wait_queue.
1986 pasync_ctx->async_entry[cri].wait_queue.
1988 (unsigned short)pasync_handle->buffer_len;
1990 if (pasync_ctx->async_entry[cri].wait_queue.
1992 pasync_ctx->async_entry[cri].wait_queue.
1994 status = hwi_fwd_async_msg(beiscsi_conn, phba,
/*
 * hwi_process_default_pdu_ring() - handle an unsolicited header/data
 * CQE: resolve the async handle, update ring read state if not yet
 * accounted, gather the PDU fragments and repost free buffers.
 */
2001 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
2002 struct beiscsi_hba *phba,
2003 struct i_t_dpdu_cqe *pdpdu_cqe)
2005 struct hwi_controller *phwi_ctrlr;
2006 struct hwi_async_pdu_context *pasync_ctx;
2007 struct async_pdu_handle *pasync_handle = NULL;
2008 unsigned int cq_index = -1;
2009 uint16_t cri_index = BE_GET_CRI_FROM_CID(
2010 beiscsi_conn->beiscsi_conn_cid);
2012 phwi_ctrlr = phba->phwi_ctrlr;
2013 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
2014 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
2017 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
2018 pdpdu_cqe, &cq_index);
2020 if (pasync_handle->consumed == 0)
2021 hwi_update_async_writables(phba, pasync_ctx,
2022 pasync_handle->is_header, cq_index);
2024 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
2025 hwi_post_async_buffers(phba, pasync_handle->is_header,
2026 BEISCSI_GET_ULP_FROM_CRI(
2027 phwi_ctrlr, cri_index));
/*
 * beiscsi_process_mcc_isr() - drain the MCC completion queue: async
 * entries are routed to the link-state handler (others are logged as
 * unsupported), completed entries go to be_mcc_compl_process_isr().
 * The CQ doorbell is rung every 32 entries and finally with rearm.
 * NOTE(review): gaps elide num_processed resets/increments, some braces
 * and part of the "unsupported async" log arguments.
 */
2030 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
2032 struct be_queue_info *mcc_cq;
2033 struct be_mcc_compl *mcc_compl;
2034 unsigned int num_processed = 0;
2036 mcc_cq = &phba->ctrl.mcc_obj.cq;
2037 mcc_compl = queue_tail_node(mcc_cq);
2038 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2039 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
/* ring the doorbell periodically so the CQ does not back up */
2041 if (num_processed >= 32) {
2042 hwi_ring_cq_db(phba, mcc_cq->id,
2043 num_processed, 0, 0);
2046 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
2047 /* Interpret flags as an async trailer */
2048 if (is_link_state_evt(mcc_compl->flags))
2049 /* Interpret compl as a async link evt */
2050 beiscsi_async_link_state_process(phba,
2051 (struct be_async_event_link_state *) mcc_compl);
2053 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
2054 "BM_%d : Unsupported Async Event, flags"
/* link-up triggers a re-read of the boot target info */
2057 if (phba->state & BE_ADAPTER_LINK_UP) {
2058 phba->state |= BE_ADAPTER_CHECK_BOOT;
2059 phba->get_boot = BE_GET_BOOT_RETRIES;
2062 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
2063 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
2064 atomic_dec(&phba->ctrl.mcc_obj.q.used);
/* invalidate the entry and step to the next tail node */
2067 mcc_compl->flags = 0;
2068 queue_tail_inc(mcc_cq);
2069 mcc_compl = queue_tail_node(mcc_cq);
2070 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2074 if (num_processed > 0)
2075 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
/*
 * Main completion-queue poll loop. Walks valid CQEs at the CQ tail,
 * resolves the endpoint/connection from the CID (skipping CQEs for
 * already-freed connections), dispatches by CQE code (solicited
 * completion, driver message, unsolicited header/data, error/kill
 * events), and rings the CQ doorbell in batches of 32 and once more
 * with rearm at the end. Returns the number of entries processed.
 * NOTE(review): this numbered listing elides the cq assignment (2100),
 * break statements, queue_tail_inc and several log continuation lines.
 */
2080 * beiscsi_process_cq()- Process the Completion Queue
2081 * @pbe_eq: Event Q on which the Completion has come
2084 * Number of Completion Entries processed.
2086 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2088 struct be_queue_info *cq;
2089 struct sol_cqe *sol;
2090 struct dmsg_cqe *dmsg;
2091 unsigned int num_processed = 0;
2092 unsigned int tot_nump = 0;
2093 unsigned short code = 0, cid = 0;
2094 uint16_t cri_index = 0;
2095 struct beiscsi_conn *beiscsi_conn;
2096 struct beiscsi_endpoint *beiscsi_ep;
2097 struct iscsi_endpoint *ep;
2098 struct beiscsi_hba *phba;
2101 sol = queue_tail_node(cq);
2102 phba = pbe_eq->phba;
2104 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
/* CQE is little-endian on the wire; convert in place before decoding */
2106 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2108 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
2109 32] & CQE_CODE_MASK);
/* CID location differs between chip generations and CQE types */
2112 if (is_chip_be2_be3r(phba)) {
2113 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2115 if ((code == DRIVERMSG_NOTIFY) ||
2116 (code == UNSOL_HDR_NOTIFY) ||
2117 (code == UNSOL_DATA_NOTIFY))
2118 cid = AMAP_GET_BITS(
2119 struct amap_i_t_dpdu_cqe_v2,
2122 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2126 cri_index = BE_GET_CRI_FROM_CID(cid);
2127 ep = phba->ep_array[cri_index];
2130 /* connection has already been freed
2131 * just move on to next one
2133 beiscsi_log(phba, KERN_WARNING,
2135 "BM_%d : proc cqe of disconn ep: cid %d\n",
2140 beiscsi_ep = ep->dd_data;
2141 beiscsi_conn = beiscsi_ep->conn;
/* batch doorbell every 32 completions to keep the CQ drained */
2143 if (num_processed >= 32) {
2144 hwi_ring_cq_db(phba, cq->id,
2145 num_processed, 0, 0);
2146 tot_nump += num_processed;
2151 case SOL_CMD_COMPLETE:
2152 hwi_complete_cmd(beiscsi_conn, phba, sol);
2154 case DRIVERMSG_NOTIFY:
2155 beiscsi_log(phba, KERN_INFO,
2156 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2157 "BM_%d : Received %s[%d] on CID : %d\n",
2158 cqe_desc[code], code, cid);
2160 dmsg = (struct dmsg_cqe *)sol;
2161 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2163 case UNSOL_HDR_NOTIFY:
2164 beiscsi_log(phba, KERN_INFO,
2165 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2166 "BM_%d : Received %s[%d] on CID : %d\n",
2167 cqe_desc[code], code, cid);
/* default-PDU ring state is protected by async_pdu_lock */
2169 spin_lock_bh(&phba->async_pdu_lock);
2170 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2171 (struct i_t_dpdu_cqe *)sol);
2172 spin_unlock_bh(&phba->async_pdu_lock);
2174 case UNSOL_DATA_NOTIFY:
2175 beiscsi_log(phba, KERN_INFO,
2176 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2177 "BM_%d : Received %s[%d] on CID : %d\n",
2178 cqe_desc[code], code, cid);
2180 spin_lock_bh(&phba->async_pdu_lock);
2181 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2182 (struct i_t_dpdu_cqe *)sol);
2183 spin_unlock_bh(&phba->async_pdu_lock);
2185 case CXN_INVALIDATE_INDEX_NOTIFY:
2186 case CMD_INVALIDATED_NOTIFY:
2187 case CXN_INVALIDATE_NOTIFY:
2188 beiscsi_log(phba, KERN_ERR,
2189 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2190 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2191 cqe_desc[code], code, cid);
/* per-command kill events: log only, command cleanup happens elsewhere */
2193 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2194 case CMD_KILLED_INVALID_STATSN_RCVD:
2195 case CMD_KILLED_INVALID_R2T_RCVD:
2196 case CMD_CXN_KILLED_LUN_INVALID:
2197 case CMD_CXN_KILLED_ICD_INVALID:
2198 case CMD_CXN_KILLED_ITT_INVALID:
2199 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2200 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
2201 beiscsi_log(phba, KERN_ERR,
2202 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2203 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2204 cqe_desc[code], code, cid);
2206 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2207 beiscsi_log(phba, KERN_ERR,
2208 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2209 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2210 cqe_desc[code], code, cid);
2211 spin_lock_bh(&phba->async_pdu_lock);
2212 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2213 (struct i_t_dpdu_cqe *) sol);
2214 spin_unlock_bh(&phba->async_pdu_lock);
/* fatal connection-level events: tear the connection down */
2216 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2217 case CXN_KILLED_BURST_LEN_MISMATCH:
2218 case CXN_KILLED_AHS_RCVD:
2219 case CXN_KILLED_HDR_DIGEST_ERR:
2220 case CXN_KILLED_UNKNOWN_HDR:
2221 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2222 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2223 case CXN_KILLED_TIMED_OUT:
2224 case CXN_KILLED_FIN_RCVD:
2225 case CXN_KILLED_RST_SENT:
2226 case CXN_KILLED_RST_RCVD:
2227 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2228 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2229 case CXN_KILLED_OVER_RUN_RESIDUAL:
2230 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2231 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2232 beiscsi_log(phba, KERN_ERR,
2233 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2234 "BM_%d : Event %s[%d] received on CID : %d\n",
2235 cqe_desc[code], code, cid);
2237 iscsi_conn_failure(beiscsi_conn->conn,
2238 ISCSI_ERR_CONN_FAILED);
2241 beiscsi_log(phba, KERN_ERR,
2242 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2243 "BM_%d : Invalid CQE Event Received Code : %d"
/* mark the CQE consumed and advance to the next tail entry */
2250 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2252 sol = queue_tail_node(cq);
2256 if (num_processed > 0) {
2257 tot_nump += num_processed;
2258 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
/*
 * beiscsi_process_all_cqs() - workqueue handler: consume the pending
 * MCC and/or I/O CQ work flagged on the EQ object (flags cleared under
 * isr_lock), then rearm the EQ for further interrupts.
 */
2263 void beiscsi_process_all_cqs(struct work_struct *work)
2265 unsigned long flags;
2266 struct hwi_controller *phwi_ctrlr;
2267 struct hwi_context_memory *phwi_context;
2268 struct beiscsi_hba *phba;
2269 struct be_eq_obj *pbe_eq =
2270 container_of(work, struct be_eq_obj, work_cqs);
2272 phba = pbe_eq->phba;
2273 phwi_ctrlr = phba->phwi_ctrlr;
2274 phwi_context = phwi_ctrlr->phwi_ctxt;
2276 if (pbe_eq->todo_mcc_cq) {
/* clear the flag under isr_lock so the ISR can re-set it safely */
2277 spin_lock_irqsave(&phba->isr_lock, flags);
2278 pbe_eq->todo_mcc_cq = false;
2279 spin_unlock_irqrestore(&phba->isr_lock, flags);
2280 beiscsi_process_mcc_isr(phba);
2283 if (pbe_eq->todo_cq) {
2284 spin_lock_irqsave(&phba->isr_lock, flags);
2285 pbe_eq->todo_cq = false;
2286 spin_unlock_irqrestore(&phba->isr_lock, flags);
2287 beiscsi_process_cq(pbe_eq);
2290 /* rearm EQ for further interrupts */
2291 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
/*
 * be_iopoll() - blk_iopoll callback: process the EQ's CQ, account the
 * completion count, and on completion rearm the EQ.
 * NOTE(review): gaps elide the ret declaration and, presumably, the
 * "ret < budget" check guarding blk_iopoll_complete - confirm against
 * full source.
 */
2294 static int be_iopoll(struct blk_iopoll *iop, int budget)
2297 struct beiscsi_hba *phba;
2298 struct be_eq_obj *pbe_eq;
2300 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2301 ret = beiscsi_process_cq(pbe_eq);
2302 pbe_eq->cq_count += ret;
2304 phba = pbe_eq->phba;
2305 blk_iopoll_complete(iop);
2306 beiscsi_log(phba, KERN_INFO,
2307 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2308 "BM_%d : rearm pbe_eq->q.id =%d\n",
2310 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
/*
 * hwi_write_sgl_v2() - program a v2-format WRB and its SGL fragment for
 * a mapped scatterlist: BHS address, the first two SGEs inline in the
 * WRB (with last-SGE markers depending on num_sg), then the full SGE
 * list in the psgl fragment with the final SGE marked last.
 * NOTE(review): gaps elide sg advance expressions, field-name arguments
 * of several AMAP_SET_BITS calls, offset accumulation and braces.
 */
2316 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2317 unsigned int num_sg, struct beiscsi_io_task *io_task)
2319 struct iscsi_sge *psgl;
2320 unsigned int sg_len, index;
2321 unsigned int sge_len = 0;
2322 unsigned long long addr;
2323 struct scatterlist *l_sg;
2324 unsigned int offset;
/* point the WRB at the pre-allocated BHS buffer */
2326 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2327 io_task->bhs_pa.u.a32.address_lo);
2328 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2329 io_task->bhs_pa.u.a32.address_hi);
/* up to two SGEs are carried directly inside the WRB */
2332 for (index = 0; (index < num_sg) && (index < 2); index++,
2335 sg_len = sg_dma_len(sg);
2336 addr = (u64) sg_dma_address(sg);
2337 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2339 lower_32_bits(addr));
2340 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2342 upper_32_bits(addr));
2343 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2348 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2350 sg_len = sg_dma_len(sg);
2351 addr = (u64) sg_dma_address(sg);
2352 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2354 lower_32_bits(addr));
2355 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2357 upper_32_bits(addr));
2358 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2363 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2364 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
/* first fragment SGE describes the BHS (minus 2 bytes) */
2366 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2368 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2369 io_task->bhs_pa.u.a32.address_hi);
2370 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2371 io_task->bhs_pa.u.a32.address_lo);
/* mark which of the inline WRB SGEs is the last, per num_sg */
2374 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2376 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2378 } else if (num_sg == 2) {
2379 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2381 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2384 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2386 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
/* emit the full SGE list into the fragment page */
2394 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2395 sg_len = sg_dma_len(sg);
2396 addr = (u64) sg_dma_address(sg);
2397 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2398 lower_32_bits(addr));
2399 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2400 upper_32_bits(addr));
2401 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2402 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2403 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2407 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
/*
 * hwi_write_sgl() - legacy (v1 WRB format) counterpart of
 * hwi_write_sgl_v2(): same structure, but addresses are split with
 * explicit mask/shift instead of lower_32_bits/upper_32_bits and the
 * amap_iscsi_wrb layout is used.
 * NOTE(review): gaps elide sg advance expressions, offset accumulation,
 * several AMAP value arguments and braces.
 */
2411 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2412 unsigned int num_sg, struct beiscsi_io_task *io_task)
2414 struct iscsi_sge *psgl;
2415 unsigned int sg_len, index;
2416 unsigned int sge_len = 0;
2417 unsigned long long addr;
2418 struct scatterlist *l_sg;
2419 unsigned int offset;
/* point the WRB at the pre-allocated BHS buffer */
2421 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2422 io_task->bhs_pa.u.a32.address_lo);
2423 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2424 io_task->bhs_pa.u.a32.address_hi);
/* up to two SGEs are carried directly inside the WRB */
2427 for (index = 0; (index < num_sg) && (index < 2); index++,
2430 sg_len = sg_dma_len(sg);
2431 addr = (u64) sg_dma_address(sg);
2432 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2433 ((u32)(addr & 0xFFFFFFFF)));
2434 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2435 ((u32)(addr >> 32)));
2436 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2440 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2442 sg_len = sg_dma_len(sg);
2443 addr = (u64) sg_dma_address(sg);
2444 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2445 ((u32)(addr & 0xFFFFFFFF)));
2446 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2447 ((u32)(addr >> 32)));
2448 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2452 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2453 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
/* first fragment SGE describes the BHS (minus 2 bytes) */
2455 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2457 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2458 io_task->bhs_pa.u.a32.address_hi);
2459 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2460 io_task->bhs_pa.u.a32.address_lo);
/* mark which of the inline WRB SGEs is the last, per num_sg */
2463 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2465 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2467 } else if (num_sg == 2) {
2468 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2470 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2473 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2475 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
/* emit the full SGE list into the fragment page */
2482 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2483 sg_len = sg_dma_len(sg);
2484 addr = (u64) sg_dma_address(sg);
2485 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2486 (addr & 0xFFFFFFFF));
2487 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2489 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2490 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2491 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2495 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2499 * hwi_write_buffer()- Populate the WRB with task info
2500 * @pwrb: ptr to the WRB entry
2501 * @task: iscsi task which is to be executed
2503 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2505 struct iscsi_sge *psgl;
2506 struct beiscsi_io_task *io_task = task->dd_data;
2507 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2508 struct beiscsi_hba *phba = beiscsi_conn->phba;
2509 uint8_t dsp_value = 0;
/* Non-I/O (mgmt) BHS; the trailing 2 pad bytes are excluded. */
2511 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2512 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2513 io_task->bhs_pa.u.a32.address_lo);
2514 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2515 io_task->bhs_pa.u.a32.address_hi);
2519 /* Check for the data_count */
2520 dsp_value = (task->data_count) ? 1 : 0;
/* dsp bit lives in a different field on BE2/BE3 vs SkyHawk (v2 WRB). */
2522 if (is_chip_be2_be3r(phba))
2523 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2526 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2529 /* Map addr only if there is data_count */
2531 io_task->mtask_addr = pci_map_single(phba->pcidev,
2535 io_task->mtask_data_count = task->data_count;
/* NOTE(review): elided lines likely clear dsp on mapping failure. */
2537 io_task->mtask_addr = 0;
2539 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2540 lower_32_bits(io_task->mtask_addr));
2541 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2542 upper_32_bits(io_task->mtask_addr));
2543 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2546 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
/* No immediate data: dsp off and no mapped mtask buffer. */
2548 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2549 io_task->mtask_addr = 0;
/* SGL entry 0 always describes the BHS. */
2552 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2554 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2556 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2557 io_task->bhs_pa.u.a32.address_hi);
2558 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2559 io_task->bhs_pa.u.a32.address_lo);
/* Scrub the next SGL entry before (conditionally) filling it below. */
2562 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2563 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2564 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2565 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2566 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2567 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2571 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2572 lower_32_bits(io_task->mtask_addr));
2573 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2574 upper_32_bits(io_task->mtask_addr));
/* 0x106 is a magic SGE length used here for mgmt data — TODO confirm. */
2576 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2578 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2582 * beiscsi_find_mem_req()- Find mem needed
2583 * @phba: ptr to HBA struct
 *
 * Fills phba->mem_req[] with the byte size required for every memory
 * descriptor (WRBs, SGL handles, SGEs, and per-ULP async PDU rings and
 * buffers). Pure computation; allocation happens in beiscsi_alloc_mem().
2585 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2587 uint8_t mem_descr_index, ulp_num;
2588 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2589 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2590 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2592 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2593 sizeof(struct sol_cqe));
2595 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
/* Two template headers: data-out and nop-out (see global templates). */
2597 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2598 BE_ISCSI_PDU_HEADER_SIZE;
2599 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2600 sizeof(struct hwi_context_memory);
/* One WRB ring per connection. */
2603 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2604 * (phba->params.wrbs_per_cxn)
2605 * phba->params.cxns_per_ctrl;
2606 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2607 (phba->params.wrbs_per_cxn);
2608 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2609 phba->params.cxns_per_ctrl);
2611 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2612 phba->params.icds_per_ctrl;
2613 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2614 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
/* Per-ULP async PDU memory, indexed via ulp_num * MEM_DESCR_OFFSET. */
2615 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2616 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2618 num_async_pdu_buf_sgl_pages =
2619 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2621 sizeof(struct phys_addr));
2623 num_async_pdu_buf_pages =
2624 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2626 phba->params.defpdu_hdr_sz);
2628 num_async_pdu_data_pages =
2629 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2631 phba->params.defpdu_data_sz);
2633 num_async_pdu_data_sgl_pages =
2634 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2636 sizeof(struct phys_addr));
2638 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2639 (ulp_num * MEM_DESCR_OFFSET));
2640 phba->mem_req[mem_descr_index] =
2641 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2642 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2644 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2645 (ulp_num * MEM_DESCR_OFFSET));
2646 phba->mem_req[mem_descr_index] =
2647 num_async_pdu_buf_pages *
2650 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2651 (ulp_num * MEM_DESCR_OFFSET));
2652 phba->mem_req[mem_descr_index] =
2653 num_async_pdu_data_pages *
2656 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2657 (ulp_num * MEM_DESCR_OFFSET));
2658 phba->mem_req[mem_descr_index] =
2659 num_async_pdu_buf_sgl_pages *
2662 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2663 (ulp_num * MEM_DESCR_OFFSET));
2664 phba->mem_req[mem_descr_index] =
2665 num_async_pdu_data_sgl_pages *
2668 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2669 (ulp_num * MEM_DESCR_OFFSET));
2670 phba->mem_req[mem_descr_index] =
2671 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2672 sizeof(struct async_pdu_handle);
2674 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2675 (ulp_num * MEM_DESCR_OFFSET));
2676 phba->mem_req[mem_descr_index] =
2677 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2678 sizeof(struct async_pdu_handle);
/* Context struct plus one hwi_async_entry per CID. */
2680 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2681 (ulp_num * MEM_DESCR_OFFSET));
2682 phba->mem_req[mem_descr_index] =
2683 sizeof(struct hwi_async_pdu_context) +
2684 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2685 sizeof(struct hwi_async_entry));
/*
 * beiscsi_alloc_mem()- Allocate every region sized by
 * beiscsi_find_mem_req(). Large regions are split into up to
 * BEISCSI_MAX_FRAGS_INIT DMA-coherent fragments, halving the fragment
 * size on allocation failure until BE_MIN_MEM_SIZE is reached.
 * Returns 0 on success; on failure unwinds all prior allocations.
 */
2690 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2693 struct hwi_controller *phwi_ctrlr;
2694 struct be_mem_descriptor *mem_descr;
2695 struct mem_array *mem_arr, *mem_arr_orig;
2696 unsigned int i, j, alloc_size, curr_alloc_size;
2698 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2699 if (!phba->phwi_ctrlr)
2702 /* Allocate memory for wrb_context */
2703 phwi_ctrlr = phba->phwi_ctrlr;
2704 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2705 phba->params.cxns_per_ctrl,
2707 if (!phwi_ctrlr->wrb_context)
2710 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2712 if (!phba->init_mem) {
2713 kfree(phwi_ctrlr->wrb_context);
2714 kfree(phba->phwi_ctrlr);
/* Scratch fragment table reused for each descriptor, freed at the end. */
2718 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2720 if (!mem_arr_orig) {
2721 kfree(phba->init_mem);
2722 kfree(phwi_ctrlr->wrb_context);
2723 kfree(phba->phwi_ctrlr);
2727 mem_descr = phba->init_mem;
2728 for (i = 0; i < SE_MEM_MAX; i++) {
2729 if (!phba->mem_req[i]) {
2730 mem_descr->mem_array = NULL;
2736 mem_arr = mem_arr_orig;
2737 alloc_size = phba->mem_req[i];
2738 memset(mem_arr, 0, sizeof(struct mem_array) *
2739 BEISCSI_MAX_FRAGS_INIT);
/* Fragment size is capped by the be_max_phys_size module parameter. */
2740 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2742 mem_arr->virtual_address = pci_alloc_consistent(
2746 if (!mem_arr->virtual_address) {
2747 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
/* Back off: round down to a power of two, then keep halving. */
2749 if (curr_alloc_size -
2750 rounddown_pow_of_two(curr_alloc_size))
2751 curr_alloc_size = rounddown_pow_of_two
2754 curr_alloc_size = curr_alloc_size / 2;
2756 mem_arr->bus_address.u.
2757 a64.address = (__u64) bus_add;
2758 mem_arr->size = curr_alloc_size;
2759 alloc_size -= curr_alloc_size;
2760 curr_alloc_size = min(be_max_phys_size *
2765 } while (alloc_size);
2766 mem_descr->num_elements = j;
2767 mem_descr->size_in_bytes = phba->mem_req[i];
2768 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2770 if (!mem_descr->mem_array)
2773 memcpy(mem_descr->mem_array, mem_arr_orig,
2774 sizeof(struct mem_array) * j);
2777 kfree(mem_arr_orig);
/* Error unwind: free DMA fragments of every descriptor allocated so far. */
2780 mem_descr->num_elements = j;
2781 while ((i) || (j)) {
2782 for (j = mem_descr->num_elements; j > 0; j--) {
2783 pci_free_consistent(phba->pcidev,
2784 mem_descr->mem_array[j - 1].size,
2785 mem_descr->mem_array[j - 1].
2787 (unsigned long)mem_descr->
2789 bus_address.u.a64.address);
2793 kfree(mem_descr->mem_array);
2797 kfree(mem_arr_orig);
2798 kfree(phba->init_mem);
2799 kfree(phba->phwi_ctrlr->wrb_context);
2800 kfree(phba->phwi_ctrlr);
/*
 * beiscsi_get_memory()- Size all memory descriptors, then allocate them.
 * @phba: ptr to HBA struct
 * Returns beiscsi_alloc_mem()'s status (0 on success).
 */
2804 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2806 beiscsi_find_mem_req(phba);
2807 return beiscsi_alloc_mem(phba);
/*
 * iscsi_init_global_templates()- Build the two global PDU header
 * templates (data-out, then nop-out) in the ISCSI_MEM_GLOBAL_HEADER
 * region sized by beiscsi_find_mem_req(). The nop-out template is a
 * reserved-TTT response (ttt = 0xFFFFFFFF, f_bit set, i_bit clear).
 */
2810 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2812 struct pdu_data_out *pdata_out;
2813 struct pdu_nop_out *pnop_out;
2814 struct be_mem_descriptor *mem_descr;
2816 mem_descr = phba->init_mem;
2817 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2819 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2820 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2822 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
/* nop-out template sits immediately after the data-out template. */
2826 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2827 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2829 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2830 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2831 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2832 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
/*
 * beiscsi_init_wrb_handle()- Carve the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection WRB handle arrays and attach a WRB to
 * each handle. Returns 0 on success, frees partial allocations on
 * failure (init_wrb_hndl_failed).
 */
2835 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2837 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2838 struct hwi_context_memory *phwi_ctxt;
2839 struct wrb_handle *pwrb_handle = NULL;
2840 struct hwi_controller *phwi_ctrlr;
2841 struct hwi_wrb_context *pwrb_context;
2842 struct iscsi_wrb *pwrb = NULL;
2843 unsigned int num_cxn_wrbh = 0;
2844 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2846 mem_descr_wrbh = phba->init_mem;
2847 mem_descr_wrbh += HWI_MEM_WRBH;
2849 mem_descr_wrb = phba->init_mem;
2850 mem_descr_wrb += HWI_MEM_WRB;
2851 phwi_ctrlr = phba->phwi_ctrlr;
2853 /* Allocate memory for WRBQ */
2854 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2855 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2856 phba->params.cxns_per_ctrl,
2858 if (!phwi_ctxt->be_wrbq) {
2859 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2860 "BM_%d : WRBQ Mem Alloc Failed\n");
/* Pass 1: distribute wrb_handle structs across connections. */
2864 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2865 pwrb_context = &phwi_ctrlr->wrb_context[index];
2866 pwrb_context->pwrb_handle_base =
2867 kzalloc(sizeof(struct wrb_handle *) *
2868 phba->params.wrbs_per_cxn, GFP_KERNEL);
2869 if (!pwrb_context->pwrb_handle_base) {
2870 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2871 "BM_%d : Mem Alloc Failed. Failing to load\n");
2872 goto init_wrb_hndl_failed;
2874 pwrb_context->pwrb_handle_basestd =
2875 kzalloc(sizeof(struct wrb_handle *) *
2876 phba->params.wrbs_per_cxn, GFP_KERNEL);
2877 if (!pwrb_context->pwrb_handle_basestd) {
2878 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2879 "BM_%d : Mem Alloc Failed. Failing to load\n");
2880 goto init_wrb_hndl_failed;
/* Refill the handle pool from the next memory fragment when drained. */
2882 if (!num_cxn_wrbh) {
2884 mem_descr_wrbh->mem_array[idx].virtual_address;
2885 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2886 ((sizeof(struct wrb_handle)) *
2887 phba->params.wrbs_per_cxn));
2890 pwrb_context->alloc_index = 0;
2891 pwrb_context->wrb_handles_available = 0;
2892 pwrb_context->free_index = 0;
2895 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2896 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2897 pwrb_context->pwrb_handle_basestd[j] =
2899 pwrb_context->wrb_handles_available++;
2900 pwrb_handle->wrb_index = j;
/* Pass 2: bind an iscsi_wrb from HWI_MEM_WRB to every handle. */
2907 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2908 pwrb_context = &phwi_ctrlr->wrb_context[index];
2910 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2911 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2912 ((sizeof(struct iscsi_wrb) *
2913 phba->params.wrbs_per_cxn));
2918 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2919 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2920 pwrb_handle->pwrb = pwrb;
2927 init_wrb_hndl_failed:
2928 for (j = index; j > 0; j--) {
2929 pwrb_context = &phwi_ctrlr->wrb_context[j];
2930 kfree(pwrb_context->pwrb_handle_base);
2931 kfree(pwrb_context->pwrb_handle_basestd);
/*
 * hwi_init_async_pdu_ctx()- For every supported ULP, lay the async PDU
 * context over the HWI_MEM_ASYNC_PDU_CONTEXT region and wire up the
 * header/data buffer, ring and handle regions; then build the per-CID
 * free lists of async_pdu_handles. Returns 0 on success.
 */
2936 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2939 struct hwi_controller *phwi_ctrlr;
2940 struct hba_parameters *p = &phba->params;
2941 struct hwi_async_pdu_context *pasync_ctx;
2942 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2943 unsigned int index, idx, num_per_mem, num_async_data;
2944 struct be_mem_descriptor *mem_descr;
2946 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2947 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2949 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2950 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2951 (ulp_num * MEM_DESCR_OFFSET));
2953 phwi_ctrlr = phba->phwi_ctrlr;
/* The context struct itself lives in the descriptor's first fragment. */
2954 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2955 (struct hwi_async_pdu_context *)
2956 mem_descr->mem_array[0].virtual_address;
2958 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2959 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
/* Per-CID entry array follows the context struct in the same region. */
2961 pasync_ctx->async_entry =
2962 (struct hwi_async_entry *)
2963 ((long unsigned int)pasync_ctx +
2964 sizeof(struct hwi_async_pdu_context));
2966 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2968 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2970 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2971 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2972 (ulp_num * MEM_DESCR_OFFSET);
2973 if (mem_descr->mem_array[0].virtual_address) {
2974 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2975 "BM_%d : hwi_init_async_pdu_ctx"
2976 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2978 mem_descr->mem_array[0].
2981 beiscsi_log(phba, KERN_WARNING,
2983 "BM_%d : No Virtual address for ULP : %d\n",
2986 pasync_ctx->async_header.va_base =
2987 mem_descr->mem_array[0].virtual_address;
2989 pasync_ctx->async_header.pa_base.u.a64.address =
2990 mem_descr->mem_array[0].
2991 bus_address.u.a64.address;
2993 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2994 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2995 (ulp_num * MEM_DESCR_OFFSET);
2996 if (mem_descr->mem_array[0].virtual_address) {
2997 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2998 "BM_%d : hwi_init_async_pdu_ctx"
2999 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
3001 mem_descr->mem_array[0].
3004 beiscsi_log(phba, KERN_WARNING,
3006 "BM_%d : No Virtual address for ULP : %d\n",
3009 pasync_ctx->async_header.ring_base =
3010 mem_descr->mem_array[0].virtual_address;
3012 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3013 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
3014 (ulp_num * MEM_DESCR_OFFSET);
3015 if (mem_descr->mem_array[0].virtual_address) {
3016 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3017 "BM_%d : hwi_init_async_pdu_ctx"
3018 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
3020 mem_descr->mem_array[0].
3023 beiscsi_log(phba, KERN_WARNING,
3025 "BM_%d : No Virtual address for ULP : %d\n",
3028 pasync_ctx->async_header.handle_base =
3029 mem_descr->mem_array[0].virtual_address;
3030 pasync_ctx->async_header.writables = 0;
3031 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
3033 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3034 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3035 (ulp_num * MEM_DESCR_OFFSET);
3036 if (mem_descr->mem_array[0].virtual_address) {
3037 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3038 "BM_%d : hwi_init_async_pdu_ctx"
3039 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
3041 mem_descr->mem_array[0].
3044 beiscsi_log(phba, KERN_WARNING,
3046 "BM_%d : No Virtual address for ULP : %d\n",
3049 pasync_ctx->async_data.ring_base =
3050 mem_descr->mem_array[0].virtual_address;
3052 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3053 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
3054 (ulp_num * MEM_DESCR_OFFSET);
3055 if (!mem_descr->mem_array[0].virtual_address)
3056 beiscsi_log(phba, KERN_WARNING,
3058 "BM_%d : No Virtual address for ULP : %d\n",
3061 pasync_ctx->async_data.handle_base =
3062 mem_descr->mem_array[0].virtual_address;
3063 pasync_ctx->async_data.writables = 0;
3064 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
3067 (struct async_pdu_handle *)
3068 pasync_ctx->async_header.handle_base;
3070 (struct async_pdu_handle *)
3071 pasync_ctx->async_data.handle_base;
/* Data buffers may span fragments; track refills via idx/num_async_data. */
3073 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3074 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
3075 (ulp_num * MEM_DESCR_OFFSET);
3076 if (mem_descr->mem_array[0].virtual_address) {
3077 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3078 "BM_%d : hwi_init_async_pdu_ctx"
3079 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
3081 mem_descr->mem_array[0].
3084 beiscsi_log(phba, KERN_WARNING,
3086 "BM_%d : No Virtual address for ULP : %d\n",
3090 pasync_ctx->async_data.va_base =
3091 mem_descr->mem_array[idx].virtual_address;
3092 pasync_ctx->async_data.pa_base.u.a64.address =
3093 mem_descr->mem_array[idx].
3094 bus_address.u.a64.address;
3096 num_async_data = ((mem_descr->mem_array[idx].size) /
3097 phba->params.defpdu_data_sz);
/* One header handle and one data handle per CID. */
3100 for (index = 0; index < BEISCSI_GET_CID_COUNT
3101 (phba, ulp_num); index++) {
3102 pasync_header_h->cri = -1;
3103 pasync_header_h->index = (char)index;
3104 INIT_LIST_HEAD(&pasync_header_h->link);
3105 pasync_header_h->pbuffer =
3106 (void *)((unsigned long)
3108 async_header.va_base) +
3109 (p->defpdu_hdr_sz * index));
3111 pasync_header_h->pa.u.a64.address =
3112 pasync_ctx->async_header.pa_base.u.a64.
3113 address + (p->defpdu_hdr_sz * index);
3115 list_add_tail(&pasync_header_h->link,
3116 &pasync_ctx->async_header.
3119 pasync_ctx->async_header.free_entries++;
3120 pasync_ctx->async_header.writables++;
3122 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3124 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3126 pasync_data_h->cri = -1;
3127 pasync_data_h->index = (char)index;
3128 INIT_LIST_HEAD(&pasync_data_h->link);
/* Current fragment exhausted: advance to the next mem_array slot. */
3130 if (!num_async_data) {
3133 pasync_ctx->async_data.va_base =
3134 mem_descr->mem_array[idx].
3136 pasync_ctx->async_data.pa_base.u.
3138 mem_descr->mem_array[idx].
3139 bus_address.u.a64.address;
3141 ((mem_descr->mem_array[idx].
3143 phba->params.defpdu_data_sz);
3145 pasync_data_h->pbuffer =
3146 (void *)((unsigned long)
3147 (pasync_ctx->async_data.va_base) +
3148 (p->defpdu_data_sz * num_per_mem));
3150 pasync_data_h->pa.u.a64.address =
3151 pasync_ctx->async_data.pa_base.u.a64.
3152 address + (p->defpdu_data_sz *
3157 list_add_tail(&pasync_data_h->link,
3158 &pasync_ctx->async_data.
3161 pasync_ctx->async_data.free_entries++;
3162 pasync_ctx->async_data.writables++;
3164 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
/* Reset ring producer/consumer bookkeeping for header and data rings. */
3168 pasync_ctx->async_header.host_write_ptr = 0;
3169 pasync_ctx->async_header.ep_read_ptr = -1;
3170 pasync_ctx->async_data.host_write_ptr = 0;
3171 pasync_ctx->async_data.ep_read_ptr = -1;
3179 be_sgl_create_contiguous(void *virtual_address,
3180 u64 physical_address, u32 length,
3181 struct be_dma_mem *sgl)
3183 WARN_ON(!virtual_address);
3184 WARN_ON(!physical_address);
3185 WARN_ON(!length > 0);
3188 sgl->va = virtual_address;
3189 sgl->dma = (unsigned long)physical_address;
3195 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
3197 memset(sgl, 0, sizeof(*sgl));
/*
 * hwi_build_be_sgl_arr()- Rebuild @sgl to describe the fragment in
 * @pmem (VA + bus address); clears any previous contents first.
 */
3201 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
3202 struct mem_array *pmem, struct be_dma_mem *sgl)
3205 be_sgl_destroy_contiguous(sgl);
3207 be_sgl_create_contiguous(pmem->virtual_address,
3208 pmem->bus_address.u.a64.address,
/*
 * hwi_build_be_sgl_by_offset()- Same as hwi_build_be_sgl_arr() but the
 * elided third argument presumably applies an offset into the fragment
 * — TODO confirm against the full source.
 */
3213 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3214 struct mem_array *pmem, struct be_dma_mem *sgl)
3217 be_sgl_destroy_contiguous(sgl);
3219 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3220 pmem->bus_address.u.a64.address,
/*
 * be_fill_queue()- Initialize a be_queue_info over caller-provided,
 * already-allocated storage: records len/entry_size, points dma_mem.va
 * at @vaddress and zeroes the backing memory. Returns 0 on success.
 */
3224 static int be_fill_queue(struct be_queue_info *q,
3225 u16 len, u16 entry_size, void *vaddress)
3227 struct be_dma_mem *mem = &q->dma_mem;
3229 memset(q, 0, sizeof(*q));
3231 q->entry_size = entry_size;
3232 mem->size = len * entry_size;
3236 memset(mem->va, 0, mem->size);
/*
 * beiscsi_create_eqs()- Allocate DMA memory for one event queue per
 * CPU (plus one extra EQ for MCC when MSI-X is enabled) and create the
 * EQs in firmware. On any failure, frees the EQs created so far
 * (create_eq_error path). Returns 0 on success.
 */
3240 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3241 struct hwi_context_memory *phwi_context)
3243 unsigned int i, num_eq_pages;
3244 int ret = 0, eq_for_mcc;
3245 struct be_queue_info *eq;
3246 struct be_dma_mem *mem;
3250 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3251 sizeof(struct be_eq_entry));
/* With MSI-X an extra EQ is reserved for the MCC completion path. */
3253 if (phba->msix_enabled)
3257 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3258 eq = &phwi_context->be_eq[i].q;
3260 phwi_context->be_eq[i].phba = phba;
3261 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3262 num_eq_pages * PAGE_SIZE,
3265 goto create_eq_error;
3267 mem->va = eq_vaddress;
3268 ret = be_fill_queue(eq, phba->params.num_eq_entries,
3269 sizeof(struct be_eq_entry), eq_vaddress);
3271 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3272 "BM_%d : be_fill_queue Failed for EQ\n");
3273 goto create_eq_error;
3277 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3278 phwi_context->cur_eqd);
3280 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3281 "BM_%d : beiscsi_cmd_eq_create"
3283 goto create_eq_error;
3286 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3287 "BM_%d : eqid = %d\n",
3288 phwi_context->be_eq[i].q.id);
/* Error unwind: release DMA memory of every EQ allocated above. */
3292 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3293 eq = &phwi_context->be_eq[i].q;
3296 pci_free_consistent(phba->pcidev, num_eq_pages
/*
 * beiscsi_create_cqs()- Allocate DMA memory for one completion queue
 * per CPU, bind each CQ to its matching EQ, and create the CQs in
 * firmware. Frees already-created CQs on failure (create_cq_error).
 * Returns 0 on success.
 */
3303 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3304 struct hwi_context_memory *phwi_context)
3306 unsigned int i, num_cq_pages;
3308 struct be_queue_info *cq, *eq;
3309 struct be_dma_mem *mem;
3310 struct be_eq_obj *pbe_eq;
3314 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3315 sizeof(struct sol_cqe));
3317 for (i = 0; i < phba->num_cpus; i++) {
3318 cq = &phwi_context->be_cq[i];
3319 eq = &phwi_context->be_eq[i].q;
3320 pbe_eq = &phwi_context->be_eq[i];
3322 pbe_eq->phba = phba;
3324 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3325 num_cq_pages * PAGE_SIZE,
3328 goto create_cq_error;
3329 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3330 sizeof(struct sol_cqe), cq_vaddress);
3332 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3333 "BM_%d : be_fill_queue Failed "
3335 goto create_cq_error;
3339 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3342 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3343 "BM_%d : beiscsi_cmd_eq_create"
3344 "Failed for ISCSI CQ\n");
3345 goto create_cq_error;
3347 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3348 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3349 "iSCSI CQ CREATED\n", cq->id, eq->id);
/* Error unwind: release DMA memory of every CQ allocated above. */
3354 for (i = 0; i < phba->num_cpus; i++) {
3355 cq = &phwi_context->be_cq[i];
3358 pci_free_consistent(phba->pcidev, num_cq_pages
/*
 * beiscsi_create_def_hdr()- Build the per-ULP default PDU header ring
 * over the HWI_MEM_ASYNC_HEADER_RING region, create it in firmware
 * against CQ 0, then post the initial header buffers. Returns 0 on
 * success, negative on failure.
 */
3367 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3368 struct hwi_context_memory *phwi_context,
3369 struct hwi_controller *phwi_ctrlr,
3370 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3374 struct be_queue_info *dq, *cq;
3375 struct be_dma_mem *mem;
3376 struct be_mem_descriptor *mem_descr;
3380 dq = &phwi_context->be_def_hdrq[ulp_num];
3381 cq = &phwi_context->be_cq[0];
3383 mem_descr = phba->init_mem;
3384 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3385 (ulp_num * MEM_DESCR_OFFSET);
3386 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
/* Ring entries are phys_addr descriptors pointing at header buffers. */
3387 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3388 sizeof(struct phys_addr),
3389 sizeof(struct phys_addr), dq_vaddress);
3391 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3392 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3397 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3398 bus_address.u.a64.address;
3399 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3401 phba->params.defpdu_hdr_sz,
3402 BEISCSI_DEFQ_HDR, ulp_num);
3404 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3405 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3411 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3412 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3414 phwi_context->be_def_hdrq[ulp_num].id);
/* Prime the new ring with free header buffers. */
3415 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
/*
 * beiscsi_create_def_data()- Data-ring twin of beiscsi_create_def_hdr():
 * builds the per-ULP default PDU DATA ring over the
 * HWI_MEM_ASYNC_DATA_RING region, creates it in firmware against CQ 0
 * and posts the initial data buffers. Returns 0 on success.
 */
3420 beiscsi_create_def_data(struct beiscsi_hba *phba,
3421 struct hwi_context_memory *phwi_context,
3422 struct hwi_controller *phwi_ctrlr,
3423 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3427 struct be_queue_info *dataq, *cq;
3428 struct be_dma_mem *mem;
3429 struct be_mem_descriptor *mem_descr;
3433 dataq = &phwi_context->be_def_dataq[ulp_num];
3434 cq = &phwi_context->be_cq[0];
3435 mem = &dataq->dma_mem;
3436 mem_descr = phba->init_mem;
3437 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3438 (ulp_num * MEM_DESCR_OFFSET);
3439 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3440 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3441 sizeof(struct phys_addr),
3442 sizeof(struct phys_addr), dq_vaddress);
3444 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3445 "BM_%d : be_fill_queue Failed for DEF PDU "
3446 "DATA on ULP : %d\n",
3451 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3452 bus_address.u.a64.address;
3453 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3455 phba->params.defpdu_data_sz,
3456 BEISCSI_DEFQ_DATA, ulp_num);
3458 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3459 "BM_%d be_cmd_create_default_pdu_queue"
3460 " Failed for DEF PDU DATA on ULP : %d\n",
3465 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3466 "BM_%d : iscsi def data id on ULP : %d is %d\n",
3468 phwi_context->be_def_dataq[ulp_num].id);
/* Prime the new ring with free data buffers. */
3470 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
3471 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3472 "BM_%d : DEFAULT PDU DATA RING CREATED"
3473 "on ULP : %d\n", ulp_num);
/*
 * beiscsi_post_template_hdr()- For every supported ULP, post the
 * template-header pages (HWI_MEM_TEMPLATE_HDR region) to the adapter
 * via an SGL built over the region's first fragment. Returns 0 on
 * success, stops at the first firmware failure.
 */
3480 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3482 struct be_mem_descriptor *mem_descr;
3483 struct mem_array *pm_arr;
3484 struct be_dma_mem sgl;
3485 int status, ulp_num;
3487 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3488 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3489 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3490 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3491 (ulp_num * MEM_DESCR_OFFSET);
3492 pm_arr = mem_descr->mem_array;
3494 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3495 status = be_cmd_iscsi_post_template_hdr(
3499 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3500 "BM_%d : Post Template HDR Failed for"
3501 "ULP_%d\n", ulp_num);
3505 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3506 "BM_%d : Template HDR Pages Posted for"
3507 "ULP_%d\n", ulp_num);
/*
 * beiscsi_post_pages()- Post the HWI_MEM_SGE region (iSCSI SGE pool)
 * to the adapter fragment by fragment, tracking the running page
 * offset. The offset starts at the ICD range of the first supported
 * ULP. Returns 0 on success.
 */
3514 beiscsi_post_pages(struct beiscsi_hba *phba)
3516 struct be_mem_descriptor *mem_descr;
3517 struct mem_array *pm_arr;
3518 unsigned int page_offset, i;
3519 struct be_dma_mem sgl;
3520 int status, ulp_num = 0;
3522 mem_descr = phba->init_mem;
3523 mem_descr += HWI_MEM_SGE;
3524 pm_arr = mem_descr->mem_array;
/* Find the first supported ULP; its icd_start seeds the page offset. */
3526 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3527 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3530 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3531 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3532 for (i = 0; i < mem_descr->num_elements; i++) {
3533 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3534 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3536 (pm_arr->size / PAGE_SIZE));
3537 page_offset += pm_arr->size / PAGE_SIZE;
3539 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3540 "BM_%d : post sgl failed.\n");
3545 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3546 "BM_%d : POSTED PAGES\n");
/*
 * be_queue_free()- Release a queue's DMA-coherent backing memory
 * allocated by be_queue_alloc().
 */
3550 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3552 struct be_dma_mem *mem = &q->dma_mem;
3554 pci_free_consistent(phba->pcidev, mem->size,
/*
 * be_queue_alloc()- Initialize a be_queue_info and allocate zeroed
 * DMA-coherent backing memory for len entries of entry_size bytes.
 * Counterpart of be_queue_free(). Returns 0 on success.
 */
3560 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3561 u16 len, u16 entry_size)
3563 struct be_dma_mem *mem = &q->dma_mem;
3565 memset(q, 0, sizeof(*q));
3567 q->entry_size = entry_size;
3568 mem->size = len * entry_size;
3569 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
/*
 * beiscsi_create_wrb_rings()- Slice the HWI_MEM_WRB region into one
 * WRB ring per connection (spilling into the next fragment when the
 * current one runs out), distribute connections round-robin across the
 * supported ULPs, and create each WRBQ in firmware. Returns 0 on
 * success.
 */
3576 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3577 struct hwi_context_memory *phwi_context,
3578 struct hwi_controller *phwi_ctrlr)
3580 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3582 unsigned int idx, num, i, ulp_num;
3583 struct mem_array *pwrb_arr;
3585 struct be_dma_mem sgl;
3586 struct be_mem_descriptor *mem_descr;
3587 struct hwi_wrb_context *pwrb_context;
3589 uint8_t ulp_count = 0, ulp_base_num = 0;
3590 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3593 mem_descr = phba->init_mem;
3594 mem_descr += HWI_MEM_WRB;
3595 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3598 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3599 "BM_%d : Memory alloc failed in create wrb ring.\n");
3602 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3603 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3604 num_wrb_rings = mem_descr->mem_array[idx].size /
3605 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
/* Carve one ring per connection; fall through to the next fragment
 * when the current fragment has no whole ring left.
 */
3607 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3608 if (num_wrb_rings) {
3609 pwrb_arr[num].virtual_address = wrb_vaddr;
3610 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3611 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3612 sizeof(struct iscsi_wrb);
3613 wrb_vaddr += pwrb_arr[num].size;
3614 pa_addr_lo += pwrb_arr[num].size;
3618 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3619 pa_addr_lo = mem_descr->mem_array[idx].\
3620 bus_address.u.a64.address;
3621 num_wrb_rings = mem_descr->mem_array[idx].size /
3622 (phba->params.wrbs_per_cxn *
3623 sizeof(struct iscsi_wrb));
3624 pwrb_arr[num].virtual_address = wrb_vaddr;
3625 pwrb_arr[num].bus_address.u.a64.address\
3627 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3628 sizeof(struct iscsi_wrb);
3629 wrb_vaddr += pwrb_arr[num].size;
3630 pa_addr_lo += pwrb_arr[num].size;
3635 /* Get the ULP Count */
3636 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3637 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3639 ulp_base_num = ulp_num;
3640 cid_count_ulp[ulp_num] =
3641 BEISCSI_GET_CID_COUNT(phba, ulp_num);
3644 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
/* With multiple ULPs, round-robin CIDs across ULPs that still have
 * capacity (cid_count_ulp tracks remaining CIDs per ULP).
 */
3649 if (ulp_count > 1) {
3650 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3652 if (!cid_count_ulp[ulp_base_num])
3653 ulp_base_num = (ulp_base_num + 1) %
3656 cid_count_ulp[ulp_base_num]--;
3660 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3661 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3662 &phwi_context->be_wrbq[i],
3663 &phwi_ctrlr->wrb_context[i],
3666 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3667 "BM_%d : wrbq create failed.");
3671 pwrb_context = &phwi_ctrlr->wrb_context[i];
3672 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
/*
 * free_wrb_handles - release the per-connection WRB handle arrays.
 * Iterates every wrb_context of the controller and kfree()s both
 * handle arrays (pwrb_handle_base / pwrb_handle_basestd) that were
 * allocated at init time. kfree(NULL) is a no-op, so partially
 * initialized contexts are safe here.
 */
3678 static void free_wrb_handles(struct beiscsi_hba *phba)
3681 	struct hwi_controller *phwi_ctrlr;
3682 	struct hwi_wrb_context *pwrb_context;
3684 	phwi_ctrlr = phba->phwi_ctrlr;
3685 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3686 		pwrb_context = &phwi_ctrlr->wrb_context[index];
3687 		kfree(pwrb_context->pwrb_handle_base);
3688 		kfree(pwrb_context->pwrb_handle_basestd);
/*
 * be_mcc_queues_destroy - tear down the MCC queue pair.
 * Destroys the MCC work queue first, then its completion queue
 * (reverse of creation order in be_mcc_queues_create), freeing the
 * host-side queue memory after each firmware destroy command.
 */
3692 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3694 	struct be_queue_info *q;
3695 	struct be_ctrl_info *ctrl = &phba->ctrl;
3697 	q = &phba->ctrl.mcc_obj.q;
3699 	beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3700 	be_queue_free(phba, q);
3702 	q = &phba->ctrl.mcc_obj.cq;
3704 	beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3705 	be_queue_free(phba, q);
/*
 * hwi_cleanup - undo everything hwi_init_port() set up.
 * Destroys firmware-side queues in reverse creation order: WRB queues,
 * default PDU header/data queues per supported ULP, posted SGL pages,
 * per-CPU CQs, the MCC queue pair, and finally all EQs, ending with a
 * firmware uninit command.
 */
3708 static void hwi_cleanup(struct beiscsi_hba *phba)
3710 	struct be_queue_info *q;
3711 	struct be_ctrl_info *ctrl = &phba->ctrl;
3712 	struct hwi_controller *phwi_ctrlr;
3713 	struct hwi_context_memory *phwi_context;
3714 	struct hwi_async_pdu_context *pasync_ctx;
3715 	int i, eq_for_mcc, ulp_num;
3717 	phwi_ctrlr = phba->phwi_ctrlr;
3718 	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* Remove the iSCSI template header posted during init. */
3720 	be_cmd_iscsi_remove_template_hdr(ctrl);
3722 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3723 		q = &phwi_context->be_wrbq[i];
3725 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3727 	kfree(phwi_context->be_wrbq);
3728 	free_wrb_handles(phba);
	/* Default PDU queues exist only for ULPs the firmware supports. */
3730 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3731 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3733 			q = &phwi_context->be_def_hdrq[ulp_num];
3735 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3737 			q = &phwi_context->be_def_dataq[ulp_num];
3739 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3741 			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
	/* NULL queue: QTYPE_SGL destroy removes the posted SGL pages. */
3745 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3747 	for (i = 0; i < (phba->num_cpus); i++) {
3748 		q = &phwi_context->be_cq[i];
3750 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3753 	be_mcc_queues_destroy(phba);
	/* With MSI-X an extra EQ was created for MCC; include it below. */
3754 	if (phba->msix_enabled)
3758 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3759 		q = &phwi_context->be_eq[i].q;
3761 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3763 	be_cmd_fw_uninit(ctrl);
/*
 * be_mcc_queues_create - allocate and create the MCC queue pair.
 * Allocates the MCC completion queue and asks the firmware to create
 * it on the appropriate EQ (the extra EQ past num_cpus when MSI-X is
 * enabled, EQ 0 otherwise), then allocates and creates the MCC work
 * queue itself. Error paths unwind in reverse order.
 */
3766 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3767 				struct hwi_context_memory *phwi_context)
3769 	struct be_queue_info *q, *cq;
3770 	struct be_ctrl_info *ctrl = &phba->ctrl;
3772 	/* Alloc MCC compl queue */
3773 	cq = &phba->ctrl.mcc_obj.cq;
3774 	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3775 			sizeof(struct be_mcc_compl)))
3777 	/* Ask BE to create MCC compl queue; */
3778 	if (phba->msix_enabled) {
3779 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3780 					 [phba->num_cpus].q, false, true, 0))
3783 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3788 	/* Alloc MCC queue */
3789 	q = &phba->ctrl.mcc_obj.q;
3790 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3791 		goto mcc_cq_destroy;
3793 	/* Ask BE to create MCC queue */
3794 	if (beiscsi_cmd_mccq_create(phba, q, cq))
	/* Error unwind: free MCCQ memory, destroy CQ, free CQ memory. */
3800 	be_queue_free(phba, q);
3802 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3804 	be_queue_free(phba, cq);
3810  * find_num_cpus()- Get the CPU online count
3811  * @phba: ptr to priv structure
3813  * CPU count is used for creating EQ.
3815 static void find_num_cpus(struct beiscsi_hba *phba)
3819 	num_cpus = num_online_cpus();
	/* EQ count limits differ per adapter generation. */
3821 	switch (phba->generation) {
3824 		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3825 				 BEISCSI_MAX_NUM_CPUS : num_cpus;
3829 		 * If eqid_count == 1 fall back to
3832 		if (phba->fw_config.eqid_count == 1) {
	/* One EQ id is reserved (for MCC), hence eqid_count - 1. */
3839 			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
3840 			(phba->fw_config.eqid_count - 1) : num_cpus;
/*
 * hwi_init_port - create all firmware queues and post resources.
 * Order: firmware init, EQs, MCC queues, firmware version check, CQs,
 * per-ULP default PDU header/data queues, SGL page posting, template
 * header posting, WRB rings, and finally the CID -> async CRI map.
 * Returns 0 on success; on any failure logs and returns an error.
 */
3847 static int hwi_init_port(struct beiscsi_hba *phba)
3849 	struct hwi_controller *phwi_ctrlr;
3850 	struct hwi_context_memory *phwi_context;
3851 	unsigned int def_pdu_ring_sz;
3852 	struct be_ctrl_info *ctrl = &phba->ctrl;
3853 	int status, ulp_num;
3855 	phwi_ctrlr = phba->phwi_ctrlr;
3856 	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* EQ delay (interrupt moderation) bounds; start uncoalesced. */
3857 	phwi_context->max_eqd = 128;
3858 	phwi_context->min_eqd = 0;
3859 	phwi_context->cur_eqd = 0;
3860 	be_cmd_fw_initialize(&phba->ctrl);
3862 	status = beiscsi_create_eqs(phba, phwi_context);
3864 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3865 			    "BM_%d : EQ not created\n");
3869 	status = be_mcc_queues_create(phba, phwi_context);
3873 	status = mgmt_check_supported_fw(ctrl, phba);
3875 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3876 			    "BM_%d : Unsupported fw version\n");
3880 	status = beiscsi_create_cqs(phba, phwi_context);
3882 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3883 			    "BM_%d : CQ not created\n");
	/* Default PDU queues are created per supported ULP, sized by
	 * that ULP's CID count. */
3887 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3888 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3891 				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3892 				sizeof(struct phys_addr);
3894 			status = beiscsi_create_def_hdr(phba, phwi_context,
3899 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3900 					    "BM_%d : Default Header not created for ULP : %d\n",
3905 			status = beiscsi_create_def_data(phba, phwi_context,
3910 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3911 					    "BM_%d : Default Data not created for ULP : %d\n",
3918 	status = beiscsi_post_pages(phba);
3920 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3921 			    "BM_%d : Post SGL Pages Failed\n");
3925 	status = beiscsi_post_template_hdr(phba);
3927 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3928 			    "BM_%d : Template HDR Posting for CXN Failed\n");
3931 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
3933 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3934 			    "BM_%d : WRB Rings not created\n");
	/* Build the CID -> async CRI mapping for each supported ULP. */
3938 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3939 		uint16_t async_arr_idx = 0;
3941 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3943 			struct hwi_async_pdu_context *pasync_ctx;
3945 			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3946 				     phwi_ctrlr, ulp_num);
3948 				     phba->params.cxns_per_ctrl; cri++) {
3949 				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3951 					pasync_ctx->cid_to_async_cri_map[
3952 					phwi_ctrlr->wrb_context[cri].cid] =
3958 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3959 		    "BM_%d : hwi_init_port success\n");
3963 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3964 		    "BM_%d : hwi_init_port failed");
/*
 * hwi_init_controller - top-level HWI bring-up.
 * Points phwi_ctxt at the single HWI_MEM_ADDN_CONTEXT memory element
 * (exactly one element is required), initializes global iSCSI
 * templates, WRB handles and async PDU contexts, then creates all
 * queues via hwi_init_port().
 */
3969 static int hwi_init_controller(struct beiscsi_hba *phba)
3971 	struct hwi_controller *phwi_ctrlr;
3973 	phwi_ctrlr = phba->phwi_ctrlr;
	/* The additional-context region must be a single contiguous
	 * element; bail out otherwise. */
3974 	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3975 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3976 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3977 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3978 			    "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3979 			    phwi_ctrlr->phwi_ctxt);
3981 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3982 			    "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3983 			    "than one element.Failing to load\n");
3987 	iscsi_init_global_templates(phba);
3988 	if (beiscsi_init_wrb_handle(phba))
3991 	if (hwi_init_async_pdu_ctx(phba)) {
3992 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3993 			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
3997 	if (hwi_init_port(phba) != 0) {
3998 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3999 			    "BM_%d : hwi_init_controller failed\n");
/*
 * beiscsi_free_mem - free all DMA memory described by phba->init_mem.
 * Walks every SE_MEM_MAX descriptor, frees each element's coherent
 * DMA buffer (in reverse element order), then frees the element
 * arrays, the descriptor table and the WRB/controller bookkeeping.
 */
4006 static void beiscsi_free_mem(struct beiscsi_hba *phba)
4008 	struct be_mem_descriptor *mem_descr;
4011 	mem_descr = phba->init_mem;
4014 	for (i = 0; i < SE_MEM_MAX; i++) {
4015 		for (j = mem_descr->num_elements; j > 0; j--) {
4016 			pci_free_consistent(phba->pcidev,
4017 			  mem_descr->mem_array[j - 1].size,
4018 			  mem_descr->mem_array[j - 1].virtual_address,
4019 			  (unsigned long)mem_descr->mem_array[j - 1].
4020 			  bus_address.u.a64.address);
4023 		kfree(mem_descr->mem_array);
4026 	kfree(phba->init_mem);
4027 	kfree(phba->phwi_ctrlr->wrb_context);
4028 	kfree(phba->phwi_ctrlr);
/*
 * beiscsi_init_controller - allocate driver memory and init the HWI.
 * On hwi_init_controller() failure the already-allocated init_mem is
 * released via beiscsi_free_mem().
 */
4031 static int beiscsi_init_controller(struct beiscsi_hba *phba)
4035 	ret = beiscsi_get_memory(phba);
4037 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4038 			    "BM_%d : beiscsi_dev_probe -"
4039 			    "Failed in beiscsi_alloc_memory\n");
4043 	ret = hwi_init_controller(phba);
4046 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4047 		    "BM_%d : Return success from beiscsi_init_controller");
4052 	beiscsi_free_mem(phba);
/*
 * beiscsi_init_sgl_handle - build the IO and EH SGL handle pools.
 * Allocates pointer arrays for IO SGL handles (ios_per_ctrl entries)
 * and extra-handle (EH) SGL handles (icds_per_ctrl - ios_per_ctrl
 * entries), carves sgl_handle structs out of the HWI_MEM_SGLH region,
 * then binds each handle to its iscsi_sge fragment from HWI_MEM_SGE
 * and assigns sgl_index values starting at the ULP's ICD start.
 */
4056 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
4058 	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
4059 	struct sgl_handle *psgl_handle;
4060 	struct iscsi_sge *pfrag;
4061 	unsigned int arr_index, i, idx;
4062 	unsigned int ulp_icd_start, ulp_num = 0;
4064 	phba->io_sgl_hndl_avbl = 0;
4065 	phba->eh_sgl_hndl_avbl = 0;
4067 	mem_descr_sglh = phba->init_mem;
4068 	mem_descr_sglh += HWI_MEM_SGLH;
	/* SGLH region must be one contiguous element. */
4069 	if (1 == mem_descr_sglh->num_elements) {
4070 		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4071 						 phba->params.ios_per_ctrl,
4073 		if (!phba->io_sgl_hndl_base) {
4074 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4075 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
4078 		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4079 						 (phba->params.icds_per_ctrl -
4080 						  phba->params.ios_per_ctrl),
4082 		if (!phba->eh_sgl_hndl_base) {
	/* Unwind the first allocation on second-allocation failure. */
4083 			kfree(phba->io_sgl_hndl_base);
4084 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4085 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
4089 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4090 			    "BM_%d : HWI_MEM_SGLH is more than one element."
4091 			    "Failing to load\n");
	/* First ios_per_ctrl handles go to the IO pool, the rest to EH. */
4097 	while (idx < mem_descr_sglh->num_elements) {
4098 		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
4100 		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
4101 				 sizeof(struct sgl_handle)); i++) {
4102 			if (arr_index < phba->params.ios_per_ctrl) {
4103 				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
4104 				phba->io_sgl_hndl_avbl++;
4107 				phba->eh_sgl_hndl_base[arr_index -
4108 						phba->params.ios_per_ctrl] =
4111 				phba->eh_sgl_hndl_avbl++;
4117 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4118 		    "BM_%d : phba->io_sgl_hndl_avbl=%d"
4119 		    "phba->eh_sgl_hndl_avbl=%d\n",
4120 		    phba->io_sgl_hndl_avbl,
4121 		    phba->eh_sgl_hndl_avbl);
4123 	mem_descr_sg = phba->init_mem;
4124 	mem_descr_sg += HWI_MEM_SGE;
4125 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4126 		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
4127 		    mem_descr_sg->num_elements);
	/* Pick the first supported ULP to get the ICD start offset. */
4129 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
4130 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
4133 	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4137 	while (idx < mem_descr_sg->num_elements) {
4138 		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4141 		     i < (mem_descr_sg->mem_array[idx].size) /
4142 		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4144 			if (arr_index < phba->params.ios_per_ctrl)
4145 				psgl_handle = phba->io_sgl_hndl_base[arr_index];
4147 				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4148 						phba->params.ios_per_ctrl];
4149 			psgl_handle->pfrag = pfrag;
	/* Clear the SGE address; filled in when the SGL is written. */
4150 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4151 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4152 			pfrag += phba->params.num_sge_per_io;
4153 			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
4157 	phba->io_sgl_free_index = 0;
4158 	phba->io_sgl_alloc_index = 0;
4159 	phba->eh_sgl_free_index = 0;
4160 	phba->eh_sgl_alloc_index = 0;
/*
 * hba_setup_cid_tbls - build per-ULP CID pools and connection tables.
 * For every supported ULP, allocates a ulp_cid_info with a CID array
 * sized by that ULP's CID count; then allocates the global ep_array
 * and conn_table (indexed by connection), and populates each ULP's
 * CID array from the wrb_context CIDs. All error paths free whatever
 * was allocated so far.
 */
4164 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4167 	uint16_t i, ulp_num;
4168 	struct ulp_cid_info *ptr_cid_info = NULL;
4170 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4171 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4172 			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4175 			if (!ptr_cid_info) {
4176 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4177 					    "BM_%d : Failed to allocate memory"
4178 					    "for ULP_CID_INFO for ULP : %d\n",
4185 			/* Allocate memory for CID array */
4186 			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
4187 						  BEISCSI_GET_CID_COUNT(phba,
4188 						  ulp_num), GFP_KERNEL);
4189 			if (!ptr_cid_info->cid_array) {
4190 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4191 					    "BM_%d : Failed to allocate memory"
4192 					    "for CID_ARRAY for ULP : %d\n",
4194 				kfree(ptr_cid_info);
4195 				ptr_cid_info = NULL;
4200 			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4203 			/* Save the cid_info_array ptr */
4204 			phba->cid_array_info[ulp_num] = ptr_cid_info;
4207 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
4208 				 phba->params.cxns_per_ctrl, GFP_KERNEL);
4209 	if (!phba->ep_array) {
4210 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4211 			    "BM_%d : Failed to allocate memory in "
4212 			    "hba_setup_cid_tbls\n");
4218 	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
4219 				   phba->params.cxns_per_ctrl, GFP_KERNEL);
4220 	if (!phba->conn_table) {
4221 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4222 			    "BM_%d : Failed to allocate memory in"
4223 			    "hba_setup_cid_tbls\n");
	/* conn_table failed: drop ep_array too before unwinding. */
4225 		kfree(phba->ep_array);
4226 		phba->ep_array = NULL;
	/* Distribute each connection's CID into its ULP's pool. */
4232 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4233 		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4235 		ptr_cid_info = phba->cid_array_info[ulp_num];
4236 		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4237 			phba->phwi_ctrlr->wrb_context[i].cid;
	/* Reset alloc/free cursors now that the pools are filled. */
4241 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4242 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4243 			ptr_cid_info = phba->cid_array_info[ulp_num];
4245 			ptr_cid_info->cid_alloc = 0;
4246 			ptr_cid_info->cid_free = 0;
	/* Error unwind: free every per-ULP cid_info created above. */
4252 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4253 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4254 			ptr_cid_info = phba->cid_array_info[ulp_num];
4257 			kfree(ptr_cid_info->cid_array);
4258 			kfree(ptr_cid_info);
4259 			phba->cid_array_info[ulp_num] = NULL;
/*
 * hwi_enable_intr - enable host interrupts and arm the event queues.
 * Sets the host-interrupt bit in the PCI membar control register if
 * not already set, then rings the EQ doorbell(s): only EQ 0 in INTx
 * mode, or every per-CPU EQ (plus the MCC EQ) in MSI-X mode.
 */
4267 static void hwi_enable_intr(struct beiscsi_hba *phba)
4269 	struct be_ctrl_info *ctrl = &phba->ctrl;
4270 	struct hwi_controller *phwi_ctrlr;
4271 	struct hwi_context_memory *phwi_context;
4272 	struct be_queue_info *eq;
4277 	phwi_ctrlr = phba->phwi_ctrlr;
4278 	phwi_context = phwi_ctrlr->phwi_ctxt;
4280 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4281 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4282 	reg = ioread32(addr);
4284 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4286 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4287 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4288 			    "BM_%d : reg =x%08x addr=%p\n", reg, addr);
4289 		iowrite32(reg, addr);
4292 	if (!phba->msix_enabled) {
4293 		eq = &phwi_context->be_eq[0].q;
4294 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4295 			    "BM_%d : eq->id=%d\n", eq->id);
4297 		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	/* MSI-X: arm all num_cpus EQs plus the extra MCC EQ (<=). */
4299 		for (i = 0; i <= phba->num_cpus; i++) {
4300 			eq = &phwi_context->be_eq[i].q;
4301 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4302 				    "BM_%d : eq->id=%d\n", eq->id);
4303 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
/*
 * hwi_disable_intr - clear the host-interrupt enable bit.
 * If the bit is already clear, only logs a warning.
 */
4308 static void hwi_disable_intr(struct beiscsi_hba *phba)
4310 	struct be_ctrl_info *ctrl = &phba->ctrl;
4312 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4313 	u32 reg = ioread32(addr);
4315 	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4317 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4318 		iowrite32(reg, addr);
4320 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4321 			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
 * beiscsi_get_boot_info()- Get the boot session info
4326  * @phba: The device priv structure instance
4328  * Get the boot target info and store in driver priv structure
4332  *	Failure: Non-Zero Value
4334 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4336 	struct be_cmd_get_session_resp *session_resp;
4337 	struct be_dma_mem nonemb_cmd;
4339 	unsigned int s_handle;
4342 	/* Get the session handle of the boot target */
4343 	ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4345 		beiscsi_log(phba, KERN_ERR,
4346 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4347 			    "BM_%d : No boot session\n");
	/* Non-embedded command: response lands in a DMA buffer. */
4356 	nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
4357 					      sizeof(*session_resp),
4359 	if (nonemb_cmd.va == NULL) {
4360 		beiscsi_log(phba, KERN_ERR,
4361 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4362 			    "BM_%d : Failed to allocate memory for"
4363 			    "beiscsi_get_session_info\n");
4368 	tag = mgmt_get_session_info(phba, s_handle,
4371 		beiscsi_log(phba, KERN_ERR,
4372 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4373 			    "BM_%d : beiscsi_get_session_info"
	/* Wait for the MCC command to complete. */
4379 	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
4381 		beiscsi_log(phba, KERN_ERR,
4382 			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4383 			    "BM_%d : beiscsi_get_session_info Failed");
4391 	session_resp = nonemb_cmd.va ;
	/* Cache the boot session details for the boot sysfs kset. */
4393 	memcpy(&phba->boot_sess, &session_resp->session_info,
4394 	       sizeof(struct mgmt_session_info));
4398 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4399 			    nonemb_cmd.va, nonemb_cmd.dma);
/*
 * beiscsi_boot_release - boot kobject release callback.
 * Drops the Scsi_Host reference taken when the boot kobject was
 * created in beiscsi_setup_boot_info().
 */
4403 static void beiscsi_boot_release(void *data)
4405 	struct beiscsi_hba *phba = data;
4407 	scsi_host_put(phba->shost);
/*
 * beiscsi_setup_boot_info - expose boot target info via sysfs.
 * Creates the iscsi_boot kset plus target/initiator/ethernet kobjects,
 * each holding a Scsi_Host reference released by beiscsi_boot_release.
 * Idempotent: returns early if the kset already exists; boot info
 * retrieval failure is non-fatal by design.
 */
4410 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4412 	struct iscsi_boot_kobj *boot_kobj;
4414 	/* it has been created previously */
4415 	if (phba->boot_kset)
4418 	/* get boot info using mgmt cmd */
4419 	if (beiscsi_get_boot_info(phba))
4420 		/* Try to see if we can carry on without this */
4423 	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
4424 	if (!phba->boot_kset)
4427 	/* get a ref because the show function will ref the phba */
4428 	if (!scsi_host_get(phba->shost))
4430 	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
4431 					     beiscsi_show_boot_tgt_info,
4432 					     beiscsi_tgt_get_attr_visibility,
4433 					     beiscsi_boot_release);
4437 	if (!scsi_host_get(phba->shost))
4439 	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
4440 						beiscsi_show_boot_ini_info,
4441 						beiscsi_ini_get_attr_visibility,
4442 						beiscsi_boot_release);
4446 	if (!scsi_host_get(phba->shost))
4448 	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
4449 					       beiscsi_show_boot_eth_info,
4450 					       beiscsi_eth_get_attr_visibility,
4451 					       beiscsi_boot_release);
	/* Error unwind: drop the host ref and destroy the kset. */
4457 	scsi_host_put(phba->shost);
4459 	iscsi_boot_destroy_kset(phba->boot_kset);
/*
 * beiscsi_init_port - initialize controller, SGL pools and CID tables.
 * Failure of SGL or CID setup unwinds through the controller cleanup
 * path; CID table failure additionally frees both SGL handle arrays
 * allocated by beiscsi_init_sgl_handle().
 */
4463 static int beiscsi_init_port(struct beiscsi_hba *phba)
4467 	ret = beiscsi_init_controller(phba);
4469 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4470 			    "BM_%d : beiscsi_dev_probe - Failed in"
4471 			    "beiscsi_init_controller\n");
4474 	ret = beiscsi_init_sgl_handle(phba);
4476 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4477 			    "BM_%d : beiscsi_dev_probe - Failed in"
4478 			    "beiscsi_init_sgl_handle\n");
4479 		goto do_cleanup_ctrlr;
4482 	if (hba_setup_cid_tbls(phba)) {
4483 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4484 			    "BM_%d : Failed in hba_setup_cid_tbls\n");
4485 		kfree(phba->io_sgl_hndl_base);
4486 		kfree(phba->eh_sgl_hndl_base);
4487 		goto do_cleanup_ctrlr;
/*
 * hwi_purge_eq - drain all pending entries from every event queue.
 * Walks each EQ (including the MCC EQ when MSI-X is enabled),
 * invalidates every valid entry, and rearms the EQ doorbell with the
 * number of entries consumed.
 */
4497 static void hwi_purge_eq(struct beiscsi_hba *phba)
4499 	struct hwi_controller *phwi_ctrlr;
4500 	struct hwi_context_memory *phwi_context;
4501 	struct be_queue_info *eq;
4502 	struct be_eq_entry *eqe = NULL;
4504 	unsigned int num_processed;
4506 	phwi_ctrlr = phba->phwi_ctrlr;
4507 	phwi_context = phwi_ctrlr->phwi_ctxt;
4508 	if (phba->msix_enabled)
4513 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4514 		eq = &phwi_context->be_eq[i].q;
4515 		eqe = queue_tail_node(eq);
	/* Consume entries while their valid bit is set. */
4517 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4519 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4521 			eqe = queue_tail_node(eq);
4526 		hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
/*
 * beiscsi_clean_port - firmware EP cleanup plus host-side table frees.
 * Asks the firmware to clean up endpoints per supported ULP (failure
 * is only logged), then frees SGL handle arrays, the endpoint and
 * connection tables, and each per-ULP CID pool.
 */
4530 static void beiscsi_clean_port(struct beiscsi_hba *phba)
4532 	int mgmt_status, ulp_num;
4533 	struct ulp_cid_info *ptr_cid_info = NULL;
4535 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4536 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4537 			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4539 				beiscsi_log(phba, KERN_WARNING,
4541 					    "BM_%d : mgmt_epfw_cleanup FAILED"
4542 					    " for ULP_%d\n", ulp_num);
4548 	kfree(phba->io_sgl_hndl_base);
4549 	kfree(phba->eh_sgl_hndl_base);
4550 	kfree(phba->ep_array);
4551 	kfree(phba->conn_table);
4553 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4554 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4555 			ptr_cid_info = phba->cid_array_info[ulp_num];
4558 			kfree(ptr_cid_info->cid_array);
4559 			kfree(ptr_cid_info);
4560 			phba->cid_array_info[ulp_num] = NULL;
 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4569  * @beiscsi_conn: ptr to the conn to be cleaned up
4570  * @task: ptr to iscsi_task resource to be freed.
4572  * Free driver mgmt resources binded to CXN.
4575 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4576 				struct iscsi_task *task)
4578 	struct beiscsi_io_task *io_task;
4579 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4580 	struct hwi_wrb_context *pwrb_context;
4581 	struct hwi_controller *phwi_ctrlr;
4582 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4583 			     beiscsi_conn->beiscsi_conn_cid);
4585 	phwi_ctrlr = phba->phwi_ctrlr;
4586 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4588 	io_task = task->dd_data;
	/* Zero the WRB before returning its handle to the pool. */
4590 	if (io_task->pwrb_handle) {
4591 		memset(io_task->pwrb_handle->pwrb, 0,
4592 		       sizeof(struct iscsi_wrb));
4593 		free_wrb_handle(phba, pwrb_context,
4594 				io_task->pwrb_handle);
4595 		io_task->pwrb_handle = NULL;
	/* Return the mgmt SGL handle under the mgmt SGL lock. */
4598 	if (io_task->psgl_handle) {
4599 		spin_lock_bh(&phba->mgmt_sgl_lock);
4600 		free_mgmt_sgl_handle(phba,
4601 				     io_task->psgl_handle);
4602 		io_task->psgl_handle = NULL;
4603 		spin_unlock_bh(&phba->mgmt_sgl_lock);
	/* Unmap the mgmt data buffer if one was DMA-mapped. */
4606 	if (io_task->mtask_addr)
4607 		pci_unmap_single(phba->pcidev,
4608 				 io_task->mtask_addr,
4609 				 io_task->mtask_data_count,
 * beiscsi_cleanup_task()- Free driver resources of the task
4615  * @task: ptr to the iscsi task
4618 static void beiscsi_cleanup_task(struct iscsi_task *task)
4620 	struct beiscsi_io_task *io_task = task->dd_data;
4621 	struct iscsi_conn *conn = task->conn;
4622 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4623 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4624 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4625 	struct hwi_wrb_context *pwrb_context;
4626 	struct hwi_controller *phwi_ctrlr;
4627 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4628 			     beiscsi_conn->beiscsi_conn_cid);
4630 	phwi_ctrlr = phba->phwi_ctrlr;
4631 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* Return the BHS buffer to the per-session DMA pool. */
4633 	if (io_task->cmd_bhs) {
4634 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4635 			      io_task->bhs_pa.u.a64.address);
4636 		io_task->cmd_bhs = NULL;
4640 		if (io_task->pwrb_handle) {
4641 			free_wrb_handle(phba, pwrb_context,
4642 					io_task->pwrb_handle);
4643 			io_task->pwrb_handle = NULL;
4646 		if (io_task->psgl_handle) {
4647 			spin_lock(&phba->io_sgl_lock);
4648 			free_io_sgl_handle(phba, io_task->psgl_handle);
4649 			spin_unlock(&phba->io_sgl_lock);
4650 			io_task->psgl_handle = NULL;
4653 		if (io_task->scsi_cmnd) {
4654 			scsi_dma_unmap(io_task->scsi_cmnd);
4655 			io_task->scsi_cmnd = NULL;
	/* Mgmt handles are kept while a login exchange is in flight. */
4658 			if (!beiscsi_conn->login_in_progress)
4659 				beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
/*
 * beiscsi_offload_connection - move a logged-in connection to offload.
 * Frees the login task's resources, allocates a fresh WRB handle and
 * fills it with the offload (context update) parameters using the v0
 * layout on BE2/BE3-R chips or v2 otherwise, then rings the WRB
 * doorbell to post it.
 */
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4665 			   struct beiscsi_offload_params *params)
4667 	struct wrb_handle *pwrb_handle;
4668 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4669 	struct iscsi_task *task = beiscsi_conn->task;
4670 	struct iscsi_session *session = task->conn->session;
4674 	 * We can always use 0 here because it is reserved by libiscsi for
4675 	 * login/startup related tasks.
4677 	beiscsi_conn->login_in_progress = 0;
	/* back_lock protects task teardown against completion path. */
4678 	spin_lock_bh(&session->back_lock);
4679 	beiscsi_cleanup_task(task);
4680 	spin_unlock_bh(&session->back_lock);
4682 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
4684 	/* Check for the adapter family */
4685 	if (is_chip_be2_be3r(phba))
4686 		beiscsi_offload_cxn_v0(params, pwrb_handle,
4689 		beiscsi_offload_cxn_v2(params, pwrb_handle);
4691 	be_dws_le_to_cpu(pwrb_handle->pwrb,
4692 			 sizeof(struct iscsi_target_context_update_wrb));
	/* Compose and ring the WRB-post doorbell: CID + index + count. */
4694 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4695 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4696 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
4697 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4698 	iowrite32(doorbell, phba->db_va +
4699 		  beiscsi_conn->doorbell_offset);
/*
 * beiscsi_parse_pdu - translate a wire itt back to libiscsi index/age.
 * The age is taken from the current session.
 */
4702 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4703 			      int *index, int *age)
4707 	*age = conn->session->age;
 * beiscsi_alloc_pdu - allocates pdu and related resources
4712  * @task: libiscsi task
4713  * @opcode: opcode of pdu for task
4715  * This is called with the session lock held. It will allocate
4716  * the wrb and sgl if needed for the command. And it will prep
4717  * the pdu's itt. beiscsi_parse_pdu will later translate
4718  * the pdu itt to the libiscsi task itt.
4720 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4722 	struct beiscsi_io_task *io_task = task->dd_data;
4723 	struct iscsi_conn *conn = task->conn;
4724 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4725 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4726 	struct hwi_wrb_context *pwrb_context;
4727 	struct hwi_controller *phwi_ctrlr;
4729 	uint16_t cri_index = 0;
4730 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	/* BHS comes from the session DMA pool; GFP_ATOMIC because the
	 * session lock is held. */
4733 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4734 					  GFP_ATOMIC, &paddr);
4735 	if (!io_task->cmd_bhs)
4737 	io_task->bhs_pa.u.a64.address = paddr;
4738 	io_task->libiscsi_itt = (itt_t)task->itt;
4739 	io_task->conn = beiscsi_conn;
4741 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4742 	task->hdr_max = sizeof(struct be_cmd_bhs);
4743 	io_task->psgl_handle = NULL;
4744 	io_task->pwrb_handle = NULL;
	/* Data path: IO SGL handle + WRB handle. */
4747 		spin_lock(&phba->io_sgl_lock);
4748 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
4749 		spin_unlock(&phba->io_sgl_lock);
4750 		if (!io_task->psgl_handle) {
4751 			beiscsi_log(phba, KERN_ERR,
4752 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4753 				    "BM_%d : Alloc of IO_SGL_ICD Failed"
4754 				    "for the CID : %d\n",
4755 				    beiscsi_conn->beiscsi_conn_cid);
4758 		io_task->pwrb_handle = alloc_wrb_handle(phba,
4759 					beiscsi_conn->beiscsi_conn_cid);
4760 		if (!io_task->pwrb_handle) {
4761 			beiscsi_log(phba, KERN_ERR,
4762 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4763 				    "BM_%d : Alloc of WRB_HANDLE Failed"
4764 				    "for the CID : %d\n",
4765 				    beiscsi_conn->beiscsi_conn_cid);
4769 		io_task->scsi_cmnd = NULL;
	/* Mgmt path: login PDUs reuse the cached login handles after the
	 * first allocation; other mgmt PDUs get fresh mgmt handles. */
4770 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4771 			beiscsi_conn->task = task;
4772 			if (!beiscsi_conn->login_in_progress) {
4773 				spin_lock(&phba->mgmt_sgl_lock);
4774 				io_task->psgl_handle = (struct sgl_handle *)
4775 						alloc_mgmt_sgl_handle(phba);
4776 				spin_unlock(&phba->mgmt_sgl_lock);
4777 				if (!io_task->psgl_handle) {
4778 					beiscsi_log(phba, KERN_ERR,
4781 						    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4782 						    "for the CID : %d\n",
4788 				beiscsi_conn->login_in_progress = 1;
4789 				beiscsi_conn->plogin_sgl_handle =
4790 							io_task->psgl_handle;
4791 				io_task->pwrb_handle =
4792 					alloc_wrb_handle(phba,
4793 					beiscsi_conn->beiscsi_conn_cid);
4794 				if (!io_task->pwrb_handle) {
4795 					beiscsi_log(phba, KERN_ERR,
4798 						    "BM_%d : Alloc of WRB_HANDLE Failed"
4799 						    "for the CID : %d\n",
4802 					goto free_mgmt_hndls;
4804 				beiscsi_conn->plogin_wrb_handle =
4805 							io_task->pwrb_handle;
	/* Subsequent login PDUs: reuse the cached handles. */
4808 				io_task->psgl_handle =
4809 						beiscsi_conn->plogin_sgl_handle;
4810 				io_task->pwrb_handle =
4811 					beiscsi_conn->plogin_wrb_handle;
4814 			spin_lock(&phba->mgmt_sgl_lock);
4815 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4816 			spin_unlock(&phba->mgmt_sgl_lock);
4817 			if (!io_task->psgl_handle) {
4818 				beiscsi_log(phba, KERN_ERR,
4821 					    "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4822 					    "for the CID : %d\n",
4827 			io_task->pwrb_handle =
4828 					alloc_wrb_handle(phba,
4829 					beiscsi_conn->beiscsi_conn_cid);
4830 			if (!io_task->pwrb_handle) {
4831 				beiscsi_log(phba, KERN_ERR,
4832 					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4833 					    "BM_%d : Alloc of WRB_HANDLE Failed"
4834 					    "for the CID : %d\n",
4835 					    beiscsi_conn->beiscsi_conn_cid);
4836 				goto free_mgmt_hndls;
	/* Pack WRB index (high 16 bits) and SGL index into the itt so
	 * the completion path can recover both from the wire. */
4841 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4842 				 wrb_index << 16) | (unsigned int)
4843 				(io_task->psgl_handle->sgl_index));
4844 	io_task->pwrb_handle->pio_handle = task;
4846 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	/* Error unwind labels: free IO SGL, mgmt SGL, WRB, then BHS. */
4850 	spin_lock(&phba->io_sgl_lock);
4851 	free_io_sgl_handle(phba, io_task->psgl_handle);
4852 	spin_unlock(&phba->io_sgl_lock);
4855 	spin_lock(&phba->mgmt_sgl_lock);
4856 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4857 	io_task->psgl_handle = NULL;
4858 	spin_unlock(&phba->mgmt_sgl_lock);
4860 	phwi_ctrlr = phba->phwi_ctrlr;
4861 	cri_index = BE_GET_CRI_FROM_CID(
4862 	beiscsi_conn->beiscsi_conn_cid);
4863 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4864 	if (io_task->pwrb_handle)
4865 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4866 	io_task->pwrb_handle = NULL;
4867 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4868 		      io_task->bhs_pa.u.a64.address);
4869 	io_task->cmd_bhs = NULL;
/*
 * beiscsi_iotask_v2 - post a SCSI IO using the v2 WRB layout (SKH-R).
 * Fills the task's WRB with direction/type, LUN, transfer length,
 * WRB/SGL indices and CmdSN, writes the scatter list, converts the
 * WRB to little-endian, and rings the connection doorbell.
 */
4872 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4873 		      unsigned int num_sg, unsigned int xferlen,
4874 		      unsigned int writedir)
4877 	struct beiscsi_io_task *io_task = task->dd_data;
4878 	struct iscsi_conn *conn = task->conn;
4879 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4880 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4881 	struct iscsi_wrb *pwrb = NULL;
4882 	unsigned int doorbell = 0;
4884 	pwrb = io_task->pwrb_handle->pwrb;
4886 	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4887 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
	/* WRITE: set type and assert dsp (data sequence present). */
4890 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4892 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4894 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4896 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4899 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4902 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4903 		      cpu_to_be16(*(unsigned short *)
4904 				  &io_task->cmd_bhs->iscsi_hdr.lun));
4905 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4906 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4907 		      io_task->pwrb_handle->wrb_index);
4908 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4909 		      be32_to_cpu(task->cmdsn));
4910 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4911 		      io_task->psgl_handle->sgl_index);
4913 	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4914 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4915 		      io_task->pwrb_handle->nxt_wrb_index);
4917 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
	/* Doorbell: CID + WRB index + one WRB posted. */
4919 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4920 	doorbell |= (io_task->pwrb_handle->wrb_index &
4921 		     DB_DEF_PDU_WRB_INDEX_MASK) <<
4922 		     DB_DEF_PDU_WRB_INDEX_SHIFT;
4923 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4924 	iowrite32(doorbell, phba->db_va +
4925 		  beiscsi_conn->doorbell_offset);
/*
 * beiscsi_iotask - post a SCSI IO using the legacy (BE2/BE3) WRB
 * layout. Mirrors beiscsi_iotask_v2 but uses the amap_iscsi_wrb
 * field map (sgl_icd_idx instead of sgl_idx).
 */
4929 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4930 			  unsigned int num_sg, unsigned int xferlen,
4931 			  unsigned int writedir)
4934 	struct beiscsi_io_task *io_task = task->dd_data;
4935 	struct iscsi_conn *conn = task->conn;
4936 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4937 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4938 	struct iscsi_wrb *pwrb = NULL;
4939 	unsigned int doorbell = 0;
4941 	pwrb = io_task->pwrb_handle->pwrb;
4942 	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4943 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
	/* WRITE: set type and assert dsp (data sequence present). */
4946 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4948 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4950 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4952 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4955 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4958 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4959 		      cpu_to_be16(*(unsigned short *)
4960 				  &io_task->cmd_bhs->iscsi_hdr.lun));
4961 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4962 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4963 		      io_task->pwrb_handle->wrb_index);
4964 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4965 		      be32_to_cpu(task->cmdsn));
4966 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4967 		      io_task->psgl_handle->sgl_index);
4969 	hwi_write_sgl(pwrb, sg, num_sg, io_task);
4971 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4972 		      io_task->pwrb_handle->nxt_wrb_index);
4973 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
	/* Doorbell: CID + WRB index + one WRB posted. */
4975 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4976 	doorbell |= (io_task->pwrb_handle->wrb_index &
4977 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4978 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4980 	iowrite32(doorbell, phba->db_va +
4981 		  beiscsi_conn->doorbell_offset);
/*
 * beiscsi_mtask - build and post a WRB for an iSCSI management task
 * @task: libiscsi task (login, nop-out, text, TMF, logout, ...)
 *
 * Zeroes the task's WRB, fills the chip-generation-specific common
 * fields (v1 layout for BE2/BE3, v2 for SkyHawk), then sets the WRB
 * type per iSCSI opcode and copies the PDU via hwi_write_buffer().
 * Finally records the WRB type and rings the WRB-post doorbell.
 * NOTE(review): break statements and some case labels are missing from
 * this extract — confirm the switch fall-through structure in the full
 * source.
 */
4985 static int beiscsi_mtask(struct iscsi_task *task)
4987 struct beiscsi_io_task *io_task = task->dd_data;
4988 struct iscsi_conn *conn = task->conn;
4989 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4990 struct beiscsi_hba *phba = beiscsi_conn->phba;
4991 struct iscsi_wrb *pwrb = NULL;
4992 unsigned int doorbell = 0;
4994 unsigned int pwrb_typeoffset = 0;
4996 cid = beiscsi_conn->beiscsi_conn_cid;
4997 pwrb = io_task->pwrb_handle->pwrb;
4998 memset(pwrb, 0, sizeof(*pwrb));
/* Chip-dependent WRB layout: BE2/BE3 use the v1 amap, SkyHawk uses v2. */
5000 if (is_chip_be2_be3r(phba)) {
5001 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
5002 be32_to_cpu(task->cmdsn));
5003 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
5004 io_task->pwrb_handle->wrb_index);
5005 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
5006 io_task->psgl_handle->sgl_index);
5007 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
5009 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
5010 io_task->pwrb_handle->nxt_wrb_index);
5011 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
5013 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
5014 be32_to_cpu(task->cmdsn));
5015 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
5016 io_task->pwrb_handle->wrb_index);
5017 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
5018 io_task->psgl_handle->sgl_index);
5019 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
5021 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
5022 io_task->pwrb_handle->nxt_wrb_index);
5023 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
/* Per-opcode WRB type selection and PDU buffer setup. */
5027 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
5028 case ISCSI_OP_LOGIN:
5029 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
5030 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
5031 hwi_write_buffer(pwrb, task);
5033 case ISCSI_OP_NOOP_OUT:
/* A valid TTT means this nop-out answers a target nop-in (no reply
 * expected from the target), handled as a target-driven command. */
5034 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
5035 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
5036 if (is_chip_be2_be3r(phba))
5037 AMAP_SET_BITS(struct amap_iscsi_wrb,
5040 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
5043 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
5044 if (is_chip_be2_be3r(phba))
5045 AMAP_SET_BITS(struct amap_iscsi_wrb,
5048 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
5051 hwi_write_buffer(pwrb, task);
5054 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
5055 hwi_write_buffer(pwrb, task);
5057 case ISCSI_OP_SCSI_TMFUNC:
5058 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
5059 hwi_write_buffer(pwrb, task);
5061 case ISCSI_OP_LOGOUT:
5062 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
5063 hwi_write_buffer(pwrb, task);
/* Unsupported opcode: log and bail. */
5067 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5068 "BM_%d : opcode =%d Not supported\n",
5069 task->hdr->opcode & ISCSI_OPCODE_MASK);
5074 /* Set the task type */
5075 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
5076 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
5077 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
/* Ring the WRB-post doorbell for this CID with one WRB posted. */
5079 doorbell |= cid & DB_WRB_POST_CID_MASK;
5080 doorbell |= (io_task->pwrb_handle->wrb_index &
5081 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
5082 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
5083 iowrite32(doorbell, phba->db_va +
5084 beiscsi_conn->doorbell_offset);
/*
 * beiscsi_task_xmit - iscsi_transport .xmit_task entry point
 * @task: task handed down by libiscsi
 *
 * Management tasks (no SCSI command attached) are routed to
 * beiscsi_mtask(). For SCSI commands the data buffer is DMA-mapped
 * and the chip-generation-specific I/O builder (phba->iotask_fn,
 * beiscsi_iotask or beiscsi_iotask_v2) is invoked with the mapped
 * scatterlist, transfer length and direction.
 */
5088 static int beiscsi_task_xmit(struct iscsi_task *task)
5090 struct beiscsi_io_task *io_task = task->dd_data;
5091 struct scsi_cmnd *sc = task->sc;
5092 struct beiscsi_hba *phba = NULL;
5093 struct scatterlist *sg;
5095 unsigned int writedir = 0, xferlen = 0;
5097 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
/* No scsi_cmnd attached -> this is a management PDU. */
5100 return beiscsi_mtask(task);
5102 io_task->scsi_cmnd = sc;
5103 num_sg = scsi_dma_map(sc);
/* scsi_dma_map() failure path: log ITTs and buffer length for triage. */
5105 struct iscsi_conn *conn = task->conn;
5106 struct beiscsi_hba *phba = NULL;
5108 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
5109 beiscsi_log(phba, KERN_ERR,
5110 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
5111 "BM_%d : scsi_dma_map Failed "
5112 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
5113 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
5114 io_task->libiscsi_itt, scsi_bufflen(sc));
5118 xferlen = scsi_bufflen(sc);
5119 sg = scsi_sglist(sc);
5120 if (sc->sc_data_direction == DMA_TO_DEVICE)
/* Dispatch to the v1 or v2 WRB builder selected at probe time. */
5125 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
5129 * beiscsi_bsg_request - handle bsg request from ISCSI transport
5130 * @job: job to handle
/*
 * Handles ISCSI_BSG_HST_VENDOR: allocates a DMA buffer for the
 * vendor-specific FW command, issues it through the MCC queue, waits
 * (interruptible, bounded by BEISCSI_HOST_MBX_TIMEOUT) for completion,
 * copies the response into the job's reply payload and completes the
 * bsg job. The DMA buffer is freed on both the tag-allocation failure
 * path and after the reply is copied.
 */
5132 static int beiscsi_bsg_request(struct bsg_job *job)
5134 struct Scsi_Host *shost;
5135 struct beiscsi_hba *phba;
5136 struct iscsi_bsg_request *bsg_req = job->request;
5139 struct be_dma_mem nonemb_cmd;
5140 struct be_cmd_resp_hdr *resp;
5141 struct iscsi_bsg_reply *bsg_reply = job->reply;
5142 unsigned short status, extd_status;
5144 shost = iscsi_job_to_shost(job);
5145 phba = iscsi_host_priv(shost);
5147 switch (bsg_req->msgcode) {
5148 case ISCSI_BSG_HST_VENDOR:
/* DMA-coherent buffer sized to the request payload. */
5149 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
5150 job->request_payload.payload_len,
5152 if (nonemb_cmd.va == NULL) {
5153 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5154 "BM_%d : Failed to allocate memory for "
5155 "beiscsi_bsg_request\n");
5158 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
/* No MCC tag available: free the DMA buffer and fail. */
5161 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5162 "BM_%d : MBX Tag Allocation Failed\n");
5164 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5165 nonemb_cmd.va, nonemb_cmd.dma);
/* Wait for the MCC completion to post on this tag. */
5169 rc = wait_event_interruptible_timeout(
5170 phba->ctrl.mcc_wait[tag],
5171 phba->ctrl.mcc_numtag[tag],
5173 BEISCSI_HOST_MBX_TIMEOUT));
/* mcc_numtag packs extd_status in bits 15:8 and status in bits 7:0. */
5174 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
5175 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
5176 free_mcc_tag(&phba->ctrl, tag);
5177 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
5178 sg_copy_from_buffer(job->reply_payload.sg_list,
5179 job->reply_payload.sg_cnt,
5180 nonemb_cmd.va, (resp->response_length
5182 bsg_reply->reply_payload_rcv_len = resp->response_length;
5183 bsg_reply->result = status;
5184 bsg_job_done(job, bsg_reply->result,
5185 bsg_reply->reply_payload_rcv_len);
5186 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5187 nonemb_cmd.va, nonemb_cmd.dma);
5188 if (status || extd_status) {
5189 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5190 "BM_%d : MBX Cmd Failed"
5191 " status = %d extd_status = %d\n",
5192 status, extd_status);
/* Any other msgcode is unsupported. */
5201 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5202 "BM_%d : Unsupported bsg command: 0x%x\n",
/*
 * beiscsi_hba_attrs_init - initialize driver-configurable attributes
 * @phba: adapter instance
 *
 * Seeds the per-adapter attribute values from the module parameters;
 * currently only the logging level.
 */
5210 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
5212 /* Set the logging parameter */
5213 beiscsi_log_enable_init(phba, beiscsi_log_enable);
5217 * beiscsi_quiesce()- Cleanup Driver resources
5218 * @phba: Instance Priv structure
5219 * @unload_state:i Clean or EEH unload state
5221 * Free the OS and HW resources held by the driver
/*
 * beiscsi_quiesce - tear down OS and HW resources held by the driver
 * @phba: adapter instance
 * @unload_state: BEISCSI_CLEAN_UNLOAD for normal remove/shutdown, or
 *                the EEH unload state for error recovery
 *
 * Disables adapter interrupts, frees MSI-X or legacy IRQ lines,
 * cancels the periodic HW health-check work, disables blk-iopoll on
 * every EQ, and — only on a clean unload — destroys the workqueue and
 * frees the port/memory resources. Always unmaps the PCI BARs and
 * frees the mailbox DMA memory.
 */
5223 static void beiscsi_quiesce(struct beiscsi_hba *phba,
5224 uint32_t unload_state)
5226 struct hwi_controller *phwi_ctrlr;
5227 struct hwi_context_memory *phwi_context;
5228 struct be_eq_obj *pbe_eq;
5229 unsigned int i, msix_vec;
5231 phwi_ctrlr = phba->phwi_ctrlr;
5232 phwi_context = phwi_ctrlr->phwi_ctxt;
5233 hwi_disable_intr(phba);
/* Free one vector per CPU plus the extra MCC vector (<=). */
5234 if (phba->msix_enabled) {
5235 for (i = 0; i <= phba->num_cpus; i++) {
5236 msix_vec = phba->msix_entries[i].vector;
5237 synchronize_irq(msix_vec);
5238 free_irq(msix_vec, &phwi_context->be_eq[i]);
5239 kfree(phba->msi_name[i]);
/* Legacy / INTx path. */
5242 if (phba->pcidev->irq) {
5243 synchronize_irq(phba->pcidev->irq);
5244 free_irq(phba->pcidev->irq, phba);
5246 pci_disable_msix(phba->pcidev);
5247 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
5249 for (i = 0; i < phba->num_cpus; i++) {
5250 pbe_eq = &phwi_context->be_eq[i];
5251 blk_iopoll_disable(&pbe_eq->iopoll);
5254 if (unload_state == BEISCSI_CLEAN_UNLOAD) {
5255 destroy_workqueue(phba->wq);
5256 beiscsi_clean_port(phba);
5257 beiscsi_free_mem(phba);
5259 beiscsi_unmap_pci_function(phba);
5260 pci_free_consistent(phba->pcidev,
5261 phba->ctrl.mbox_mem_alloced.size,
5262 phba->ctrl.mbox_mem_alloced.va,
5263 phba->ctrl.mbox_mem_alloced.dma);
/*
 * beiscsi_remove - PCI .remove callback
 * @pcidev: PCI device being removed
 *
 * Destroys the default network ifaces, quiesces the adapter (clean
 * unload), removes the boot kset and SCSI host, then releases PCI
 * resources in reverse order of probe.
 */
5271 static void beiscsi_remove(struct pci_dev *pcidev)
5274 struct beiscsi_hba *phba = NULL;
5276 phba = pci_get_drvdata(pcidev);
5278 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5282 beiscsi_destroy_def_ifaces(phba);
5283 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
5284 iscsi_boot_destroy_kset(phba->boot_kset);
5285 iscsi_host_remove(phba->shost);
5286 pci_dev_put(phba->pcidev);
5287 iscsi_host_free(phba->shost);
5288 pci_disable_pcie_error_reporting(pcidev);
5289 pci_set_drvdata(pcidev, NULL);
5290 pci_release_regions(pcidev);
5291 pci_disable_device(pcidev);
/*
 * beiscsi_shutdown - PCI .shutdown callback
 * @pcidev: PCI device being shut down
 *
 * Marks the adapter as shutting down, fails all active sessions so
 * upper layers stop issuing I/O, quiesces HW resources and disables
 * the PCI device.
 */
5294 static void beiscsi_shutdown(struct pci_dev *pcidev)
5297 struct beiscsi_hba *phba = NULL;
5299 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
5301 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
5305 phba->state = BE_ADAPTER_STATE_SHUTDOWN;
5306 iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
5307 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
5308 pci_disable_device(pcidev);
/*
 * beiscsi_msix_enable - request num_cpus + 1 MSI-X vectors
 * @phba: adapter instance
 *
 * Requests exactly num_cpus + 1 vectors (one per CPU EQ plus one for
 * the MCC EQ); min == max, so it is all-or-nothing. Sets
 * phba->msix_enabled on success.
 */
5311 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5315 for (i = 0; i <= phba->num_cpus; i++)
5316 phba->msix_entries[i].entry = i;
5318 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5319 phba->num_cpus + 1, phba->num_cpus + 1);
5321 phba->msix_enabled = true;
/*
 * be_eqd_update - adaptive interrupt coalescing (EQ delay) update
 * @phba: adapter instance
 *
 * For each EQ, derives a completions-per-second rate from the CQ
 * counter delta since the last sample, maps it to an EQ delay, clamps
 * it to [min_eqd, max_eqd], and batches all changed delays into a
 * single MODIFY_EQ_DELAY firmware command.
 */
5326 static void be_eqd_update(struct beiscsi_hba *phba)
5328 struct be_set_eqd set_eqd[MAX_CPUS];
5329 struct be_aic_obj *aic;
5330 struct be_eq_obj *pbe_eq;
5331 struct hwi_controller *phwi_ctrlr;
5332 struct hwi_context_memory *phwi_context;
5333 int eqd, i, num = 0;
5338 phwi_ctrlr = phba->phwi_ctrlr;
5339 phwi_context = phwi_ctrlr->phwi_ctxt;
5341 for (i = 0; i <= phba->num_cpus; i++) {
5342 aic = &phba->aic_obj[i];
5343 pbe_eq = &phwi_context->be_eq[i];
/* Skip the first sample, a jiffies wrap, or a counter reset. */
5345 if (!aic->jiffs || time_before(now, aic->jiffs) ||
5346 pbe_eq->cq_count < aic->eq_prev) {
5348 aic->eq_prev = pbe_eq->cq_count;
5351 delta = jiffies_to_msecs(now - aic->jiffs);
/* Completions per second for this EQ over the sample window. */
5352 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5353 eqd = (pps / 1500) << 2;
5357 eqd = min_t(u32, eqd, phwi_context->max_eqd);
5358 eqd = max_t(u32, eqd, phwi_context->min_eqd);
5361 aic->eq_prev = pbe_eq->cq_count;
/* Only queue an update when the delay actually changed. */
5363 if (eqd != aic->prev_eqd) {
5364 set_eqd[num].delay_multiplier = (eqd * 65)/100;
5365 set_eqd[num].eq_id = pbe_eq->q.id;
5366 aic->prev_eqd = eqd;
/* One FW command for all modified EQs; wait for its completion. */
5371 tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
5373 beiscsi_mccq_compl(phba, tag, NULL, NULL);
/*
 * be_check_boot_session - retry iSCSI boot-info setup from async event
 * @phba: adapter instance
 *
 * Thin wrapper that attempts beiscsi_setup_boot_info() and logs on
 * failure; called from the HW health-check work item.
 */
5377 static void be_check_boot_session(struct beiscsi_hba *phba)
5379 if (beiscsi_setup_boot_info(phba))
5380 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5381 "BM_%d : Could not set up "
5382 "iSCSI boot info on async event.\n");
5386 * beiscsi_hw_health_check()- Check adapter health
5387 * @work: work item to check HW health
5389 * Check if adapter in an unrecoverable state or not.
/*
 * beiscsi_hw_health_check - periodic (1s) adapter health work item
 * @work: embedded delayed work in struct beiscsi_hba
 *
 * Updates adaptive EQ delays, retries boot-session setup while the
 * BE_ADAPTER_CHECK_BOOT flag is pending, checks for unrecoverable
 * errors (UE), and re-arms itself one second later.
 */
5392 beiscsi_hw_health_check(struct work_struct *work)
5394 struct beiscsi_hba *phba =
5395 container_of(work, struct beiscsi_hba,
5396 beiscsi_hw_check_task.work);
5398 be_eqd_update(phba);
5400 if (phba->state & BE_ADAPTER_CHECK_BOOT) {
/* Retry boot-info setup every BE_GET_BOOT_TO ticks until it exists
 * or the retry budget (get_boot) runs out. */
5401 if ((phba->get_boot > 0) && (!phba->boot_kset)) {
5403 if (!(phba->get_boot % BE_GET_BOOT_TO))
5404 be_check_boot_session(phba);
5406 phba->state &= ~BE_ADAPTER_CHECK_BOOT;
5411 beiscsi_ue_detect(phba);
5413 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5414 msecs_to_jiffies(1000));
/*
 * beiscsi_eeh_err_detected - PCI error-handler .error_detected callback
 * @pdev: PCI device that hit the error
 * @state: PCI channel state reported by the EEH/AER core
 *
 * Marks the adapter as in PCI error, quiesces it with the EEH unload
 * state, and reports DISCONNECT on permanent failure or NEED_RESET
 * otherwise. Waits on function 0 in case firmware is mid flash-dump.
 */
5418 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5419 pci_channel_state_t state)
5421 struct beiscsi_hba *phba = NULL;
5423 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5424 phba->state |= BE_ADAPTER_PCI_ERR;
5426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5427 "BM_%d : EEH error detected\n");
5429 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
5431 if (state == pci_channel_io_perm_failure) {
5432 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5433 "BM_%d : EEH : State PERM Failure");
5434 return PCI_ERS_RESULT_DISCONNECT;
5437 pci_disable_device(pdev);
5439 /* The error could cause the FW to trigger a flash debug dump.
5440 * Resetting the card while flash dump is in progress
5441 * can cause it not to recover; wait for it to finish.
5442 * Wait only for first function as it is needed only once per
5445 if (pdev->devfn == 0)
5448 return PCI_ERS_RESULT_NEED_RESET;
/*
 * beiscsi_eeh_reset - PCI error-handler .slot_reset callback
 * @pdev: PCI device after a slot reset
 *
 * Re-enables the device, restores bus mastering / power state /
 * config space, waits for the chip reset to complete, and clears any
 * uncorrectable AER status. Returns RECOVERED on success, DISCONNECT
 * if the device cannot be enabled or the reset never completes.
 */
5451 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5453 struct beiscsi_hba *phba = NULL;
5456 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5458 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5459 "BM_%d : EEH Reset\n");
5461 status = pci_enable_device(pdev);
5463 return PCI_ERS_RESULT_DISCONNECT;
5465 pci_set_master(pdev);
5466 pci_set_power_state(pdev, PCI_D0);
5467 pci_restore_state(pdev);
5469 /* Wait for the CHIP Reset to complete */
5470 status = be_chk_reset_complete(phba);
5472 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5473 "BM_%d : EEH Reset Completed\n");
5475 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5476 "BM_%d : EEH Reset Completion Failure\n");
5477 return PCI_ERS_RESULT_DISCONNECT;
5480 pci_cleanup_aer_uncorrect_error_status(pdev);
5481 return PCI_ERS_RESULT_RECOVERED;
/*
 * beiscsi_eeh_resume - PCI error-handler .resume callback
 * @pdev: recovered PCI device
 *
 * Re-initializes the adapter after a successful slot reset: re-probes
 * CPU count and MSI-X, resets the function and waits for completion,
 * re-reads parameters, re-initializes the HW controller, MCC tags,
 * blk-iopoll on every EQ, the MCC work item and IRQs, then re-enables
 * interrupts and clears BE_ADAPTER_PCI_ERR. Mirrors the corresponding
 * steps of beiscsi_dev_probe().
 */
5484 static void beiscsi_eeh_resume(struct pci_dev *pdev)
5487 struct be_eq_obj *pbe_eq;
5488 struct beiscsi_hba *phba = NULL;
5489 struct hwi_controller *phwi_ctrlr;
5490 struct hwi_context_memory *phwi_context;
5492 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5493 pci_save_state(pdev);
5496 find_num_cpus(phba);
5501 beiscsi_msix_enable(phba);
5502 if (!phba->msix_enabled)
5506 ret = beiscsi_cmd_reset_function(phba);
5508 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5509 "BM_%d : Reset Failed\n");
5513 ret = be_chk_reset_complete(phba);
5515 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5516 "BM_%d : Failed to get out of reset.\n");
5520 beiscsi_get_params(phba);
5521 phba->shost->max_id = phba->params.cxns_per_ctrl;
5522 phba->shost->can_queue = phba->params.ios_per_ctrl;
5523 ret = hwi_init_controller(phba);
/* Re-seed the MCC tag pool (tags are 1-based; index 0 unused). */
5525 for (i = 0; i < MAX_MCC_CMD; i++) {
5526 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5527 phba->ctrl.mcc_tag[i] = i + 1;
5528 phba->ctrl.mcc_numtag[i + 1] = 0;
5529 phba->ctrl.mcc_tag_available++;
5532 phwi_ctrlr = phba->phwi_ctrlr;
5533 phwi_context = phwi_ctrlr->phwi_ctxt;
5535 for (i = 0; i < phba->num_cpus; i++) {
5536 pbe_eq = &phwi_context->be_eq[i];
5537 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
5539 blk_iopoll_enable(&pbe_eq->iopoll);
/* With MSI-X the last EQ (index num_cpus) services the MCC. */
5542 i = (phba->msix_enabled) ? i : 0;
5543 /* Work item for MCC handling */
5544 pbe_eq = &phwi_context->be_eq[i];
5545 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5547 ret = beiscsi_init_irqs(phba);
5549 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5550 "BM_%d : beiscsi_eeh_resume - "
5551 "Failed to beiscsi_init_irqs\n");
5555 hwi_enable_intr(phba);
5556 phba->state &= ~BE_ADAPTER_PCI_ERR;
5560 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5561 "BM_%d : AER EEH Resume Failed\n");
/*
 * beiscsi_dev_probe - PCI .probe callback: bring up one adapter
 * @pcidev: PCI device being probed
 * @id: matched entry from beiscsi_pci_id_table
 *
 * Probe sequence: enable PCI, allocate the host, enable AER, select
 * the chip generation and matching I/O builder (v1 for BE2/BE3, v2
 * for GEN4/SkyHawk), init the mailbox controller, reset the function,
 * read FW config, set up MSI-X, init the port, MCC tag pool, the
 * per-adapter workqueue and health-check work, blk-iopoll and IRQs,
 * register the SCSI host, boot info and default ifaces, then start
 * the 1s health-check timer. Error paths unwind in reverse order.
 * NOTE(review): goto labels and some error branches are not visible
 * in this extract — verify the unwind ordering against the full file.
 */
5564 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5565 const struct pci_device_id *id)
5567 struct beiscsi_hba *phba = NULL;
5568 struct hwi_controller *phwi_ctrlr;
5569 struct hwi_context_memory *phwi_context;
5570 struct be_eq_obj *pbe_eq;
5573 ret = beiscsi_enable_pci(pcidev);
5575 dev_err(&pcidev->dev,
5576 "beiscsi_dev_probe - Failed to enable pci device\n");
5580 phba = beiscsi_hba_alloc(pcidev);
5582 dev_err(&pcidev->dev,
5583 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5587 /* Enable EEH reporting */
5588 ret = pci_enable_pcie_error_reporting(pcidev);
5590 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5591 "BM_%d : PCIe Error Reporting "
5592 "Enabling Failed\n");
5594 pci_save_state(pcidev);
5596 /* Initialize Driver configuration Paramters */
5597 beiscsi_hba_attrs_init(phba);
5599 phba->fw_timeout = false;
5600 phba->mac_addr_set = false;
/* Pick the chip generation and matching WRB builder by device ID. */
5603 switch (pcidev->device) {
5607 phba->generation = BE_GEN2;
5608 phba->iotask_fn = beiscsi_iotask;
5612 phba->generation = BE_GEN3;
5613 phba->iotask_fn = beiscsi_iotask;
5616 phba->generation = BE_GEN4;
5617 phba->iotask_fn = beiscsi_iotask_v2;
5620 phba->generation = 0;
5623 ret = be_ctrl_init(phba, pcidev);
5625 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5626 "BM_%d : beiscsi_dev_probe-"
5627 "Failed in be_ctrl_init\n");
5631 ret = beiscsi_cmd_reset_function(phba);
5633 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5634 "BM_%d : Reset Failed\n");
5637 ret = be_chk_reset_complete(phba);
5639 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5640 "BM_%d : Failed to get out of reset.\n");
5644 spin_lock_init(&phba->io_sgl_lock);
5645 spin_lock_init(&phba->mgmt_sgl_lock);
5646 spin_lock_init(&phba->isr_lock);
5647 spin_lock_init(&phba->async_pdu_lock);
5648 ret = mgmt_get_fw_config(&phba->ctrl, phba);
5650 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5651 "BM_%d : Error getting fw config\n");
5656 find_num_cpus(phba);
5660 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5661 "BM_%d : num_cpus = %d\n",
5665 beiscsi_msix_enable(phba);
5666 if (!phba->msix_enabled)
5670 phba->shost->max_id = phba->params.cxns_per_ctrl;
5671 beiscsi_get_params(phba);
5672 phba->shost->can_queue = phba->params.ios_per_ctrl;
5673 ret = beiscsi_init_port(phba);
5675 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5676 "BM_%d : beiscsi_dev_probe-"
5677 "Failed in beiscsi_init_port\n");
/* Seed the MCC tag pool (tags are 1-based; index 0 unused). */
5681 for (i = 0; i < MAX_MCC_CMD; i++) {
5682 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5683 phba->ctrl.mcc_tag[i] = i + 1;
5684 phba->ctrl.mcc_numtag[i + 1] = 0;
5685 phba->ctrl.mcc_tag_available++;
5686 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
5687 sizeof(struct be_dma_mem));
5690 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
/* Single-threaded, reclaim-safe workqueue, named per host number. */
5692 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
5693 phba->shost->host_no);
5694 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
5696 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5697 "BM_%d : beiscsi_dev_probe-"
5698 "Failed to allocate work queue\n");
5702 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
5703 beiscsi_hw_health_check);
5705 phwi_ctrlr = phba->phwi_ctrlr;
5706 phwi_context = phwi_ctrlr->phwi_ctxt;
5708 for (i = 0; i < phba->num_cpus; i++) {
5709 pbe_eq = &phwi_context->be_eq[i];
5710 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
5712 blk_iopoll_enable(&pbe_eq->iopoll);
/* With MSI-X the last EQ (index num_cpus) services the MCC. */
5715 i = (phba->msix_enabled) ? i : 0;
5716 /* Work item for MCC handling */
5717 pbe_eq = &phwi_context->be_eq[i];
5718 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5720 ret = beiscsi_init_irqs(phba);
5722 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5723 "BM_%d : beiscsi_dev_probe-"
5724 "Failed to beiscsi_init_irqs\n");
5727 hwi_enable_intr(phba);
5729 if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
5732 if (beiscsi_setup_boot_info(phba))
5734 * log error but continue, because we may not be using
5737 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5738 "BM_%d : Could not set up "
5739 "iSCSI boot info.\n");
5741 beiscsi_create_def_ifaces(phba);
5742 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5743 msecs_to_jiffies(1000));
5745 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5746 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
/* Error unwind: undo completed steps in reverse order. */
5750 destroy_workqueue(phba->wq);
5751 for (i = 0; i < phba->num_cpus; i++) {
5752 pbe_eq = &phwi_context->be_eq[i];
5753 blk_iopoll_disable(&pbe_eq->iopoll);
5756 beiscsi_clean_port(phba);
5757 beiscsi_free_mem(phba);
5759 pci_free_consistent(phba->pcidev,
5760 phba->ctrl.mbox_mem_alloced.size,
5761 phba->ctrl.mbox_mem_alloced.va,
5762 phba->ctrl.mbox_mem_alloced.dma);
5763 beiscsi_unmap_pci_function(phba);
5765 if (phba->msix_enabled)
5766 pci_disable_msix(phba->pcidev);
5767 pci_dev_put(phba->pcidev);
5768 iscsi_host_free(phba->shost);
5769 pci_set_drvdata(pcidev, NULL);
5771 pci_release_regions(pcidev);
5772 pci_disable_device(pcidev);
/* PCI error (EEH/AER) recovery callbacks for this driver. */
5776 static struct pci_error_handlers beiscsi_eeh_handlers = {
5777 .error_detected = beiscsi_eeh_err_detected,
5778 .slot_reset = beiscsi_eeh_reset,
5779 .resume = beiscsi_eeh_resume,
/*
 * iSCSI transport template registered with the iSCSI transport class.
 * Advertises offloaded data-path capabilities and wires libiscsi
 * callbacks to the be2iscsi implementations (generic libiscsi helpers
 * are used where no HW-specific handling is needed).
 */
5782 struct iscsi_transport beiscsi_iscsi_transport = {
5783 .owner = THIS_MODULE,
5785 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
5786 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
5787 .create_session = beiscsi_session_create,
5788 .destroy_session = beiscsi_session_destroy,
5789 .create_conn = beiscsi_conn_create,
5790 .bind_conn = beiscsi_conn_bind,
5791 .destroy_conn = iscsi_conn_teardown,
5792 .attr_is_visible = be2iscsi_attr_is_visible,
5793 .set_iface_param = be2iscsi_iface_set_param,
5794 .get_iface_param = be2iscsi_iface_get_param,
5795 .set_param = beiscsi_set_param,
5796 .get_conn_param = iscsi_conn_get_param,
5797 .get_session_param = iscsi_session_get_param,
5798 .get_host_param = beiscsi_get_host_param,
5799 .start_conn = beiscsi_conn_start,
5800 .stop_conn = iscsi_conn_stop,
5801 .send_pdu = iscsi_conn_send_pdu,
5802 .xmit_task = beiscsi_task_xmit,
5803 .cleanup_task = beiscsi_cleanup_task,
5804 .alloc_pdu = beiscsi_alloc_pdu,
5805 .parse_pdu_itt = beiscsi_parse_pdu,
5806 .get_stats = beiscsi_conn_get_stats,
5807 .get_ep_param = beiscsi_ep_get_param,
5808 .ep_connect = beiscsi_ep_connect,
5809 .ep_poll = beiscsi_ep_poll,
5810 .ep_disconnect = beiscsi_ep_disconnect,
5811 .session_recovery_timedout = iscsi_session_recovery_timedout,
5812 .bsg_request = beiscsi_bsg_request,
/* PCI driver registration: probe/remove/shutdown and EEH handlers. */
5815 static struct pci_driver beiscsi_pci_driver = {
5817 .probe = beiscsi_dev_probe,
5818 .remove = beiscsi_remove,
5819 .shutdown = beiscsi_shutdown,
5820 .id_table = beiscsi_pci_id_table,
5821 .err_handler = &beiscsi_eeh_handlers
/*
 * beiscsi_module_init - module entry point
 *
 * Registers the iSCSI transport first, then the PCI driver; on PCI
 * registration failure the transport is unregistered before bailing
 * out.
 */
5825 static int __init beiscsi_module_init(void)
5829 beiscsi_scsi_transport =
5830 iscsi_register_transport(&beiscsi_iscsi_transport);
5831 if (!beiscsi_scsi_transport) {
5833 "beiscsi_module_init - Unable to register beiscsi transport.\n");
5836 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5837 &beiscsi_iscsi_transport);
5839 ret = pci_register_driver(&beiscsi_pci_driver);
5842 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
5843 goto unregister_iscsi_transport;
5847 unregister_iscsi_transport:
5848 iscsi_unregister_transport(&beiscsi_iscsi_transport);
/*
 * beiscsi_module_exit - module exit point
 *
 * Tears down in reverse registration order: PCI driver, then the
 * iSCSI transport.
 */
5852 static void __exit beiscsi_module_exit(void)
5854 pci_unregister_driver(&beiscsi_pci_driver);
5855 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5858 module_init(beiscsi_module_init);
5859 module_exit(beiscsi_module_exit);