/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)   2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
25 static char qla4xxx_version_str[40];
28 * SRB allocation cache
30 static struct kmem_cache *srb_cachep;
33 * Module parameter information and variables
35 static int ql4xdisablesysfsboot = 1;
36 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 " Set to disable exporting boot targets to sysfs.\n"
39 "\t\t 0 - Export boot targets\n"
40 "\t\t 1 - Do not export boot targets (Default)");
43 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44 MODULE_PARM_DESC(ql4xdontresethba,
45 " Don't reset the HBA for driver recovery.\n"
46 "\t\t 0 - It will reset HBA (Default)\n"
47 "\t\t 1 - It will NOT reset HBA");
49 int ql4xextended_error_logging;
50 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51 MODULE_PARM_DESC(ql4xextended_error_logging,
52 " Option to enable extended error logging.\n"
53 "\t\t 0 - no logging (Default)\n"
54 "\t\t 2 - debug logging");
56 int ql4xenablemsix = 1;
57 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
58 MODULE_PARM_DESC(ql4xenablemsix,
59 " Set to enable MSI or MSI-X interrupt mechanism.\n"
60 "\t\t 0 = enable INTx interrupt mechanism.\n"
61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
62 "\t\t 2 = enable MSI interrupt mechanism.");
64 #define QL4_DEF_QDEPTH 32
65 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
66 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
67 MODULE_PARM_DESC(ql4xmaxqdepth,
68 " Maximum queue depth to report for target devices.\n"
71 static int ql4xqfulltracking = 1;
72 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73 MODULE_PARM_DESC(ql4xqfulltracking,
74 " Enable or disable dynamic tracking and adjustment of\n"
75 "\t\t scsi device queue depth.\n"
77 "\t\t 1 - Enable. (Default)");
79 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
80 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
81 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
82 " Target Session Recovery Timeout.\n"
83 "\t\t Default: 120 sec.");
85 int ql4xmdcapmask = 0x1F;
86 module_param(ql4xmdcapmask, int, S_IRUGO);
87 MODULE_PARM_DESC(ql4xmdcapmask,
88 " Set the Minidump driver capture mask level.\n"
89 "\t\t Default is 0x1F.\n"
90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
93 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(ql4xenablemd,
95 " Set to enable minidump.\n"
96 "\t\t 0 - disable minidump\n"
97 "\t\t 1 - enable minidump (Default)");
99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 * SCSI host template entry points
103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
106 * iSCSI template entry points
108 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
109 enum iscsi_param param, char *buf);
110 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
111 enum iscsi_param param, char *buf);
112 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
113 enum iscsi_host_param param, char *buf);
114 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
117 enum iscsi_param_type param_type,
118 int param, char *buf);
119 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
120 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
121 struct sockaddr *dst_addr,
123 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
124 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
125 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
126 enum iscsi_param param, char *buf);
127 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
128 static struct iscsi_cls_conn *
129 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
130 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
131 struct iscsi_cls_conn *cls_conn,
132 uint64_t transport_fd, int is_leading);
133 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
134 static struct iscsi_cls_session *
135 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
136 uint16_t qdepth, uint32_t initial_cmdsn);
137 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
138 static void qla4xxx_task_work(struct work_struct *wdata);
139 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
140 static int qla4xxx_task_xmit(struct iscsi_task *);
141 static void qla4xxx_task_cleanup(struct iscsi_task *);
142 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
143 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
144 struct iscsi_stats *stats);
145 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
146 uint32_t iface_type, uint32_t payload_size,
147 uint32_t pid, struct sockaddr *dst_addr);
148 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
149 uint32_t *num_entries, char *buf);
150 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
153 * SCSI host template entry points
155 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
156 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_slave_alloc(struct scsi_device *device);
161 static int qla4xxx_slave_configure(struct scsi_device *device);
162 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
163 static umode_t ql4_attr_is_visible(int param_type, int param);
164 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
168 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
169 QLA82XX_LEGACY_INTR_CONFIG;
171 static struct scsi_host_template qla4xxx_driver_template = {
172 .module = THIS_MODULE,
174 .proc_name = DRIVER_NAME,
175 .queuecommand = qla4xxx_queuecommand,
177 .eh_abort_handler = qla4xxx_eh_abort,
178 .eh_device_reset_handler = qla4xxx_eh_device_reset,
179 .eh_target_reset_handler = qla4xxx_eh_target_reset,
180 .eh_host_reset_handler = qla4xxx_eh_host_reset,
181 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
183 .slave_configure = qla4xxx_slave_configure,
184 .slave_alloc = qla4xxx_slave_alloc,
185 .slave_destroy = qla4xxx_slave_destroy,
186 .change_queue_depth = qla4xxx_change_queue_depth,
190 .use_clustering = ENABLE_CLUSTERING,
191 .sg_tablesize = SG_ALL,
193 .max_sectors = 0xFFFF,
194 .shost_attrs = qla4xxx_host_attrs,
195 .host_reset = qla4xxx_host_reset,
196 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
199 static struct iscsi_transport qla4xxx_iscsi_transport = {
200 .owner = THIS_MODULE,
202 .caps = CAP_TEXT_NEGO |
203 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
204 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
206 .attr_is_visible = ql4_attr_is_visible,
207 .create_session = qla4xxx_session_create,
208 .destroy_session = qla4xxx_session_destroy,
209 .start_conn = qla4xxx_conn_start,
210 .create_conn = qla4xxx_conn_create,
211 .bind_conn = qla4xxx_conn_bind,
212 .stop_conn = iscsi_conn_stop,
213 .destroy_conn = qla4xxx_conn_destroy,
214 .set_param = iscsi_set_param,
215 .get_conn_param = qla4xxx_conn_get_param,
216 .get_session_param = qla4xxx_session_get_param,
217 .get_ep_param = qla4xxx_get_ep_param,
218 .ep_connect = qla4xxx_ep_connect,
219 .ep_poll = qla4xxx_ep_poll,
220 .ep_disconnect = qla4xxx_ep_disconnect,
221 .get_stats = qla4xxx_conn_get_stats,
222 .send_pdu = iscsi_conn_send_pdu,
223 .xmit_task = qla4xxx_task_xmit,
224 .cleanup_task = qla4xxx_task_cleanup,
225 .alloc_pdu = qla4xxx_alloc_pdu,
227 .get_host_param = qla4xxx_host_get_param,
228 .set_iface_param = qla4xxx_iface_set_param,
229 .get_iface_param = qla4xxx_get_iface_param,
230 .bsg_request = qla4xxx_bsg_request,
231 .send_ping = qla4xxx_send_ping,
232 .get_chap = qla4xxx_get_chap_list,
233 .delete_chap = qla4xxx_delete_chap,
/* Transport template handle returned by iscsi_register_transport(). */
static struct scsi_transport_template *qla4xxx_scsi_transport;
238 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
239 uint32_t iface_type, uint32_t payload_size,
240 uint32_t pid, struct sockaddr *dst_addr)
242 struct scsi_qla_host *ha = to_qla_host(shost);
243 struct sockaddr_in *addr;
244 struct sockaddr_in6 *addr6;
245 uint32_t options = 0;
246 uint8_t ipaddr[IPv6_ADDR_LEN];
249 memset(ipaddr, 0, IPv6_ADDR_LEN);
251 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
252 (dst_addr->sa_family == AF_INET)) {
253 addr = (struct sockaddr_in *)dst_addr;
254 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
255 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
256 "dest: %pI4\n", __func__,
257 &ha->ip_config.ip_address, ipaddr));
258 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
262 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
263 (dst_addr->sa_family == AF_INET6)) {
265 addr6 = (struct sockaddr_in6 *)dst_addr;
266 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
268 options |= PING_IPV6_PROTOCOL_ENABLE;
270 /* Ping using LinkLocal address */
271 if ((iface_num == 0) || (iface_num == 1)) {
272 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
273 "src: %pI6 dest: %pI6\n", __func__,
274 &ha->ip_config.ipv6_link_local_addr,
276 options |= PING_IPV6_LINKLOCAL_ADDR;
277 rval = qla4xxx_ping_iocb(ha, options, payload_size,
280 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
281 "not supported\n", __func__, iface_num);
287 * If ping using LinkLocal address fails, try ping using
290 if (rval != QLA_SUCCESS) {
291 options &= ~PING_IPV6_LINKLOCAL_ADDR;
292 if (iface_num == 0) {
293 options |= PING_IPV6_ADDR0;
294 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
296 "dest: %pI6\n", __func__,
297 &ha->ip_config.ipv6_addr0,
299 } else if (iface_num == 1) {
300 options |= PING_IPV6_ADDR1;
301 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
303 "dest: %pI6\n", __func__,
304 &ha->ip_config.ipv6_addr1,
307 rval = qla4xxx_ping_iocb(ha, options, payload_size,
318 static umode_t ql4_attr_is_visible(int param_type, int param)
320 switch (param_type) {
321 case ISCSI_HOST_PARAM:
323 case ISCSI_HOST_PARAM_HWADDRESS:
324 case ISCSI_HOST_PARAM_IPADDRESS:
325 case ISCSI_HOST_PARAM_INITIATOR_NAME:
326 case ISCSI_HOST_PARAM_PORT_STATE:
327 case ISCSI_HOST_PARAM_PORT_SPEED:
334 case ISCSI_PARAM_PERSISTENT_ADDRESS:
335 case ISCSI_PARAM_PERSISTENT_PORT:
336 case ISCSI_PARAM_CONN_ADDRESS:
337 case ISCSI_PARAM_CONN_PORT:
338 case ISCSI_PARAM_TARGET_NAME:
339 case ISCSI_PARAM_TPGT:
340 case ISCSI_PARAM_TARGET_ALIAS:
341 case ISCSI_PARAM_MAX_BURST:
342 case ISCSI_PARAM_MAX_R2T:
343 case ISCSI_PARAM_FIRST_BURST:
344 case ISCSI_PARAM_MAX_RECV_DLENGTH:
345 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
346 case ISCSI_PARAM_IFACE_NAME:
347 case ISCSI_PARAM_CHAP_OUT_IDX:
348 case ISCSI_PARAM_CHAP_IN_IDX:
349 case ISCSI_PARAM_USERNAME:
350 case ISCSI_PARAM_PASSWORD:
351 case ISCSI_PARAM_USERNAME_IN:
352 case ISCSI_PARAM_PASSWORD_IN:
357 case ISCSI_NET_PARAM:
359 case ISCSI_NET_PARAM_IPV4_ADDR:
360 case ISCSI_NET_PARAM_IPV4_SUBNET:
361 case ISCSI_NET_PARAM_IPV4_GW:
362 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
363 case ISCSI_NET_PARAM_IFACE_ENABLE:
364 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
365 case ISCSI_NET_PARAM_IPV6_ADDR:
366 case ISCSI_NET_PARAM_IPV6_ROUTER:
367 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
368 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
369 case ISCSI_NET_PARAM_VLAN_ID:
370 case ISCSI_NET_PARAM_VLAN_PRIORITY:
371 case ISCSI_NET_PARAM_VLAN_ENABLED:
372 case ISCSI_NET_PARAM_MTU:
373 case ISCSI_NET_PARAM_PORT:
383 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
384 uint32_t *num_entries, char *buf)
386 struct scsi_qla_host *ha = to_qla_host(shost);
387 struct ql4_chap_table *chap_table;
388 struct iscsi_chap_rec *chap_rec;
389 int max_chap_entries = 0;
390 int valid_chap_entries = 0;
394 max_chap_entries = (ha->hw.flt_chap_size / 2) /
395 sizeof(struct ql4_chap_table);
397 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
399 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
400 __func__, *num_entries, chap_tbl_idx);
404 goto exit_get_chap_list;
407 chap_rec = (struct iscsi_chap_rec *) buf;
408 mutex_lock(&ha->chap_sem);
409 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
410 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
411 if (chap_table->cookie !=
412 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
415 chap_rec->chap_tbl_idx = i;
416 strncpy(chap_rec->username, chap_table->name,
417 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
418 strncpy(chap_rec->password, chap_table->secret,
419 QL4_CHAP_MAX_SECRET_LEN);
420 chap_rec->password_length = chap_table->secret_len;
422 if (chap_table->flags & BIT_7) /* local */
423 chap_rec->chap_type = CHAP_TYPE_OUT;
425 if (chap_table->flags & BIT_6) /* peer */
426 chap_rec->chap_type = CHAP_TYPE_IN;
430 valid_chap_entries++;
431 if (valid_chap_entries == *num_entries)
436 mutex_unlock(&ha->chap_sem);
439 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
440 __func__, valid_chap_entries);
441 *num_entries = valid_chap_entries;
445 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
448 uint16_t *chap_tbl_idx = (uint16_t *) data;
449 struct iscsi_cls_session *cls_session;
450 struct iscsi_session *sess;
451 struct ddb_entry *ddb_entry;
453 if (!iscsi_is_session_dev(dev))
454 goto exit_is_chap_active;
456 cls_session = iscsi_dev_to_session(dev);
457 sess = cls_session->dd_data;
458 ddb_entry = sess->dd_data;
460 if (iscsi_session_chkready(cls_session))
461 goto exit_is_chap_active;
463 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
470 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
471 uint16_t chap_tbl_idx)
475 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
476 __qla4xxx_is_chap_active);
481 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
483 struct scsi_qla_host *ha = to_qla_host(shost);
484 struct ql4_chap_table *chap_table;
486 int max_chap_entries = 0;
491 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
492 if (chap_table == NULL)
495 memset(chap_table, 0, sizeof(struct ql4_chap_table));
498 max_chap_entries = (ha->hw.flt_chap_size / 2) /
499 sizeof(struct ql4_chap_table);
501 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
503 if (chap_tbl_idx > max_chap_entries) {
505 goto exit_delete_chap;
508 /* Check if chap index is in use.
509 * If chap is in use don't delet chap entry */
510 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
512 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
513 "delete from flash\n", chap_tbl_idx);
515 goto exit_delete_chap;
518 chap_size = sizeof(struct ql4_chap_table);
520 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
522 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
523 /* flt_chap_size is CHAP table size for both ports
524 * so divide it by 2 to calculate the offset for second port
526 if (ha->port_num == 1)
527 offset += (ha->hw.flt_chap_size / 2);
528 offset += (chap_tbl_idx * chap_size);
531 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
532 if (ret != QLA_SUCCESS) {
534 goto exit_delete_chap;
537 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
538 __le16_to_cpu(chap_table->cookie)));
540 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
541 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
542 goto exit_delete_chap;
545 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
547 offset = FLASH_CHAP_OFFSET |
548 (chap_tbl_idx * sizeof(struct ql4_chap_table));
549 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
550 FLASH_OPT_RMW_COMMIT);
551 if (ret == QLA_SUCCESS && ha->chap_list) {
552 mutex_lock(&ha->chap_sem);
553 /* Update ha chap_list cache */
554 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
555 chap_table, sizeof(struct ql4_chap_table));
556 mutex_unlock(&ha->chap_sem);
558 if (ret != QLA_SUCCESS)
562 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
566 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
567 enum iscsi_param_type param_type,
568 int param, char *buf)
570 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
571 struct scsi_qla_host *ha = to_qla_host(shost);
574 if (param_type != ISCSI_NET_PARAM)
578 case ISCSI_NET_PARAM_IPV4_ADDR:
579 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
581 case ISCSI_NET_PARAM_IPV4_SUBNET:
582 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
584 case ISCSI_NET_PARAM_IPV4_GW:
585 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
587 case ISCSI_NET_PARAM_IFACE_ENABLE:
588 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
589 len = sprintf(buf, "%s\n",
590 (ha->ip_config.ipv4_options &
591 IPOPT_IPV4_PROTOCOL_ENABLE) ?
592 "enabled" : "disabled");
593 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
594 len = sprintf(buf, "%s\n",
595 (ha->ip_config.ipv6_options &
596 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
597 "enabled" : "disabled");
599 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
600 len = sprintf(buf, "%s\n",
601 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
604 case ISCSI_NET_PARAM_IPV6_ADDR:
605 if (iface->iface_num == 0)
606 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
607 if (iface->iface_num == 1)
608 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
610 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
611 len = sprintf(buf, "%pI6\n",
612 &ha->ip_config.ipv6_link_local_addr);
614 case ISCSI_NET_PARAM_IPV6_ROUTER:
615 len = sprintf(buf, "%pI6\n",
616 &ha->ip_config.ipv6_default_router_addr);
618 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
619 len = sprintf(buf, "%s\n",
620 (ha->ip_config.ipv6_addl_options &
621 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
624 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
625 len = sprintf(buf, "%s\n",
626 (ha->ip_config.ipv6_addl_options &
627 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
630 case ISCSI_NET_PARAM_VLAN_ID:
631 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
632 len = sprintf(buf, "%d\n",
633 (ha->ip_config.ipv4_vlan_tag &
635 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
636 len = sprintf(buf, "%d\n",
637 (ha->ip_config.ipv6_vlan_tag &
640 case ISCSI_NET_PARAM_VLAN_PRIORITY:
641 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
642 len = sprintf(buf, "%d\n",
643 ((ha->ip_config.ipv4_vlan_tag >> 13) &
644 ISCSI_MAX_VLAN_PRIORITY));
645 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
646 len = sprintf(buf, "%d\n",
647 ((ha->ip_config.ipv6_vlan_tag >> 13) &
648 ISCSI_MAX_VLAN_PRIORITY));
650 case ISCSI_NET_PARAM_VLAN_ENABLED:
651 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
652 len = sprintf(buf, "%s\n",
653 (ha->ip_config.ipv4_options &
654 IPOPT_VLAN_TAGGING_ENABLE) ?
655 "enabled" : "disabled");
656 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
657 len = sprintf(buf, "%s\n",
658 (ha->ip_config.ipv6_options &
659 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
660 "enabled" : "disabled");
662 case ISCSI_NET_PARAM_MTU:
663 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
665 case ISCSI_NET_PARAM_PORT:
666 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
667 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
668 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
669 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
678 static struct iscsi_endpoint *
679 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
683 struct iscsi_endpoint *ep;
684 struct qla_endpoint *qla_ep;
685 struct scsi_qla_host *ha;
686 struct sockaddr_in *addr;
687 struct sockaddr_in6 *addr6;
689 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
692 printk(KERN_ERR "%s: shost is NULL\n",
697 ha = iscsi_host_priv(shost);
699 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
705 qla_ep = ep->dd_data;
706 memset(qla_ep, 0, sizeof(struct qla_endpoint));
707 if (dst_addr->sa_family == AF_INET) {
708 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
709 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
710 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
711 (char *)&addr->sin_addr));
712 } else if (dst_addr->sa_family == AF_INET6) {
713 memcpy(&qla_ep->dst_addr, dst_addr,
714 sizeof(struct sockaddr_in6));
715 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
716 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
717 (char *)&addr6->sin6_addr));
720 qla_ep->host = shost;
725 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
727 struct qla_endpoint *qla_ep;
728 struct scsi_qla_host *ha;
731 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
732 qla_ep = ep->dd_data;
733 ha = to_qla_host(qla_ep->host);
735 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
741 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
743 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
744 iscsi_destroy_endpoint(ep);
747 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
748 enum iscsi_param param,
751 struct qla_endpoint *qla_ep = ep->dd_data;
752 struct sockaddr *dst_addr;
754 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
757 case ISCSI_PARAM_CONN_PORT:
758 case ISCSI_PARAM_CONN_ADDRESS:
762 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
766 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
767 &qla_ep->dst_addr, param, buf);
773 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
774 struct iscsi_stats *stats)
776 struct iscsi_session *sess;
777 struct iscsi_cls_session *cls_sess;
778 struct ddb_entry *ddb_entry;
779 struct scsi_qla_host *ha;
780 struct ql_iscsi_stats *ql_iscsi_stats;
783 dma_addr_t iscsi_stats_dma;
785 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
787 cls_sess = iscsi_conn_to_session(cls_conn);
788 sess = cls_sess->dd_data;
789 ddb_entry = sess->dd_data;
792 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
793 /* Allocate memory */
794 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
795 &iscsi_stats_dma, GFP_KERNEL);
796 if (!ql_iscsi_stats) {
797 ql4_printk(KERN_ERR, ha,
798 "Unable to allocate memory for iscsi stats\n");
802 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
804 if (ret != QLA_SUCCESS) {
805 ql4_printk(KERN_ERR, ha,
806 "Unable to retreive iscsi stats\n");
811 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
812 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
814 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
815 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
816 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
817 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
818 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
819 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
820 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
821 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
823 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
824 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
825 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
826 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
827 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
828 stats->logoutrsp_pdus =
829 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
830 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
831 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
832 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
835 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
841 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
843 struct iscsi_cls_session *session;
844 struct iscsi_session *sess;
846 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
848 session = starget_to_session(scsi_target(sc->device));
849 sess = session->dd_data;
851 spin_lock_irqsave(&session->lock, flags);
852 if (session->state == ISCSI_SESSION_FAILED)
853 ret = BLK_EH_RESET_TIMER;
854 spin_unlock_irqrestore(&session->lock, flags);
859 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
861 struct scsi_qla_host *ha = to_qla_host(shost);
862 struct iscsi_cls_host *ihost = shost->shost_data;
863 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
865 qla4xxx_get_firmware_state(ha);
867 switch (ha->addl_fw_state & 0x0F00) {
868 case FW_ADDSTATE_LINK_SPEED_10MBPS:
869 speed = ISCSI_PORT_SPEED_10MBPS;
871 case FW_ADDSTATE_LINK_SPEED_100MBPS:
872 speed = ISCSI_PORT_SPEED_100MBPS;
874 case FW_ADDSTATE_LINK_SPEED_1GBPS:
875 speed = ISCSI_PORT_SPEED_1GBPS;
877 case FW_ADDSTATE_LINK_SPEED_10GBPS:
878 speed = ISCSI_PORT_SPEED_10GBPS;
881 ihost->port_speed = speed;
884 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
886 struct scsi_qla_host *ha = to_qla_host(shost);
887 struct iscsi_cls_host *ihost = shost->shost_data;
888 uint32_t state = ISCSI_PORT_STATE_DOWN;
890 if (test_bit(AF_LINK_UP, &ha->flags))
891 state = ISCSI_PORT_STATE_UP;
893 ihost->port_state = state;
896 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
897 enum iscsi_host_param param, char *buf)
899 struct scsi_qla_host *ha = to_qla_host(shost);
903 case ISCSI_HOST_PARAM_HWADDRESS:
904 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
906 case ISCSI_HOST_PARAM_IPADDRESS:
907 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
909 case ISCSI_HOST_PARAM_INITIATOR_NAME:
910 len = sprintf(buf, "%s\n", ha->name_string);
912 case ISCSI_HOST_PARAM_PORT_STATE:
913 qla4xxx_set_port_state(shost);
914 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
916 case ISCSI_HOST_PARAM_PORT_SPEED:
917 qla4xxx_set_port_speed(shost);
918 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
927 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
933 ha->iface_ipv4 = iscsi_create_iface(ha->host,
934 &qla4xxx_iscsi_transport,
935 ISCSI_IFACE_TYPE_IPV4, 0, 0);
937 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
941 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
943 if (!ha->iface_ipv6_0)
945 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
946 &qla4xxx_iscsi_transport,
947 ISCSI_IFACE_TYPE_IPV6, 0,
949 if (!ha->iface_ipv6_0)
950 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
953 if (!ha->iface_ipv6_1)
955 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
956 &qla4xxx_iscsi_transport,
957 ISCSI_IFACE_TYPE_IPV6, 1,
959 if (!ha->iface_ipv6_1)
960 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
964 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
966 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
967 qla4xxx_create_ipv4_iface(ha);
969 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
970 qla4xxx_create_ipv6_iface(ha);
973 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
975 if (ha->iface_ipv4) {
976 iscsi_destroy_iface(ha->iface_ipv4);
977 ha->iface_ipv4 = NULL;
981 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
983 if (ha->iface_ipv6_0) {
984 iscsi_destroy_iface(ha->iface_ipv6_0);
985 ha->iface_ipv6_0 = NULL;
987 if (ha->iface_ipv6_1) {
988 iscsi_destroy_iface(ha->iface_ipv6_1);
989 ha->iface_ipv6_1 = NULL;
/* Tear down all ifaces (IPv4 and IPv6). */
static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
	qla4xxx_destroy_ipv4_iface(ha);
	qla4xxx_destroy_ipv6_iface(ha);
}
999 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1000 struct iscsi_iface_param_info *iface_param,
1001 struct addr_ctrl_blk *init_fw_cb)
1004 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1005 * iface_num 1 is valid only for IPv6 Addr.
1007 switch (iface_param->param) {
1008 case ISCSI_NET_PARAM_IPV6_ADDR:
1009 if (iface_param->iface_num & 0x1)
1011 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1012 sizeof(init_fw_cb->ipv6_addr1));
1015 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1016 sizeof(init_fw_cb->ipv6_addr0));
1018 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1019 if (iface_param->iface_num & 0x1)
1021 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1022 sizeof(init_fw_cb->ipv6_if_id));
1024 case ISCSI_NET_PARAM_IPV6_ROUTER:
1025 if (iface_param->iface_num & 0x1)
1027 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1028 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1030 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1031 /* Autocfg applies to even interface */
1032 if (iface_param->iface_num & 0x1)
1035 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1036 init_fw_cb->ipv6_addtl_opts &=
1038 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1039 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1040 init_fw_cb->ipv6_addtl_opts |=
1042 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1044 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1047 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1048 /* Autocfg applies to even interface */
1049 if (iface_param->iface_num & 0x1)
1052 if (iface_param->value[0] ==
1053 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1054 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1055 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1056 else if (iface_param->value[0] ==
1057 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1058 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1059 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1061 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1062 "IPv6 linklocal addr\n");
1064 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1065 /* Autocfg applies to even interface */
1066 if (iface_param->iface_num & 0x1)
1069 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1070 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1071 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1073 case ISCSI_NET_PARAM_IFACE_ENABLE:
1074 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1075 init_fw_cb->ipv6_opts |=
1076 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1077 qla4xxx_create_ipv6_iface(ha);
1079 init_fw_cb->ipv6_opts &=
1080 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1082 qla4xxx_destroy_ipv6_iface(ha);
1085 case ISCSI_NET_PARAM_VLAN_TAG:
1086 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1088 init_fw_cb->ipv6_vlan_tag =
1089 cpu_to_be16(*(uint16_t *)iface_param->value);
1091 case ISCSI_NET_PARAM_VLAN_ENABLED:
1092 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1093 init_fw_cb->ipv6_opts |=
1094 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1096 init_fw_cb->ipv6_opts &=
1097 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1099 case ISCSI_NET_PARAM_MTU:
1100 init_fw_cb->eth_mtu_size =
1101 cpu_to_le16(*(uint16_t *)iface_param->value);
1103 case ISCSI_NET_PARAM_PORT:
1104 /* Autocfg applies to even interface */
1105 if (iface_param->iface_num & 0x1)
1108 init_fw_cb->ipv6_port =
1109 cpu_to_le16(*(uint16_t *)iface_param->value);
1112 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1113 iface_param->param);
1118 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1119 struct iscsi_iface_param_info *iface_param,
1120 struct addr_ctrl_blk *init_fw_cb)
1122 switch (iface_param->param) {
1123 case ISCSI_NET_PARAM_IPV4_ADDR:
1124 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1125 sizeof(init_fw_cb->ipv4_addr));
1127 case ISCSI_NET_PARAM_IPV4_SUBNET:
1128 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
1129 sizeof(init_fw_cb->ipv4_subnet));
1131 case ISCSI_NET_PARAM_IPV4_GW:
1132 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1133 sizeof(init_fw_cb->ipv4_gw_addr));
1135 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1136 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1137 init_fw_cb->ipv4_tcp_opts |=
1138 cpu_to_le16(TCPOPT_DHCP_ENABLE);
1139 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1140 init_fw_cb->ipv4_tcp_opts &=
1141 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1143 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1145 case ISCSI_NET_PARAM_IFACE_ENABLE:
1146 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1147 init_fw_cb->ipv4_ip_opts |=
1148 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1149 qla4xxx_create_ipv4_iface(ha);
1151 init_fw_cb->ipv4_ip_opts &=
1152 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1154 qla4xxx_destroy_ipv4_iface(ha);
1157 case ISCSI_NET_PARAM_VLAN_TAG:
1158 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1160 init_fw_cb->ipv4_vlan_tag =
1161 cpu_to_be16(*(uint16_t *)iface_param->value);
1163 case ISCSI_NET_PARAM_VLAN_ENABLED:
1164 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1165 init_fw_cb->ipv4_ip_opts |=
1166 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1168 init_fw_cb->ipv4_ip_opts &=
1169 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1171 case ISCSI_NET_PARAM_MTU:
1172 init_fw_cb->eth_mtu_size =
1173 cpu_to_le16(*(uint16_t *)iface_param->value);
1175 case ISCSI_NET_PARAM_PORT:
1176 init_fw_cb->ipv4_port =
1177 cpu_to_le16(*(uint16_t *)iface_param->value);
1180 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1181 iface_param->param);
/*
 * qla4xxx_initcb_to_acb - convert an init fw control block into an
 * address control block (ACB) by clearing every field the ACB layout
 * marks reserved. The two structures alias the same buffer, so this is
 * an in-place reinterpretation, not a copy.
 */
1187 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1189 struct addr_ctrl_blk_def *acb;
1190 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1191 memset(acb->reserved1, 0, sizeof(acb->reserved1));
1192 memset(acb->reserved2, 0, sizeof(acb->reserved2));
1193 memset(acb->reserved3, 0, sizeof(acb->reserved3));
1194 memset(acb->reserved4, 0, sizeof(acb->reserved4));
1195 memset(acb->reserved5, 0, sizeof(acb->reserved5));
1196 memset(acb->reserved6, 0, sizeof(acb->reserved6));
1197 memset(acb->reserved7, 0, sizeof(acb->reserved7));
1198 memset(acb->reserved8, 0, sizeof(acb->reserved8));
1199 memset(acb->reserved9, 0, sizeof(acb->reserved9));
1200 memset(acb->reserved10, 0, sizeof(acb->reserved10));
1201 memset(acb->reserved11, 0, sizeof(acb->reserved11));
1202 memset(acb->reserved12, 0, sizeof(acb->reserved12));
1203 memset(acb->reserved13, 0, sizeof(acb->reserved13));
1204 memset(acb->reserved14, 0, sizeof(acb->reserved14));
1205 memset(acb->reserved15, 0, sizeof(acb->reserved15));
/*
 * qla4xxx_iface_set_param - iSCSI transport .set_iface_param callback.
 * @shost: Scsi_Host backing this adapter
 * @data:  netlink attribute stream of iscsi_iface_param_info entries
 * @len:   length of @data
 *
 * Flow visible in this view: DMA-alloc a scratch addr_ctrl_blk, read the
 * current IFCB from firmware, fold each netlink attribute into it via
 * qla4xxx_set_ipv4/ipv6, commit to flash, disable the ACB, wait for the
 * disable-complete event, convert to ACB layout and re-set it in firmware,
 * then refresh the driver's local IFCB copy. The scratch buffer is freed
 * on every path via the exit_init_fw_cb label.
 *
 * NOTE(review): several lines (returns, case labels, braces) are elided
 * from this extracted view — confirm control flow against full source.
 */
1209 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1211 struct scsi_qla_host *ha = to_qla_host(shost);
1213 struct iscsi_iface_param_info *iface_param = NULL;
1214 struct addr_ctrl_blk *init_fw_cb = NULL;
1215 dma_addr_t init_fw_cb_dma;
1216 uint32_t mbox_cmd[MBOX_REG_COUNT];
1217 uint32_t mbox_sts[MBOX_REG_COUNT];
1219 struct nlattr *attr;
1221 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1222 sizeof(struct addr_ctrl_blk),
1223 &init_fw_cb_dma, GFP_KERNEL);
1225 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1230 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1231 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1232 memset(&mbox_sts, 0, sizeof(mbox_sts));
/* Start from the firmware's current interface control block. */
1234 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1235 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1237 goto exit_init_fw_cb;
/* Walk the netlink attribute stream; each attr carries one param. */
1240 nla_for_each_attr(attr, data, len, rem) {
1241 iface_param = nla_data(attr);
1243 if (iface_param->param_type != ISCSI_NET_PARAM)
1246 switch (iface_param->iface_type) {
1247 case ISCSI_IFACE_TYPE_IPV4:
1248 switch (iface_param->iface_num) {
1250 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1253 /* Cannot have more than one IPv4 interface */
1254 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1256 iface_param->iface_num);
1260 case ISCSI_IFACE_TYPE_IPV6:
1261 switch (iface_param->iface_num) {
1264 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1267 /* Cannot have more than two IPv6 interface */
1268 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1270 iface_param->iface_num);
1275 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
/* Magic cookie validating the IFCB image in flash. */
1280 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1282 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1283 sizeof(struct addr_ctrl_blk),
1284 FLASH_OPT_RMW_COMMIT);
1285 if (rval != QLA_SUCCESS) {
1286 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1289 goto exit_init_fw_cb;
/* ACB must be disabled before the new one can be applied. */
1292 rval = qla4xxx_disable_acb(ha);
1293 if (rval != QLA_SUCCESS) {
1294 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1297 goto exit_init_fw_cb;
1300 wait_for_completion_timeout(&ha->disable_acb_comp,
1301 DISABLE_ACB_TOV * HZ);
1303 qla4xxx_initcb_to_acb(init_fw_cb);
1305 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1306 if (rval != QLA_SUCCESS) {
1307 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1310 goto exit_init_fw_cb;
/* Re-read so the driver's cached copy matches what fw now runs. */
1313 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1314 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1318 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1319 init_fw_cb, init_fw_cb_dma);
/*
 * qla4xxx_session_get_param - iSCSI transport .get_session_param callback.
 * Handles the CHAP index params by looking up the index in the adapter's
 * CHAP table (BIDI for inbound, LOCAL for outbound) and printing it into
 * @buf; everything else falls through to the generic
 * iscsi_session_get_param().
 */
1324 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1325 enum iscsi_param param, char *buf)
1327 struct iscsi_session *sess = cls_sess->dd_data;
1328 struct ddb_entry *ddb_entry = sess->dd_data;
1329 struct scsi_qla_host *ha = ddb_entry->ha;
1334 case ISCSI_PARAM_CHAP_IN_IDX:
1335 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1336 sess->password_in, BIDI_CHAP,
1341 len = sprintf(buf, "%hu\n", idx);
1343 case ISCSI_PARAM_CHAP_OUT_IDX:
1344 rval = qla4xxx_get_chap_index(ha, sess->username,
1345 sess->password, LOCAL_CHAP,
1350 len = sprintf(buf, "%hu\n", idx);
/* Default: let the generic transport code answer. */
1353 return iscsi_session_get_param(cls_sess, param, buf);
/*
 * qla4xxx_conn_get_param - iSCSI transport .get_conn_param callback.
 * Address/port queries are answered from the endpoint's stored
 * destination sockaddr; all other params go to the generic
 * iscsi_conn_get_param().
 */
1359 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1360 enum iscsi_param param, char *buf)
1362 struct iscsi_conn *conn;
1363 struct qla_conn *qla_conn;
1364 struct sockaddr *dst_addr;
1367 conn = cls_conn->dd_data;
1368 qla_conn = conn->dd_data;
1369 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1372 case ISCSI_PARAM_CONN_PORT:
1373 case ISCSI_PARAM_CONN_ADDRESS:
1374 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1375 dst_addr, param, buf);
1377 return iscsi_conn_get_param(cls_conn, param, buf);
/*
 * qla4xxx_get_ddb_index - claim a free device database (DDB) index.
 * @ha:        adapter context
 * @ddb_index: out — the claimed firmware DDB index on success
 *
 * Scans ha->ddb_idx_map for a free bit, claims it atomically with
 * test_and_set_bit (retrying on a race — the retry loop's backward
 * branch is elided from this view), then asks firmware to reserve that
 * DDB entry. On MBOX_STS_COMMAND_ERROR the index is presumably retried;
 * confirm against full source.
 */
1384 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1386 uint32_t mbx_sts = 0;
1387 uint16_t tmp_ddb_index;
1391 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1393 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1394 DEBUG2(ql4_printk(KERN_INFO, ha,
1395 "Free DDB index not available\n"));
1397 goto exit_get_ddb_index;
/* Lost the race for this bit: another claimant set it first. */
1400 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1403 DEBUG2(ql4_printk(KERN_INFO, ha,
1404 "Found a free DDB index at %d\n", tmp_ddb_index));
1405 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1406 if (ret == QLA_ERROR) {
1407 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1408 ql4_printk(KERN_INFO, ha,
1409 "DDB index = %d not available trying next\n",
1413 DEBUG2(ql4_printk(KERN_INFO, ha,
1414 "Free FW DDB not available\n"));
1417 *ddb_index = tmp_ddb_index;
/*
 * qla4xxx_match_ipaddress - compare a user-supplied IP string against an
 * existing DDB's formatted address.
 *
 * The user string is parsed with in6_pton/in4_pton (chosen by the DDB's
 * IPV6 option bit) and re-printed with %pI6/%pI4 so both sides are in
 * canonical form before strcmp. Returns QLA_SUCCESS on match (status is
 * initialized to QLA_SUCCESS; the mismatch branch's assignment is elided
 * from this view).
 */
1423 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1424 struct ddb_entry *ddb_entry,
1425 char *existing_ipaddr,
1428 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1429 char formatted_ipaddr[DDB_IPADDR_LEN];
1430 int status = QLA_SUCCESS, ret = 0;
1432 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1433 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1439 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1441 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1447 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1450 if (strcmp(existing_ipaddr, formatted_ipaddr))
/*
 * qla4xxx_match_fwdb_session - check whether the session being logged in
 * duplicates an existing flash-DDB firmware session.
 *
 * Walks every firmware DDB index; for each FLASH_DDB entry it compares
 * target IQN, destination IP (via qla4xxx_match_ipaddress) and persistent
 * port against the new connection. Returning QLA_SUCCESS means "match
 * found" (caller then refuses the duplicate login); reaching idx ==
 * max_ddbs means no match. Several continue/break/return lines are
 * elided from this view.
 */
1457 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1458 struct iscsi_cls_conn *cls_conn)
1460 int idx = 0, max_ddbs, rval;
1461 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1462 struct iscsi_session *sess, *existing_sess;
1463 struct iscsi_conn *conn, *existing_conn;
1464 struct ddb_entry *ddb_entry;
1466 sess = cls_sess->dd_data;
1467 conn = cls_conn->dd_data;
/* Nothing to compare against if the new session is not fully specified. */
1469 if (sess->targetname == NULL ||
1470 conn->persistent_address == NULL ||
1471 conn->persistent_port == 0)
1474 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1477 for (idx = 0; idx < max_ddbs; idx++) {
1478 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1479 if (ddb_entry == NULL)
1482 if (ddb_entry->ddb_type != FLASH_DDB)
1485 existing_sess = ddb_entry->sess->dd_data;
1486 existing_conn = ddb_entry->conn->dd_data;
1488 if (existing_sess->targetname == NULL ||
1489 existing_conn->persistent_address == NULL ||
1490 existing_conn->persistent_port == 0)
1493 DEBUG2(ql4_printk(KERN_INFO, ha,
1494 "IQN = %s User IQN = %s\n",
1495 existing_sess->targetname,
1498 DEBUG2(ql4_printk(KERN_INFO, ha,
1499 "IP = %s User IP = %s\n",
1500 existing_conn->persistent_address,
1501 conn->persistent_address));
1503 DEBUG2(ql4_printk(KERN_INFO, ha,
1504 "Port = %d User Port = %d\n",
1505 existing_conn->persistent_port,
1506 conn->persistent_port));
1508 if (strcmp(existing_sess->targetname, sess->targetname))
1510 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1511 existing_conn->persistent_address,
1512 conn->persistent_address);
1513 if (rval == QLA_ERROR)
1515 if (existing_conn->persistent_port != conn->persistent_port)
/* Loop completed without a break => no duplicate found. */
1520 if (idx == max_ddbs)
1523 DEBUG2(ql4_printk(KERN_INFO, ha,
1524 "Match found in fwdb sessions\n"));
/*
 * qla4xxx_session_create - iSCSI transport .create_session callback.
 *
 * Claims a free firmware DDB index, then builds the libiscsi session
 * (iscsi_session_setup allocates sess->dd_data as our ddb_entry) and
 * wires the ddb_entry into the adapter's fw_ddb_index_map. Error paths
 * (NULL ep, setup failure) are partially elided from this view.
 */
1528 static struct iscsi_cls_session *
1529 qla4xxx_session_create(struct iscsi_endpoint *ep,
1530 uint16_t cmds_max, uint16_t qdepth,
1531 uint32_t initial_cmdsn)
1533 struct iscsi_cls_session *cls_sess;
1534 struct scsi_qla_host *ha;
1535 struct qla_endpoint *qla_ep;
1536 struct ddb_entry *ddb_entry;
1538 struct iscsi_session *sess;
1539 struct sockaddr *dst_addr;
1542 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1544 printk(KERN_ERR "qla4xxx: missing ep.\n");
1548 qla_ep = ep->dd_data;
1549 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1550 ha = to_qla_host(qla_ep->host);
1552 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1553 if (ret == QLA_ERROR)
/* The claimed DDB index doubles as the session's host target id. */
1556 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1557 cmds_max, sizeof(struct ddb_entry),
1558 sizeof(struct ql4_task_data),
1559 initial_cmdsn, ddb_index);
1563 sess = cls_sess->dd_data;
1564 ddb_entry = sess->dd_data;
1565 ddb_entry->fw_ddb_index = ddb_index;
1566 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1568 ddb_entry->sess = cls_sess;
1569 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1570 ddb_entry->ddb_change = qla4xxx_ddb_change;
1571 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1572 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
/*
 * qla4xxx_session_destroy - iSCSI transport .destroy_session callback.
 *
 * Polls the firmware DDB state (up to LOGOUT_TOV seconds, sleeping 1s
 * per iteration) until the connection is down or failed, then clears the
 * firmware DDB entry, frees the driver ddb under hardware_lock, tears
 * down the libiscsi session, and releases the DMA scratch buffer.
 * The do/while head and ha assignment are elided from this view.
 */
1578 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1580 struct iscsi_session *sess;
1581 struct ddb_entry *ddb_entry;
1582 struct scsi_qla_host *ha;
1583 unsigned long flags, wtime;
1584 struct dev_db_entry *fw_ddb_entry = NULL;
1585 dma_addr_t fw_ddb_entry_dma;
1589 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1590 sess = cls_sess->dd_data;
1591 ddb_entry = sess->dd_data;
1594 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 &fw_ddb_entry_dma, GFP_KERNEL);
1596 if (!fw_ddb_entry) {
1597 ql4_printk(KERN_ERR, ha,
1598 "%s: Unable to allocate dma buffer\n", __func__);
/* Alloc failure: skip the state poll, tear down immediately. */
1599 goto destroy_session;
1602 wtime = jiffies + (HZ * LOGOUT_TOV);
1604 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 fw_ddb_entry, fw_ddb_entry_dma,
1606 NULL, NULL, &ddb_state, NULL,
1608 if (ret == QLA_ERROR)
1609 goto destroy_session;
1611 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 (ddb_state == DDB_DS_SESSION_FAILED))
1613 goto destroy_session;
1615 schedule_timeout_uninterruptible(HZ);
1616 } while ((time_after(wtime, jiffies)));
1619 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1621 spin_lock_irqsave(&ha->hardware_lock, flags);
1622 qla4xxx_free_ddb(ha, ddb_entry);
1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1625 iscsi_session_teardown(cls_sess);
1628 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 fw_ddb_entry, fw_ddb_entry_dma);
/*
 * qla4xxx_conn_create - iSCSI transport .create_conn callback.
 * Allocates the libiscsi connection (with a qla_conn as dd_data) and
 * records it on the session's ddb_entry.
 */
1632 static struct iscsi_cls_conn *
1633 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1635 struct iscsi_cls_conn *cls_conn;
1636 struct iscsi_session *sess;
1637 struct ddb_entry *ddb_entry;
1639 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1640 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1645 sess = cls_sess->dd_data;
1646 ddb_entry = sess->dd_data;
1647 ddb_entry->conn = cls_conn;
/*
 * qla4xxx_conn_bind - iSCSI transport .bind_conn callback.
 * Binds the connection to the session, then attaches the endpoint
 * (identified by @transport_fd) to our qla_conn.
 *
 * NOTE(review): ep from iscsi_lookup_endpoint() is dereferenced without
 * a visible NULL check — the check may be on a line elided from this
 * view; verify against full source.
 */
1652 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1653 struct iscsi_cls_conn *cls_conn,
1654 uint64_t transport_fd, int is_leading)
1656 struct iscsi_conn *conn;
1657 struct qla_conn *qla_conn;
1658 struct iscsi_endpoint *ep;
1660 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1662 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1664 ep = iscsi_lookup_endpoint(transport_fd);
1665 conn = cls_conn->dd_data;
1666 qla_conn = conn->dd_data;
1667 qla_conn->qla_ep = ep->dd_data;
/*
 * qla4xxx_conn_start - iSCSI transport .start_conn callback.
 *
 * Refuses login if a matching firmware flash-DDB session already exists
 * (would log out the previous belonging session), otherwise programs the
 * DDB entry parameters and opens the connection. If set-param fails but
 * the DDB is already SESSION_ACTIVE (iscsid restart case), the session
 * is simply unblocked. Scratch DMA buffer freed on all paths.
 */
1671 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1673 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1674 struct iscsi_session *sess;
1675 struct ddb_entry *ddb_entry;
1676 struct scsi_qla_host *ha;
1677 struct dev_db_entry *fw_ddb_entry = NULL;
1678 dma_addr_t fw_ddb_entry_dma;
1679 uint32_t mbx_sts = 0;
1681 int status = QLA_SUCCESS;
1683 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1684 sess = cls_sess->dd_data;
1685 ddb_entry = sess->dd_data;
1688 /* Check if we have matching FW DDB, if yes then do not
1689 * login to this target. This could cause target to logout previous
1692 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1693 if (ret == QLA_SUCCESS) {
1694 ql4_printk(KERN_INFO, ha,
1695 "Session already exist in FW.\n");
1697 goto exit_conn_start;
1700 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1701 &fw_ddb_entry_dma, GFP_KERNEL);
1702 if (!fw_ddb_entry) {
1703 ql4_printk(KERN_ERR, ha,
1704 "%s: Unable to allocate dma buffer\n", __func__);
1706 goto exit_conn_start;
1709 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1711 /* If iscsid is stopped and started then no need to do
1712 * set param again since ddb state will be already
1713 * active and FW does not allow set ddb to an
1717 if (ddb_entry->fw_ddb_device_state ==
1718 DDB_DS_SESSION_ACTIVE) {
1719 ddb_entry->unblock_sess(ddb_entry->sess);
1720 goto exit_set_param;
1723 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1724 __func__, ddb_entry->fw_ddb_index);
1725 goto exit_conn_start;
/* Fire the firmware login for this DDB index. */
1728 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1729 if (status == QLA_ERROR) {
1730 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1733 goto exit_conn_start;
1736 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1737 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1739 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1740 ddb_entry->fw_ddb_device_state));
1747 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1748 fw_ddb_entry, fw_ddb_entry_dma);
/*
 * qla4xxx_conn_destroy - iSCSI transport .destroy_conn callback.
 * Logs the firmware session out with the close-session option; failure
 * is only logged since teardown proceeds regardless.
 */
1752 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1754 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1755 struct iscsi_session *sess;
1756 struct scsi_qla_host *ha;
1757 struct ddb_entry *ddb_entry;
1760 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1761 sess = cls_sess->dd_data;
1762 ddb_entry = sess->dd_data;
1765 options = LOGOUT_OPTION_CLOSE_SESSION;
1766 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1767 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
/*
 * qla4xxx_task_work - deferred completion for passthru iSCSI PDUs.
 *
 * Runs from a workqueue: unpacks the ql4_task_data embedding @wdata,
 * and on PASSTHRU_STATUS_COMPLETE hands the response PDU header and
 * payload (located after the iscsi_hdr in resp_buffer) back to libiscsi
 * via iscsi_complete_pdu(); other completion statuses are logged as
 * failures.
 */
1770 static void qla4xxx_task_work(struct work_struct *wdata)
1772 struct ql4_task_data *task_data;
1773 struct scsi_qla_host *ha;
1774 struct passthru_status *sts;
1775 struct iscsi_task *task;
1776 struct iscsi_hdr *hdr;
1779 struct iscsi_conn *conn;
1783 task_data = container_of(wdata, struct ql4_task_data, task_work);
1785 task = task_data->task;
1786 sts = &task_data->sts;
1787 hdr_len = sizeof(struct iscsi_hdr);
1789 DEBUG3(printk(KERN_INFO "Status returned\n"));
1790 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1791 DEBUG3(printk(KERN_INFO "Response buffer"));
1792 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1796 switch (sts->completionStatus) {
1797 case PASSTHRU_STATUS_COMPLETE:
1798 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1799 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
/* Payload begins immediately after the BHS in the response buffer. */
1802 data = task_data->resp_buffer + hdr_len;
1803 data_len = task_data->resp_len - hdr_len;
1804 iscsi_complete_pdu(conn, hdr, data, data_len);
1807 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1808 sts->completionStatus);
/*
 * qla4xxx_alloc_pdu - iSCSI transport .alloc_pdu callback (passthru only).
 *
 * SCSI commands are explicitly not implemented here. For PDU tasks it
 * maps the outgoing data buffer for DMA, allocates coherent request and
 * response buffers sized from data_count / max_recv_dlength plus the
 * iscsi_hdr, points task->hdr at the request buffer, and initializes the
 * deferred-completion work item. On allocation failure both buffers that
 * did get allocated are freed (the goto target lines are elided from
 * this view).
 */
1814 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1816 struct ql4_task_data *task_data;
1817 struct iscsi_session *sess;
1818 struct ddb_entry *ddb_entry;
1819 struct scsi_qla_host *ha;
1822 sess = task->conn->session;
1823 ddb_entry = sess->dd_data;
1825 task_data = task->dd_data;
1826 memset(task_data, 0, sizeof(struct ql4_task_data));
1829 ql4_printk(KERN_INFO, ha,
1830 "%s: SCSI Commands not implemented\n", __func__);
1834 hdr_len = sizeof(struct iscsi_hdr);
1836 task_data->task = task;
1838 if (task->data_count) {
1839 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1844 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1845 __func__, task->conn->max_recv_dlength, hdr_len));
1847 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1848 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1849 task_data->resp_len,
1850 &task_data->resp_dma,
1852 if (!task_data->resp_buffer)
1853 goto exit_alloc_pdu;
1855 task_data->req_len = task->data_count + hdr_len;
1856 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1858 &task_data->req_dma,
1860 if (!task_data->req_buffer)
1861 goto exit_alloc_pdu;
/* libiscsi builds the PDU header directly in our request buffer. */
1863 task->hdr = task_data->req_buffer;
1865 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
/* Error path: release whichever buffers were successfully allocated. */
1870 if (task_data->resp_buffer)
1871 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1872 task_data->resp_buffer, task_data->resp_dma);
1874 if (task_data->req_buffer)
1875 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1876 task_data->req_buffer, task_data->req_dma);
/*
 * qla4xxx_task_cleanup - iSCSI transport .cleanup_task callback.
 * Undoes qla4xxx_alloc_pdu: unmaps the streaming data mapping (if any)
 * and frees the coherent request/response buffers.
 */
1880 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1882 struct ql4_task_data *task_data;
1883 struct iscsi_session *sess;
1884 struct ddb_entry *ddb_entry;
1885 struct scsi_qla_host *ha;
1888 hdr_len = sizeof(struct iscsi_hdr);
1889 sess = task->conn->session;
1890 ddb_entry = sess->dd_data;
1892 task_data = task->dd_data;
1894 if (task->data_count) {
1895 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1896 task->data_count, PCI_DMA_TODEVICE);
1899 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1900 __func__, task->conn->max_recv_dlength, hdr_len));
1902 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1903 task_data->resp_buffer, task_data->resp_dma);
1904 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1905 task_data->req_buffer, task_data->req_dma);
/*
 * qla4xxx_task_xmit - iSCSI transport .xmit_task callback.
 * Non-SCSI (passthru) tasks are sent via qla4xxx_send_passthru0();
 * SCSI command transmit through this path is not implemented.
 */
1909 static int qla4xxx_task_xmit(struct iscsi_task *task)
1911 struct scsi_cmnd *sc = task->sc;
1912 struct iscsi_session *sess = task->conn->session;
1913 struct ddb_entry *ddb_entry = sess->dd_data;
1914 struct scsi_qla_host *ha = ddb_entry->ha;
1917 return qla4xxx_send_passthru0(task);
1919 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
/*
 * qla4xxx_copy_fwddb_param - populate libiscsi session/conn parameters
 * from a firmware dev_db_entry.
 *
 * Converts the fw entry's little-endian fields into the session and
 * connection structs (data segment lengths scaled by BYTE_UNITS, R2T /
 * immediate-data flags taken from iscsi_options bits 10/11), formats the
 * target IP per the DDB's IPv6 option bit, and pushes the string params
 * (target name, initiator name, persistent address, alias) through
 * iscsi_set_param().
 */
1924 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1925 struct dev_db_entry *fw_ddb_entry,
1926 struct iscsi_cls_session *cls_sess,
1927 struct iscsi_cls_conn *cls_conn)
1930 struct iscsi_session *sess;
1931 struct ddb_entry *ddb_entry;
1932 struct iscsi_conn *conn;
1933 char ip_addr[DDB_IPADDR_LEN];
1934 uint16_t options = 0;
1936 sess = cls_sess->dd_data;
1937 ddb_entry = sess->dd_data;
1938 conn = cls_conn->dd_data;
1940 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
/* Firmware stores segment lengths in BYTE_UNITS-sized units. */
1942 conn->max_recv_dlength = BYTE_UNITS *
1943 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1945 conn->max_xmit_dlength = BYTE_UNITS *
1946 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1948 sess->initial_r2t_en =
1949 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1951 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1953 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1955 sess->first_burst = BYTE_UNITS *
1956 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1958 sess->max_burst = BYTE_UNITS *
1959 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1961 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1963 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1965 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1967 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1969 options = le16_to_cpu(fw_ddb_entry->options);
1970 if (options & DDB_OPT_IPV6_DEVICE)
1971 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1973 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1975 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1976 (char *)fw_ddb_entry->iscsi_name, buflen);
1977 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1978 (char *)ha->name_string, buflen);
1979 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1980 (char *)ip_addr, buflen);
1981 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1982 (char *)fw_ddb_entry->iscsi_alias, buflen);
/*
 * qla4xxx_update_session_conn_fwddb_param - refresh a flash-DDB
 * session's libiscsi parameters from the current firmware DDB entry.
 *
 * Reads the fw dev_db_entry for ddb_entry->fw_ddb_index into a coherent
 * scratch buffer, then delegates to qla4xxx_copy_fwddb_param(). The
 * buffer is freed at the exit label on all paths.
 */
1985 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1986 struct ddb_entry *ddb_entry)
1988 struct iscsi_cls_session *cls_sess;
1989 struct iscsi_cls_conn *cls_conn;
1991 dma_addr_t fw_ddb_entry_dma;
1992 struct dev_db_entry *fw_ddb_entry;
1994 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1995 &fw_ddb_entry_dma, GFP_KERNEL);
1996 if (!fw_ddb_entry) {
1997 ql4_printk(KERN_ERR, ha,
1998 "%s: Unable to allocate dma buffer\n", __func__);
1999 goto exit_session_conn_fwddb_param;
2002 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2003 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2004 NULL, NULL, NULL) == QLA_ERROR) {
2005 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2006 "get_ddb_entry for fw_ddb_index %d\n",
2007 ha->host_no, __func__,
2008 ddb_entry->fw_ddb_index));
2009 goto exit_session_conn_fwddb_param;
2012 cls_sess = ddb_entry->sess;
2014 cls_conn = ddb_entry->conn;
2017 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2019 exit_session_conn_fwddb_param:
2021 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2022 fw_ddb_entry, fw_ddb_entry_dma);
/*
 * qla4xxx_update_session_conn_param - refresh a normal session's
 * libiscsi parameters and relogin timers from the firmware DDB entry.
 *
 * Like qla4xxx_update_session_conn_fwddb_param() but writes directly
 * into the iscsi_session/iscsi_conn dd_data and also updates the
 * relogin timeout (clamped to [LOGIN_TOV, LOGIN_TOV*10), otherwise
 * LOGIN_TOV) and default time2wait.
 */
2025 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2026 struct ddb_entry *ddb_entry)
2028 struct iscsi_cls_session *cls_sess;
2029 struct iscsi_cls_conn *cls_conn;
2030 struct iscsi_session *sess;
2031 struct iscsi_conn *conn;
2033 dma_addr_t fw_ddb_entry_dma;
2034 struct dev_db_entry *fw_ddb_entry;
2036 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2037 &fw_ddb_entry_dma, GFP_KERNEL);
2038 if (!fw_ddb_entry) {
2039 ql4_printk(KERN_ERR, ha,
2040 "%s: Unable to allocate dma buffer\n", __func__);
2041 goto exit_session_conn_param;
2044 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2045 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2046 NULL, NULL, NULL) == QLA_ERROR) {
2047 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2048 "get_ddb_entry for fw_ddb_index %d\n",
2049 ha->host_no, __func__,
2050 ddb_entry->fw_ddb_index));
2051 goto exit_session_conn_param;
2054 cls_sess = ddb_entry->sess;
2055 sess = cls_sess->dd_data;
2057 cls_conn = ddb_entry->conn;
2058 conn = cls_conn->dd_data;
2060 /* Update timers after login */
/* Use fw def_timeout only when inside (LOGIN_TOV, LOGIN_TOV*10). */
2061 ddb_entry->default_relogin_timeout =
2062 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2063 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2064 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2065 ddb_entry->default_time2wait =
2066 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2069 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2070 conn->max_recv_dlength = BYTE_UNITS *
2071 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2073 conn->max_xmit_dlength = BYTE_UNITS *
2074 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2076 sess->initial_r2t_en =
2077 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2079 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2081 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2083 sess->first_burst = BYTE_UNITS *
2084 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2086 sess->max_burst = BYTE_UNITS *
2087 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2089 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2091 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2093 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2095 memcpy(sess->initiatorname, ha->name_string,
2096 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2098 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2099 (char *)fw_ddb_entry->iscsi_alias, 0);
2101 exit_session_conn_param:
2103 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2104 fw_ddb_entry, fw_ddb_entry_dma);
/*
 * qla4xxx_start_timer - arm the adapter's periodic timer.
 * @ha:       adapter context
 * @func:     timer callback, cast to the legacy (unsigned long) form
 * @interval: period in seconds (converted to jiffies via HZ)
 *
 * Uses the pre-4.15 init_timer()/data-pointer timer API.
 */
2111 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2112 unsigned long interval)
2114 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2115 __func__, ha->host->host_no));
2116 init_timer(&ha->timer);
2117 ha->timer.expires = jiffies + interval * HZ;
2118 ha->timer.data = (unsigned long)ha;
2119 ha->timer.function = (void (*)(unsigned long))func;
2120 add_timer(&ha->timer);
2121 ha->timer_active = 1;
/* qla4xxx_stop_timer - synchronously cancel the adapter timer. */
2124 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2126 del_timer_sync(&ha->timer);
2127 ha->timer_active = 0;
2131 * qla4xxx_mark_device_missing - blocks the session
2132 * @cls_session: Pointer to the session to be blocked
2133 * @ddb_entry: Pointer to device database entry
2135 * This routine marks a device missing and close connection.
2137 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2139 iscsi_block_session(cls_session);
2143 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2144 * @ha: Pointer to host adapter structure.
2146 * This routine marks a device missing and resets the relogin retry count.
2148 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
/* Blocks every session on the host via the per-session helper above. */
2150 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
/*
 * qla4xxx_get_new_srb - allocate and initialize an SRB for a SCSI cmd.
 * Draws from the adapter's mempool with GFP_ATOMIC (queuecommand
 * context), refcounts it, links it to the DDB, and stashes it in the
 * command's SP slot. NULL-check on mempool_alloc is on a line elided
 * from this view.
 */
2153 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2154 struct ddb_entry *ddb_entry,
2155 struct scsi_cmnd *cmd)
2159 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2163 kref_init(&srb->srb_ref);
2165 srb->ddb = ddb_entry;
2168 CMD_SP(cmd) = (void *)srb;
/*
 * qla4xxx_srb_free_dma - release an SRB's DMA mapping, if it has one.
 * Idempotent: SRB_DMA_VALID is cleared after the unmap.
 */
2173 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2175 struct scsi_cmnd *cmd = srb->cmd;
2177 if (srb->flags & SRB_DMA_VALID) {
2178 scsi_dma_unmap(cmd);
2179 srb->flags &= ~SRB_DMA_VALID;
/*
 * qla4xxx_srb_compl - kref release callback for an SRB.
 * Runs when the last reference drops: unmaps DMA, returns the SRB to
 * the mempool, and completes the SCSI command back to the midlayer.
 */
2184 void qla4xxx_srb_compl(struct kref *ref)
2186 struct srb *srb = container_of(ref, struct srb, srb_ref);
2187 struct scsi_cmnd *cmd = srb->cmd;
2188 struct scsi_qla_host *ha = srb->ha;
2190 qla4xxx_srb_free_dma(ha, srb);
2192 mempool_free(srb, ha->srb_mempool);
2194 cmd->scsi_done(cmd);
2198 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2200 * @cmd: Pointer to Linux's SCSI command structure
2203 * This routine is invoked by Linux to send a SCSI command to the driver.
2204 * The mid-level driver tries to ensure that queuecommand never gets
2205 * invoked concurrently with itself or the interrupt handler (although
2206 * the interrupt handler may call this routine as part of request-
2207 * completion handling). Unfortunely, it sometimes calls the scheduler
2208 * in interrupt context which is a big NO! NO!.
2210 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2212 struct scsi_qla_host *ha = to_qla_host(host);
2213 struct ddb_entry *ddb_entry = cmd->device->hostdata;
2214 struct iscsi_cls_session *sess = ddb_entry->sess;
/* EEH: permanent PCI failure => fail; transient => ask for requeue. */
2218 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2219 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2220 cmd->result = DID_NO_CONNECT << 16;
2222 cmd->result = DID_REQUEUE << 16;
2223 goto qc_fail_command;
2227 cmd->result = DID_IMM_RETRY << 16;
2228 goto qc_fail_command;
2231 rval = iscsi_session_chkready(sess);
2234 goto qc_fail_command;
/* Any reset/recovery/offline condition => push back with host-busy. */
2237 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2238 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2239 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2240 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2241 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2242 !test_bit(AF_ONLINE, &ha->flags) ||
2243 !test_bit(AF_LINK_UP, &ha->flags) ||
2244 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2247 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2251 rval = qla4xxx_send_command_to_isp(ha, srb);
2252 if (rval != QLA_SUCCESS)
2253 goto qc_host_busy_free_sp;
2257 qc_host_busy_free_sp:
2258 qla4xxx_srb_free_dma(ha, srb);
2259 mempool_free(srb, ha->srb_mempool);
2262 return SCSI_MLQUEUE_HOST_BUSY;
2265 cmd->scsi_done(cmd);
2271 * qla4xxx_mem_free - frees memory allocated to adapter
2272 * @ha: Pointer to host adapter structure.
2274 * Frees memory previously allocated by qla4xxx_mem_alloc
2276 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
/* Queues block holds request ring, response ring and shadow regs. */
2279 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2288 ha->request_ring = NULL;
2289 ha->request_dma = 0;
2290 ha->response_ring = NULL;
2291 ha->response_dma = 0;
2292 ha->shadow_regs = NULL;
2293 ha->shadow_regs_dma = 0;
2295 ha->fw_dump_size = 0;
2297 /* Free srb pool. */
2298 if (ha->srb_mempool)
2299 mempool_destroy(ha->srb_mempool);
2301 ha->srb_mempool = NULL;
2303 if (ha->chap_dma_pool)
2304 dma_pool_destroy(ha->chap_dma_pool);
2307 vfree(ha->chap_list);
2308 ha->chap_list = NULL;
2310 if (ha->fw_ddb_dma_pool)
2311 dma_pool_destroy(ha->fw_ddb_dma_pool);
2313 /* release io space registers */
2314 if (is_qla8022(ha)) {
2317 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2320 pci_release_regions(ha->pdev);
2324 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2325 * @ha: Pointer to host adapter structure
2327 * Allocates DMA memory for request and response queues. Also allocates memory
2330 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2332 unsigned long align;
2334 /* Allocate contiguous block of DMA memory for queues. */
/* Page-round: request ring + response ring + shadow regs (+ slack). */
2335 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2336 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2337 sizeof(struct shadow_regs) +
2339 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2340 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2341 &ha->queues_dma, GFP_KERNEL);
2342 if (ha->queues == NULL) {
2343 ql4_printk(KERN_WARNING, ha,
2344 "Memory Allocation failed - queues.\n");
2346 goto mem_alloc_error_exit;
2348 memset(ha->queues, 0, ha->queues_len);
2351 * As per RISC alignment requirements -- the bus-address must be a
2352 * multiple of the request-ring size (in bytes).
2355 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2356 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2357 (MEM_ALIGN_VALUE - 1));
2359 /* Update request and response queue pointers. */
/* Carve the single allocation: [align][req ring][resp ring][shadow]. */
2360 ha->request_dma = ha->queues_dma + align;
2361 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2362 ha->response_dma = ha->queues_dma + align +
2363 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2364 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2365 (REQUEST_QUEUE_DEPTH *
2367 ha->shadow_regs_dma = ha->queues_dma + align +
2368 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2369 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2370 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2371 (REQUEST_QUEUE_DEPTH *
2373 (RESPONSE_QUEUE_DEPTH *
2376 /* Allocate memory for srb pool. */
2377 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2378 mempool_free_slab, srb_cachep);
2379 if (ha->srb_mempool == NULL) {
2380 ql4_printk(KERN_WARNING, ha,
2381 "Memory Allocation failed - SRB Pool.\n");
2383 goto mem_alloc_error_exit;
2386 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2387 CHAP_DMA_BLOCK_SIZE, 8, 0);
2389 if (ha->chap_dma_pool == NULL) {
2390 ql4_printk(KERN_WARNING, ha,
2391 "%s: chap_dma_pool allocation failed..\n", __func__);
2392 goto mem_alloc_error_exit;
2395 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2396 DDB_DMA_BLOCK_SIZE, 8, 0);
2398 if (ha->fw_ddb_dma_pool == NULL) {
2399 ql4_printk(KERN_WARNING, ha,
2400 "%s: fw_ddb_dma_pool allocation failed..\n",
2402 goto mem_alloc_error_exit;
2407 mem_alloc_error_exit:
/* Any failure unwinds everything allocated so far. */
2408 qla4xxx_mem_free(ha);
2413 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2414 * @ha: adapter block pointer.
2416 * Note: The caller should not hold the idc lock.
/*
 * qla4_8xxx_check_temp - read and classify the ISP82xx temperature state.
 *
 * Reads CRB_TEMP_STATE and decodes it into a state (panic/warn/normal)
 * and a value in degrees C.  Logs transitions; records the current state
 * in ha->temperature.  Returns 'status' (presumably non-success on PANIC
 * in the elided lines — confirm against full source).
 */
2418 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2420 uint32_t temp, temp_state, temp_val;
2421 int status = QLA_SUCCESS;
2423 temp = qla4_82xx_rd_32(ha, CRB_TEMP_STATE);
2425 temp_state = qla82xx_get_temp_state(temp);
2426 temp_val = qla82xx_get_temp_val(temp);
2428 if (temp_state == QLA82XX_TEMP_PANIC) {
2429 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2430 " exceeds maximum allowed. Hardware has been shut"
2431 " down.\n", temp_val);
2433 } else if (temp_state == QLA82XX_TEMP_WARN) {
/* Warn only on the transition from NORMAL to avoid log spam. */
2434 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2435 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2436 " degrees C exceeds operating range."
2437 " Immediate action needed.\n", temp_val);
/* Back-to-normal message, again only on transition. */
2439 if (ha->temperature == QLA82XX_TEMP_WARN)
2440 ql4_printk(KERN_INFO, ha, "Device temperature is"
2441 " now %d degrees C in normal range.\n",
/* Remember last observed state for transition detection next poll. */
2444 ha->temperature = temp_state;
2449 * qla4_8xxx_check_fw_alive - Check firmware health
2450 * @ha: Pointer to host adapter structure.
2452 * Context: Interrupt
/*
 * qla4_8xxx_check_fw_alive - firmware heartbeat check for ISP82xx.
 *
 * Compares QLA82XX_PEG_ALIVE_COUNTER against the value cached in
 * ha->fw_heartbeat_counter.  If unchanged for two consecutive polls,
 * dumps the PEG halt/PC registers (and, per the driver's contract,
 * signals failure — the return path is elided in this extract).
 * Called from the watchdog; interrupt context.
 */
2454 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2456 uint32_t fw_heartbeat_counter;
2457 int status = QLA_SUCCESS;
2459 fw_heartbeat_counter = qla4_82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2460 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2461 if (fw_heartbeat_counter == 0xffffffff) {
2462 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2463 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2464 ha->host_no, __func__));
/* Counter did not advance since last poll -> firmware may be hung. */
2468 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2469 ha->seconds_since_last_heartbeat++;
2470 /* FW not alive after 2 seconds */
2471 if (ha->seconds_since_last_heartbeat == 2) {
2472 ha->seconds_since_last_heartbeat = 0;
/* Dump halt status and all five PEG program counters for triage. */
2474 ql4_printk(KERN_INFO, ha,
2475 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2476 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2477 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2478 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2479 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
2480 ha->host_no, __func__,
2482 QLA82XX_PEG_HALT_STATUS1),
2484 QLA82XX_PEG_HALT_STATUS2),
2485 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2487 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2489 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2491 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2493 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
/* Heartbeat advanced: reset the stall counter. */
2498 ha->seconds_since_last_heartbeat = 0;
/* Cache the latest counter for the next comparison. */
2500 ha->fw_heartbeat_counter = fw_heartbeat_counter;
2505 * qla4_8xxx_watchdog - Poll dev state
2506 * @ha: Pointer to host adapter structure.
2508 * Context: Interrupt
/*
 * qla4_8xxx_watchdog - per-second ISP82xx health poll.
 *
 * Skipped entirely while any reset is in flight.  Checks, in order:
 * temperature (fatal -> DPC_HA_UNRECOVERABLE), IDC device state
 * (NEED_RESET / NEED_QUIESCENT -> corresponding DPC flags), and
 * firmware heartbeat (dead -> disable pause frames, decode halt
 * status, schedule reset via DPC).  All recovery is deferred to the
 * DPC thread because this runs in interrupt context.
 */
2510 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2512 uint32_t dev_state, halt_status;
2514 /* don't poll if reset is going on */
2515 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2516 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2517 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2518 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2520 if (qla4_8xxx_check_temp(ha)) {
2521 ql4_printk(KERN_INFO, ha, "disabling pause"
2522 " transmit on port 0 & 1.\n");
/* Stop XG pause frames so a dead port can't stall the fabric. */
2523 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2524 CRB_NIU_XG_PAUSE_CTL_P0 |
2525 CRB_NIU_XG_PAUSE_CTL_P1);
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 qla4xxx_wake_dpc(ha);
2528 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
2529 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
/* Honor the ql4xdontresethba module parameter. */
2530 if (!ql4xdontresethba) {
2531 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2532 "NEED RESET!\n", __func__);
2533 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2534 qla4xxx_wake_dpc(ha);
2536 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2537 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2538 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2540 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2541 qla4xxx_wake_dpc(ha);
2543 /* Check firmware health */
2544 if (qla4_8xxx_check_fw_alive(ha)) {
2545 ql4_printk(KERN_INFO, ha, "disabling pause"
2546 " transmit on port 0 & 1.\n");
2547 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2548 CRB_NIU_XG_PAUSE_CTL_P0 |
2549 CRB_NIU_XG_PAUSE_CTL_P1);
2550 halt_status = qla4_82xx_rd_32(ha,
2551 QLA82XX_PEG_HALT_STATUS1);
/* 0x67 is a known firmware abort code; log it explicitly. */
2553 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2554 ql4_printk(KERN_ERR, ha, "%s:"
2555 " Firmware aborted with"
2556 " error code 0x00006700."
2557 " Device is being reset\n",
2560 /* Since we cannot change dev_state in interrupt
2561 * context, set appropriate DPC flag then wakeup
2563 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2564 set_bit(DPC_HA_UNRECOVERABLE,
2567 ql4_printk(KERN_INFO, ha, "%s: detect "
2568 "abort needed!\n", __func__);
2569 set_bit(DPC_RESET_HA, &ha->dpc_flags);
/* Fail any mailbox command waiting on the dead firmware. */
2571 qla4xxx_mailbox_premature_completion(ha);
2572 qla4xxx_wake_dpc(ha);
/*
 * qla4xxx_check_relogin_flash_ddb - per-session relogin timer tick.
 *
 * Called once a second (via iscsi_host_for_each_session from the timer)
 * for each session.  Only acts on flash DDB sessions that are offline:
 * counts down retry_relogin_timer and, when it expires, sets
 * DPC_RELOGIN_DEVICE + DF_RELOGIN so the DPC performs the relogin.
 * Also ages relogin_timer and re-arms a retry when a relogin attempt
 * times out while the session is still offline.
 */
2578 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2580 struct iscsi_session *sess;
2581 struct ddb_entry *ddb_entry;
2582 struct scsi_qla_host *ha;
2584 sess = cls_sess->dd_data;
2585 ddb_entry = sess->dd_data;
/* Sessions owned by user space (non-flash DDBs) are not our job. */
2588 if (!(ddb_entry->ddb_type == FLASH_DDB))
2591 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2592 !iscsi_is_session_online(cls_sess)) {
2593 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2595 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
/* Countdown reached zero: hand the relogin to the DPC. */
2597 atomic_set(&ddb_entry->retry_relogin_timer,
2599 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2600 set_bit(DF_RELOGIN, &ddb_entry->flags);
2601 DEBUG2(ql4_printk(KERN_INFO, ha,
2602 "%s: index [%d] login device\n",
2603 __func__, ddb_entry->fw_ddb_index));
2605 atomic_dec(&ddb_entry->retry_relogin_timer);
2609 /* Wait for relogin to timeout */
2610 if (atomic_read(&ddb_entry->relogin_timer) &&
2611 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2613 * If the relogin times out and the device is
2614 * still NOT ONLINE then try and relogin again.
2616 if (!iscsi_is_session_online(cls_sess)) {
2617 /* Reset retry relogin timer */
2618 atomic_inc(&ddb_entry->relogin_retry_count);
2619 DEBUG2(ql4_printk(KERN_INFO, ha,
2620 "%s: index[%d] relogin timed out-retrying"
2621 " relogin (%d), retry (%d)\n", __func__,
2622 ddb_entry->fw_ddb_index,
2623 atomic_read(&ddb_entry->relogin_retry_count),
2624 ddb_entry->default_time2wait + 4));
2625 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
/* Back off by the target's time2wait (+4s slack) before retrying. */
2626 atomic_set(&ddb_entry->retry_relogin_timer,
2627 ddb_entry->default_time2wait + 4);
2633 * qla4xxx_timer - checks every second for work to do.
2634 * @ha: Pointer to host adapter structure.
/*
 * qla4xxx_timer - 1 Hz housekeeping timer for the adapter.
 *
 * Runs relogin ticks for every session, defers everything while AER/EEH
 * is busy, pokes PCI config space to surface EEH errors, runs the 82xx
 * watchdog or the 4xxx heartbeat check, and wakes the DPC thread when
 * any DPC work flag is pending.  Always re-arms itself for +1s.
 */
2636 static void qla4xxx_timer(struct scsi_qla_host *ha)
2641 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2643 /* If we are in the middle of AER/EEH processing
2644 * skip any processing and reschedule the timer
2646 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2647 mod_timer(&ha->timer, jiffies + HZ);
2651 /* Hardware read to trigger an EEH error during mailbox waits. */
2652 if (!pci_channel_offline(ha->pdev))
2653 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
2655 if (is_qla8022(ha)) {
2656 qla4_8xxx_watchdog(ha);
2659 if (!is_qla8022(ha)) {
2660 /* Check for heartbeat interval. */
2661 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2662 ha->heartbeat_interval != 0) {
2663 ha->seconds_since_last_heartbeat++;
/* Allow 2s of slack beyond the configured interval before reset. */
2664 if (ha->seconds_since_last_heartbeat >
2665 ha->heartbeat_interval + 2)
2666 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2670 /* Process any deferred work. */
2671 if (!list_empty(&ha->work_list))
2674 /* Wakeup the dpc routine for this adapter, if needed. */
2676 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2677 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2678 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
2679 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2680 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2681 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
2682 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
2683 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2684 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2685 test_bit(DPC_AEN, &ha->dpc_flags)) {
2686 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2687 " - dpc flags = 0x%lx\n",
2688 ha->host_no, __func__, ha->dpc_flags));
2689 qla4xxx_wake_dpc(ha);
2692 /* Reschedule timer thread to call us back in one second */
2693 mod_timer(&ha->timer, jiffies + HZ);
2695 DEBUG2(ha->seconds_since_last_intr++);
2699 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2700 * @ha: Pointer to host adapter structure.
2702 * This routine stalls the driver until all outstanding commands are returned.
2703 * Caller must release the Hardware Lock prior to calling this routine.
/*
 * qla4xxx_cmd_wait - poll until all outstanding SCSI commands complete.
 *
 * Scans every host tag under the hardware lock, up to WAIT_CMD_TOV
 * seconds.  A command is "outstanding" when scsi_host_find_tag()
 * returns a command whose CMD_SP is still set — the tag alone is not
 * enough because the EH path keeps tags pinned.
 * Caller must NOT hold the hardware lock on entry.
 */
2705 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2708 unsigned long flags;
2709 struct scsi_cmnd *cmd;
2711 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2713 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2714 "complete\n", WAIT_CMD_TOV));
2716 while (!time_after_eq(jiffies, wtime)) {
2717 spin_lock_irqsave(&ha->hardware_lock, flags);
2718 /* Find a command that hasn't completed. */
2719 for (index = 0; index < ha->host->can_queue; index++) {
2720 cmd = scsi_host_find_tag(ha->host, index);
2722 * We cannot just check if the index is valid,
2723 * becase if we are run from the scsi eh, then
2724 * the scsi/block layer is going to prevent
2725 * the tag from being released.
2727 if (cmd != NULL && CMD_SP(cmd))
2730 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2732 /* If No Commands are pending, wait is complete */
/* Loop ran to completion without finding a busy tag -> done. */
2733 if (index == ha->host->can_queue)
2738 /* If we timed out on waiting for commands to come back
/*
 * qla4xxx_hw_reset - issue a soft reset to the ISP4xxx.
 *
 * Acquires the inter-function driver lock, clears any pending SCSI
 * Reset Interrupt (which would otherwise block the soft reset), then
 * writes CSR_SOFT_RESET and flushes with a read-back.
 */
2743 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
2745 uint32_t ctrl_status;
2746 unsigned long flags = 0;
2748 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
2750 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2753 spin_lock_irqsave(&ha->hardware_lock, flags);
2756 * If the SCSI Reset Interrupt bit is set, clear it.
2757 * Otherwise, the Soft Reset won't work.
2759 ctrl_status = readw(&ha->reg->ctrl_status);
2760 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2761 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2763 /* Issue Soft Reset */
2764 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
/* Read back to flush the posted write to the device. */
2765 readl(&ha->reg->ctrl_status);
2767 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2772 * qla4xxx_soft_reset - performs soft reset.
2773 * @ha: Pointer to host adapter structure.
/*
 * qla4xxx_soft_reset - perform and confirm a soft reset (ISP4xxx).
 *
 * Sequence: trigger the reset via qla4xxx_hw_reset(); wait for the
 * Network Reset Interrupt bit to clear (clear it ourselves if the
 * network function never does); wait up to SOFT_RESET_TOV for firmware
 * to clear CSR_SOFT_RESET; clear any leftover SCSI Reset Interrupt;
 * and if the soft reset never completed (likely blocked by the other
 * function's BIOS), escalate to CSR_FORCE_SOFT_RESET and wait again.
 */
2775 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2777 uint32_t max_wait_time;
2778 unsigned long flags = 0;
2780 uint32_t ctrl_status;
2782 status = qla4xxx_hw_reset(ha);
2783 if (status != QLA_SUCCESS)
2787 /* Wait until the Network Reset Intr bit is cleared */
2788 max_wait_time = RESET_INTR_TOV;
2790 spin_lock_irqsave(&ha->hardware_lock, flags);
2791 ctrl_status = readw(&ha->reg->ctrl_status);
2792 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2794 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2798 } while ((--max_wait_time));
/* Timed out: clear NET_RESET_INTR on the other function's behalf. */
2800 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2801 DEBUG2(printk(KERN_WARNING
2802 "scsi%ld: Network Reset Intr not cleared by "
2803 "Network function, clearing it now!\n",
2805 spin_lock_irqsave(&ha->hardware_lock, flags);
2806 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2807 readl(&ha->reg->ctrl_status);
2808 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2811 /* Wait until the firmware tells us the Soft Reset is done */
2812 max_wait_time = SOFT_RESET_TOV;
2814 spin_lock_irqsave(&ha->hardware_lock, flags);
2815 ctrl_status = readw(&ha->reg->ctrl_status);
2816 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2818 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2819 status = QLA_SUCCESS;
2824 } while ((--max_wait_time));
2827 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2828 * after the soft reset has taken place.
2830 spin_lock_irqsave(&ha->hardware_lock, flags);
2831 ctrl_status = readw(&ha->reg->ctrl_status);
2832 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2833 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2834 readl(&ha->reg->ctrl_status);
2836 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2838 /* If soft reset fails then most probably the bios on other
2839 * function is also enabled.
2840 * Since the initialization is sequential the other fn
2841 * wont be able to acknowledge the soft reset.
2842 * Issue a force soft reset to workaround this scenario.
2844 if (max_wait_time == 0) {
2845 /* Issue Force Soft Reset */
2846 spin_lock_irqsave(&ha->hardware_lock, flags);
2847 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2848 readl(&ha->reg->ctrl_status);
2849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2850 /* Wait until the firmware tells us the Soft Reset is done */
2851 max_wait_time = SOFT_RESET_TOV;
2853 spin_lock_irqsave(&ha->hardware_lock, flags);
2854 ctrl_status = readw(&ha->reg->ctrl_status);
2855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2857 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2858 status = QLA_SUCCESS;
2863 } while ((--max_wait_time));
2870 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
2871 * @ha: Pointer to host adapter structure.
2872 * @res: returned scsi status
2874 * This routine is called just prior to a HARD RESET to return all
2875 * outstanding commands back to the Operating System.
2876 * Caller should make sure that the following locks are released
2877 * before this calling routine: Hardware lock, and io_request_lock.
/*
 * qla4xxx_abort_active_cmds - fail all outstanding I/O back to the SCSI
 * midlayer with the given result code (e.g. DID_RESET << 16).
 * Pulls each srb off the active array under the hardware lock and drops
 * its reference, which completes the command via qla4xxx_srb_compl.
 * Caller must NOT hold the hardware lock.
 */
2879 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
2883 unsigned long flags;
2885 spin_lock_irqsave(&ha->hardware_lock, flags);
2886 for (i = 0; i < ha->host->can_queue; i++) {
2887 srb = qla4xxx_del_from_active_array(ha, i);
2889 srb->cmd->result = res;
2890 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
2893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * qla4xxx_dead_adapter_cleanup - take a permanently failed adapter
 * offline: clear AF_ONLINE/AF_INIT_DONE, fail all active commands with
 * DID_NO_CONNECT, and mark every device missing.  Used when recovery
 * retries are exhausted or the hardware reports FAILED state.
 */
2896 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2898 clear_bit(AF_ONLINE, &ha->flags);
2900 /* Disable the board */
2901 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
2903 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2904 qla4xxx_mark_all_devices_missing(ha);
2905 clear_bit(AF_INIT_DONE, &ha->flags);
/*
 * qla4xxx_fail_session - mark one iSCSI session failed during adapter
 * recovery.  Flash DDB sessions are blocked (driver will relogin them
 * itself); other sessions get an ISCSI_ERR_CONN_FAILED event so user
 * space can recover them.  Invoked via iscsi_host_for_each_session.
 */
2908 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2910 struct iscsi_session *sess;
2911 struct ddb_entry *ddb_entry;
2913 sess = cls_session->dd_data;
2914 ddb_entry = sess->dd_data;
2915 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2917 if (ddb_entry->ddb_type == FLASH_DDB)
2918 iscsi_block_session(ddb_entry->sess);
2920 iscsi_session_failure(cls_session->dd_data,
2921 ISCSI_ERR_CONN_FAILED);
2925 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2926 * @ha: Pointer to host adapter structure.
/*
 * qla4xxx_recover_adapter - full recovery path after a fatal error.
 *
 * Blocks new I/O, marks the adapter offline, fails all sessions, then
 * chooses a recovery strategy: skip the reset for DPC_RESET_HA_INTR
 * (4xxx-specific), try a firmware-only stop on 82xx, or fall back to a
 * full chip reset.  On success re-initializes the adapter; on failure
 * schedules bounded retries through the DPC (MAX_RESET_HA_RETRIES) and
 * finally disables the board via qla4xxx_dead_adapter_cleanup().
 * Returns QLA_SUCCESS/QLA_ERROR.
 *
 * NOTE(review): some statements are elided in this extract; comments
 * cover only the visible control flow.
 */
2928 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2930 int status = QLA_ERROR;
2931 uint8_t reset_chip = 0;
2935 /* Stall incoming I/O until we are done */
2936 scsi_block_requests(ha->host);
2937 clear_bit(AF_ONLINE, &ha->flags);
2938 clear_bit(AF_LINK_UP, &ha->flags);
2940 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
2942 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2944 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2946 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2949 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2950 * do not reset adapter, jump to initialize_adapter */
2951 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2952 status = QLA_SUCCESS;
2953 goto recover_ha_init_adapter;
2956 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2957 * from eh_host_reset or ioctl module */
2958 if (is_qla8022(ha) && !reset_chip &&
2959 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2961 DEBUG2(ql4_printk(KERN_INFO, ha,
2962 "scsi%ld: %s - Performing stop_firmware...\n",
2963 ha->host_no, __func__));
2964 status = ha->isp_ops->reset_firmware(ha);
2965 if (status == QLA_SUCCESS) {
2966 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2967 qla4xxx_cmd_wait(ha);
2968 ha->isp_ops->disable_intrs(ha);
2969 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2970 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2972 /* If the stop_firmware fails then
2973 * reset the entire chip */
2975 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2976 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2980 /* Issue full chip reset if recovering from a catastrophic error,
2981 * or if stop_firmware fails for ISP-82xx.
2982 * This is the default case for ISP-4xxx */
2983 if (!is_qla8022(ha) || reset_chip) {
2984 if (!is_qla8022(ha))
2987 /* Check if 82XX firmware is alive or not
2988 * We may have arrived here from NEED_RESET
2990 if (test_bit(AF_FW_RECOVERY, &ha->flags))
/* Give firmware up to FW_ALIVE_WAIT_TOV to prove itself alive. */
2993 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2994 while (time_before(jiffies, wait)) {
2995 if (qla4_8xxx_check_fw_alive(ha)) {
2996 qla4xxx_mailbox_premature_completion(ha);
3000 set_current_state(TASK_UNINTERRUPTIBLE);
3001 schedule_timeout(HZ);
3004 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3005 qla4xxx_cmd_wait(ha);
3007 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3008 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3009 DEBUG2(ql4_printk(KERN_INFO, ha,
3010 "scsi%ld: %s - Performing chip reset..\n",
3011 ha->host_no, __func__));
3012 status = ha->isp_ops->reset_chip(ha);
3015 /* Flush any pending ddb changed AENs */
3016 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3018 recover_ha_init_adapter:
3019 /* Upon successful firmware/chip reset, re-initialize the adapter */
3020 if (status == QLA_SUCCESS) {
3021 /* For ISP-4xxx, force function 1 to always initialize
3022 * before function 3 to prevent both funcions from
3023 * stepping on top of the other */
3024 if (!is_qla8022(ha) && (ha->mac_index == 3))
3027 /* NOTE: AF_ONLINE flag set upon successful completion of
3028 * qla4xxx_initialize_adapter */
3029 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3032 /* Retry failed adapter initialization, if necessary
3033 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3034 * case to prevent ping-pong resets between functions */
3035 if (!test_bit(AF_ONLINE, &ha->flags) &&
3036 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3037 /* Adapter initialization failed, see if we can retry
3039 * Since we don't want to block the DPC for too long
3040 * with multiple resets in the same thread,
3041 * utilize DPC to retry */
3042 if (is_qla8022(ha)) {
3043 qla4_82xx_idc_lock(ha);
3044 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3045 qla4_82xx_idc_unlock(ha);
/* Hardware declared FAILED: retrying would be pointless. */
3046 if (dev_state == QLA82XX_DEV_FAILED) {
3047 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3048 "recover adapter. H/W is in Failed "
3049 "state\n", __func__);
3050 qla4xxx_dead_adapter_cleanup(ha);
3051 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3052 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3053 clear_bit(DPC_RESET_HA_FW_CONTEXT,
/* First failure: arm the retry counter. */
3061 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3062 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3063 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3064 "(%d) more times\n", ha->host_no,
3065 ha->retry_reset_ha_cnt));
3066 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3069 if (ha->retry_reset_ha_cnt > 0) {
3070 /* Schedule another Reset HA--DPC will retry */
3071 ha->retry_reset_ha_cnt--;
3072 DEBUG2(printk("scsi%ld: recover adapter - "
3073 "retry remaining %d\n",
3075 ha->retry_reset_ha_cnt));
3079 if (ha->retry_reset_ha_cnt == 0) {
3080 /* Recover adapter retries have been exhausted.
3082 DEBUG2(printk("scsi%ld: recover adapter "
3083 "failed - board disabled\n",
3085 qla4xxx_dead_adapter_cleanup(ha);
3086 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3087 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3088 clear_bit(DPC_RESET_HA_FW_CONTEXT,
/* Success path: drop all reset-request flags. */
3094 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3095 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3096 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3100 ha->adapter_error_count++;
3102 if (test_bit(AF_ONLINE, &ha->flags))
3103 ha->isp_ops->enable_intrs(ha);
3105 scsi_unblock_requests(ha->host);
3107 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3108 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3109 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
/*
 * qla4xxx_relogin_devices - per-session callback run after recovery.
 *
 * For offline sessions: if firmware already reports the DDB ACTIVE,
 * just unblock the session; otherwise trigger a relogin — via the
 * driver's relogin timer for flash DDBs, or via a connection-failure
 * event for user-space-managed sessions.
 */
3114 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3116 struct iscsi_session *sess;
3117 struct ddb_entry *ddb_entry;
3118 struct scsi_qla_host *ha;
3120 sess = cls_session->dd_data;
3121 ddb_entry = sess->dd_data;
3123 if (!iscsi_is_session_online(cls_session)) {
3124 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3125 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3126 " unblock session\n", ha->host_no, __func__,
3127 ddb_entry->fw_ddb_index);
3128 iscsi_unblock_session(ddb_entry->sess);
3130 /* Trigger relogin */
3131 if (ddb_entry->ddb_type == FLASH_DDB) {
/* Don't re-arm if a relogin is already pending. */
3132 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3133 qla4xxx_arm_relogin_timer(ddb_entry);
3135 iscsi_session_failure(cls_session->dd_data,
3136 ISCSI_ERR_CONN_FAILED);
/*
 * qla4xxx_unblock_flash_ddb - unblock a driver-managed (flash DDB)
 * session once it has logged in, and kick off a target scan if the
 * adapter is online.  Returns QLA_SUCCESS (visible path).
 */
3141 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3143 struct iscsi_session *sess;
3144 struct ddb_entry *ddb_entry;
3145 struct scsi_qla_host *ha;
3147 sess = cls_session->dd_data;
3148 ddb_entry = sess->dd_data;
3150 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3151 " unblock session\n", ha->host_no, __func__,
3152 ddb_entry->fw_ddb_index);
3154 iscsi_unblock_session(ddb_entry->sess);
3156 /* Start scan target */
3157 if (test_bit(AF_ONLINE, &ha->flags)) {
3158 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3159 " start scan\n", ha->host_no, __func__,
3160 ddb_entry->fw_ddb_index);
3161 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
/*
 * qla4xxx_unblock_ddb - unblock a user-space-managed session.
 * If the session is still offline, starts the connection and posts a
 * LOGGED_IN event so iscsid sees the transition; otherwise it only
 * logs that the session is already logged in.
 */
3166 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3168 struct iscsi_session *sess;
3169 struct ddb_entry *ddb_entry;
3170 struct scsi_qla_host *ha;
3171 int status = QLA_SUCCESS;
3173 sess = cls_session->dd_data;
3174 ddb_entry = sess->dd_data;
3176 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3177 " unblock user space session\n", ha->host_no, __func__,
3178 ddb_entry->fw_ddb_index);
3180 if (!iscsi_is_session_online(cls_session)) {
3181 iscsi_conn_start(ddb_entry->conn);
3182 iscsi_conn_login_event(ddb_entry->conn,
3183 ISCSI_CONN_STATE_LOGGED_IN);
3185 ql4_printk(KERN_INFO, ha,
3186 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3187 ha->host_no, __func__, ddb_entry->fw_ddb_index,
/* Run qla4xxx_relogin_devices over every session on this host. */
3195 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3197 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
/*
 * qla4xxx_relogin_flash_ddb - perform a login attempt for a flash DDB
 * session.  Arms relogin_timer with the larger of the DDB's configured
 * relogin timeout and RELOGIN_TOV, then calls qla4xxx_login_flash_ddb.
 */
3200 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3202 uint16_t relogin_timer;
3203 struct iscsi_session *sess;
3204 struct ddb_entry *ddb_entry;
3205 struct scsi_qla_host *ha;
3207 sess = cls_sess->dd_data;
3208 ddb_entry = sess->dd_data;
3211 relogin_timer = max(ddb_entry->default_relogin_timeout,
3212 (uint16_t)RELOGIN_TOV);
3213 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3215 DEBUG2(ql4_printk(KERN_INFO, ha,
3216 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3217 ddb_entry->fw_ddb_index, relogin_timer));
3219 qla4xxx_login_flash_ddb(cls_sess);
/*
 * qla4xxx_dpc_relogin - DPC-side per-session relogin worker.
 * Consumes the DF_RELOGIN flag (test_and_clear) and relogs in flash
 * DDB sessions that are still offline.  Non-flash sessions are ignored.
 */
3222 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3224 struct iscsi_session *sess;
3225 struct ddb_entry *ddb_entry;
3226 struct scsi_qla_host *ha;
3228 sess = cls_sess->dd_data;
3229 ddb_entry = sess->dd_data;
3232 if (!(ddb_entry->ddb_type == FLASH_DDB))
3235 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3236 !iscsi_is_session_online(cls_sess)) {
3237 DEBUG2(ql4_printk(KERN_INFO, ha,
3238 "relogin issued\n"));
3239 qla4xxx_relogin_flash_ddb(cls_sess);
/* Queue the adapter's DPC work item onto its dedicated workqueue. */
3243 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3246 queue_work(ha->dpc_thread, &ha->dpc_work);
/*
 * qla4xxx_alloc_work - allocate a deferred-work event of the given type
 * with data_size extra payload bytes.  GFP_ATOMIC because callers may
 * be in interrupt context.  Returns NULL on allocation failure
 * (the NULL-check line is elided in this extract).
 */
3249 static struct qla4_work_evt *
3250 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3251 enum qla4_work_type type)
3253 struct qla4_work_evt *e;
3254 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3256 e = kzalloc(size, GFP_ATOMIC);
3260 INIT_LIST_HEAD(&e->list);
/*
 * qla4xxx_post_work - append an event to ha->work_list under work_lock
 * and wake the DPC thread, which drains the list in qla4xxx_do_work().
 */
3265 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3266 struct qla4_work_evt *e)
3268 unsigned long flags;
3270 spin_lock_irqsave(&ha->work_lock, flags);
3271 list_add_tail(&e->list, &ha->work_list);
3272 spin_unlock_irqrestore(&ha->work_lock, flags);
3273 qla4xxx_wake_dpc(ha);
/*
 * qla4xxx_post_aen_work - queue an AEN for delivery to user space.
 * Copies the payload into a freshly allocated QLA4_EVENT_AEN event and
 * posts it to the DPC work list.
 */
3276 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3277 enum iscsi_host_event_code aen_code,
3278 uint32_t data_size, uint8_t *data)
3280 struct qla4_work_evt *e;
3282 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3286 e->u.aen.code = aen_code;
3287 e->u.aen.data_size = data_size;
3288 memcpy(e->u.aen.data, data, data_size);
3290 qla4xxx_post_work(ha, e);
/*
 * qla4xxx_post_ping_evt_work - queue a ping-completion event (status,
 * pid and payload) for delivery to user space via the DPC work list.
 */
3295 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3296 uint32_t status, uint32_t pid,
3297 uint32_t data_size, uint8_t *data)
3299 struct qla4_work_evt *e;
3301 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3305 e->u.ping.status = status;
3306 e->u.ping.pid = pid;
3307 e->u.ping.data_size = data_size;
3308 memcpy(e->u.ping.data, data, data_size);
3310 qla4xxx_post_work(ha, e);
/*
 * qla4xxx_do_work - drain the deferred-work list from DPC context.
 * Splices ha->work_list onto a local list under work_lock (so new
 * events can keep arriving), then dispatches each event to the iSCSI
 * transport: AENs via iscsi_post_host_event, ping completions via
 * iscsi_ping_comp_event.  Unknown types are logged and dropped.
 */
3315 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3317 struct qla4_work_evt *e, *tmp;
3318 unsigned long flags;
3321 spin_lock_irqsave(&ha->work_lock, flags);
3322 list_splice_init(&ha->work_list, &work);
3323 spin_unlock_irqrestore(&ha->work_lock, flags);
3325 list_for_each_entry_safe(e, tmp, &work, list) {
3326 list_del_init(&e->list);
3329 case QLA4_EVENT_AEN:
3330 iscsi_post_host_event(ha->host_no,
3331 &qla4xxx_iscsi_transport,
3336 case QLA4_EVENT_PING_STATUS:
3337 iscsi_ping_comp_event(ha->host_no,
3338 &qla4xxx_iscsi_transport,
3341 e->u.ping.data_size,
3345 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3346 "supported", e->type);
3353 * qla4xxx_do_dpc - dpc routine
3354 * @data: in our case pointer to adapter structure
3356 * This routine is a task that is schedule by the interrupt handler
3357 * to perform the background processing for interrupts. We put it
3358 * on a task queue that is consumed whenever the scheduler runs; that's
3359 * so you can do anything (i.e. put the process to sleep etc). In fact,
3360 * the mid-level tries to sleep when it reaches the driver threshold
3361 * "host->can_queue". This can cause a panic if we were in our interrupt code.
/*
 * qla4xxx_do_dpc - deferred procedure call: the adapter's background
 * worker, scheduled from interrupt/timer context via qla4xxx_wake_dpc.
 *
 * In order: bail if init incomplete or AER/EEH busy; deliver queued
 * user-space events; handle 82xx unrecoverable/quiescent states; run
 * the reset paths (respecting ql4xdontresethba); then service AEN,
 * DHCP address refresh, per-device relogins, and link-state changes.
 */
3363 static void qla4xxx_do_dpc(struct work_struct *work)
3365 struct scsi_qla_host *ha =
3366 container_of(work, struct scsi_qla_host, dpc_work);
3367 int status = QLA_ERROR;
3369 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
3370 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3371 ha->host_no, __func__, ha->flags, ha->dpc_flags))
3373 /* Initialization not yet finished. Don't do anything yet. */
3374 if (!test_bit(AF_INIT_DONE, &ha->flags))
3377 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3378 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3379 ha->host_no, __func__, ha->flags));
3383 /* post events to application */
3384 qla4xxx_do_work(ha);
3386 if (is_qla8022(ha)) {
3387 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
/* Publish FAILED state in the IDC registers under the IDC lock. */
3388 qla4_82xx_idc_lock(ha);
3389 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3390 QLA82XX_DEV_FAILED);
3391 qla4_82xx_idc_unlock(ha);
3392 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3393 qla4_8xxx_device_state_handler(ha);
3395 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3396 qla4_8xxx_need_qsnt_handler(ha);
3400 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3401 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3402 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3403 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
/* Module parameter overrides all reset requests. */
3404 if (ql4xdontresethba) {
3405 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3406 ha->host_no, __func__));
3407 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3408 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3409 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3410 goto dpc_post_reset_ha;
3412 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3413 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3414 qla4xxx_recover_adapter(ha);
3416 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3417 uint8_t wait_time = RESET_INTR_TOV;
/* Wait for any soft/forced reset in progress to finish. */
3419 while ((readw(&ha->reg->ctrl_status) &
3420 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3421 if (--wait_time == 0)
3426 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3427 "bit not cleared-- resetting\n",
3428 ha->host_no, __func__));
3429 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3430 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3431 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3432 status = qla4xxx_recover_adapter(ha);
3434 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3435 if (status == QLA_SUCCESS)
3436 ha->isp_ops->enable_intrs(ha);
3441 /* ---- process AEN? --- */
3442 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3443 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3445 /* ---- Get DHCP IP Address? --- */
3446 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3447 qla4xxx_get_dhcp_ip_address(ha);
3449 /* ---- relogin device? --- */
3450 if (adapter_up(ha) &&
3451 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3452 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3455 /* ---- link change? --- */
3456 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3457 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3458 /* ---- link down? --- */
3459 qla4xxx_mark_all_devices_missing(ha);
3461 /* ---- link up? --- *
3462 * F/W will auto login to all devices ONLY ONCE after
3463 * link up during driver initialization and runtime
3464 * fatal error recovery. Therefore, the driver must
3465 * manually relogin to devices when recovering from
3466 * connection failures, logouts, expired KATO, etc. */
3467 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3468 qla4xxx_build_ddb_list(ha, ha->is_reset);
3469 iscsi_host_for_each_session(ha->host,
3470 qla4xxx_login_flash_ddb);
3472 qla4xxx_relogin_all_devices(ha);
3478 * qla4xxx_free_adapter - release the adapter
3479 * @ha: pointer to adapter structure
/*
 * qla4xxx_free_adapter - tear down a live adapter (driver unload path).
 *
 * Fails outstanding I/O, masks interrupts at both driver and chip
 * level, stops the timer, destroys the DPC and task workqueues, puts
 * firmware into a known state, clears 82xx drv_active, frees IRQs and
 * finally releases all memory via qla4xxx_mem_free().
 */
3481 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3483 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3485 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3486 /* Turn-off interrupts on the card. */
3487 ha->isp_ops->disable_intrs(ha);
/* Chip-level interrupt mask; register layout differs per family. */
3490 if (is_qla40XX(ha)) {
3491 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3492 &ha->reg->ctrl_status);
3493 readl(&ha->reg->ctrl_status);
3494 } else if (is_qla8022(ha)) {
3495 writel(0, &ha->qla4_8xxx_reg->host_int);
3496 readl(&ha->qla4_8xxx_reg->host_int);
3499 /* Remove timer thread, if present */
3500 if (ha->timer_active)
3501 qla4xxx_stop_timer(ha);
3503 /* Kill the kernel thread for this host */
3505 destroy_workqueue(ha->dpc_thread);
3507 /* Kill the kernel thread for this host */
3509 destroy_workqueue(ha->task_wq);
3511 /* Put firmware in known state */
3512 ha->isp_ops->reset_firmware(ha);
3514 if (is_qla8022(ha)) {
3515 qla4_82xx_idc_lock(ha);
3516 qla4_8xxx_clear_drv_active(ha);
3517 qla4_82xx_idc_unlock(ha);
3520 /* Detach interrupts */
3521 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
3522 qla4xxx_free_irqs(ha);
3524 /* free extra memory */
3525 qla4xxx_mem_free(ha);
/* Map the 8xxx (NX) adapter's PCI register space.
 * Reserves the PCI regions, ioremaps BAR 0 (the large NX aperture),
 * derives the per-function device register window inside it, and
 * selects the CAM-RAM doorbell offset by PCI devfn.
 * Returns a status code; jumps to iospace_error_exit on failure. */
3528 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3531 unsigned long mem_base, mem_len, db_base, db_len;
3532 struct pci_dev *pdev = ha->pdev;
3534 status = pci_request_regions(pdev, DRIVER_NAME);
3537 "scsi(%ld) Failed to reserve PIO regions (%s) "
3538 "status=%d\n", ha->host_no, pci_name(pdev), status);
3539 goto iospace_error_exit;
3542 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
3543 __func__, pdev->revision));
3544 ha->revision_id = pdev->revision;
3546 /* remap phys address */
3547 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3548 mem_len = pci_resource_len(pdev, 0);
3549 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3550 __func__, mem_base, mem_len));
3552 /* mapping of pcibase pointer */
3553 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3554 if (!ha->nx_pcibase) {
3556 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3557 pci_release_regions(ha->pdev);
3558 goto iospace_error_exit;
3561 /* Mapping of IO base pointer, door bell read and write pointer */
3563 /* mapping of IO base pointer */
/* Per-function register window: fixed 0xbc000 base plus 2 KB (devfn<<11)
 * per PCI function inside the BAR 0 aperture. */
3565 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
3566 0xbc000 + (ha->pdev->devfn << 11));
3568 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3569 db_len = pci_resource_len(pdev, 4);
/* Pick the CAM-RAM doorbell slot for this function. */
3571 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3572 QLA82XX_CAM_RAM_DB2);
3580 * qla4xxx_iospace_config - maps registers
3581 * @ha: pointer to adapter structure
3583 * This routines maps HBA's registers from the pci address space
3584 * into the kernel virtual address space for memory mapped i/o.
/* 40xx variant: validates BAR 0 (PIO) and BAR 1 (MMIO), reserves the
 * regions, records the PIO window and ioremaps the MMIO registers.
 * Only MMIO is used for register access; PIO info is merely recorded. */
3586 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
3588 unsigned long pio, pio_len, pio_flags;
3589 unsigned long mmio, mmio_len, mmio_flags;
3591 pio = pci_resource_start(ha->pdev, 0);
3592 pio_len = pci_resource_len(ha->pdev, 0);
3593 pio_flags = pci_resource_flags(ha->pdev, 0);
3594 if (pio_flags & IORESOURCE_IO) {
3595 if (pio_len < MIN_IOBASE_LEN) {
3596 ql4_printk(KERN_WARNING, ha,
3597 "Invalid PCI I/O region size\n");
3601 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
3605 /* Use MMIO operations for all accesses. */
3606 mmio = pci_resource_start(ha->pdev, 1);
3607 mmio_len = pci_resource_len(ha->pdev, 1);
3608 mmio_flags = pci_resource_flags(ha->pdev, 1);
3610 if (!(mmio_flags & IORESOURCE_MEM)) {
/* NOTE(review): this checks BAR 1, but the message says "region #0" —
 * looks like a copy-paste slip in the log text; confirm upstream. */
3611 ql4_printk(KERN_ERR, ha,
3612 "region #0 not an MMIO resource, aborting\n");
3614 goto iospace_error_exit;
3617 if (mmio_len < MIN_IOBASE_LEN) {
3618 ql4_printk(KERN_ERR, ha,
3619 "Invalid PCI mem region size, aborting\n");
3620 goto iospace_error_exit;
3623 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
3624 ql4_printk(KERN_WARNING, ha,
3625 "Failed to reserve PIO/MMIO regions\n");
3627 goto iospace_error_exit;
3630 ha->pio_address = pio;
3631 ha->pio_length = pio_len;
/* Map only the register window, not the whole BAR. */
3632 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3634 ql4_printk(KERN_ERR, ha,
3635 "cannot remap MMIO, aborting\n");
3637 goto iospace_error_exit;
/* Hardware access method table for 40xx-series (non-NX) adapters. */
3646 static struct isp_operations qla4xxx_isp_ops = {
3647 .iospace_config = qla4xxx_iospace_config,
3648 .pci_config = qla4xxx_pci_config,
3649 .disable_intrs = qla4xxx_disable_intrs,
3650 .enable_intrs = qla4xxx_enable_intrs,
3651 .start_firmware = qla4xxx_start_firmware,
3652 .intr_handler = qla4xxx_intr_handler,
3653 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
3654 .reset_chip = qla4xxx_soft_reset,
3655 .reset_firmware = qla4xxx_hw_reset,
3656 .queue_iocb = qla4xxx_queue_iocb,
3657 .complete_iocb = qla4xxx_complete_iocb,
3658 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3659 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3660 .get_sys_info = qla4xxx_get_sys_info,
/* Hardware access method table for 8xxx/82xx (NX) adapters; mixes
 * generic 8xxx helpers with 82xx-specific register routines. */
3663 static struct isp_operations qla4_8xxx_isp_ops = {
3664 .iospace_config = qla4_8xxx_iospace_config,
3665 .pci_config = qla4_8xxx_pci_config,
3666 .disable_intrs = qla4_82xx_disable_intrs,
3667 .enable_intrs = qla4_82xx_enable_intrs,
3668 .start_firmware = qla4_8xxx_load_risc,
3669 .intr_handler = qla4_82xx_intr_handler,
3670 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
3671 .reset_chip = qla4_82xx_isp_reset,
3672 .reset_firmware = qla4_8xxx_stop_firmware,
3673 .queue_iocb = qla4_82xx_queue_iocb,
3674 .complete_iocb = qla4_82xx_complete_iocb,
3675 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
3676 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
3677 .get_sys_info = qla4_8xxx_get_sys_info,
3680 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3682 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3685 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3687 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
3690 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3692 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3695 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3697 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
/* iscsi_boot sysfs "show" callback for the boot NIC entry: format the
 * requested attribute (flags, index, MAC) into the sysfs buffer.
 * @data is the adapter pointer registered at kobject creation.
 * NOTE(review): 'str' is presumably an alias of @buf declared on a line
 * not visible here — confirm against the full source. */
3700 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3702 struct scsi_qla_host *ha = data;
3707 case ISCSI_BOOT_ETH_FLAGS:
3708 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3710 case ISCSI_BOOT_ETH_INDEX:
3711 rc = sprintf(str, "0\n");
3713 case ISCSI_BOOT_ETH_MAC:
3714 rc = sysfs_format_mac(str, ha->my_mac,
/* Visibility callback for boot-NIC sysfs attributes: the listed
 * attribute types are exposed; others fall through (body truncated in
 * this excerpt). */
3724 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
3729 case ISCSI_BOOT_ETH_FLAGS:
3730 case ISCSI_BOOT_ETH_MAC:
3731 case ISCSI_BOOT_ETH_INDEX:
/* iscsi_boot sysfs "show" callback for the initiator entry: currently
 * only the initiator name (ha->name_string) is formatted. */
3741 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3743 struct scsi_qla_host *ha = data;
3748 case ISCSI_BOOT_INI_INITIATOR_NAME:
3749 rc = sprintf(str, "%s\n", ha->name_string);
/* Visibility callback for initiator sysfs attributes: only the
 * initiator-name attribute is exposed. */
3758 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
3763 case ISCSI_BOOT_INI_INITIATOR_NAME:
/* Common formatter for a boot target's sysfs attributes (name, IP,
 * port, CHAP credentials, flags, NIC association) from the cached
 * ql4_boot_session_info. Shared by the pri/sec target show callbacks.
 * NOTE(review): 'str' is presumably an alias of @buf declared on a line
 * outside this excerpt — confirm; IPv4 case writes through 'buf'
 * directly while the rest use 'str'. */
3774 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3777 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3782 case ISCSI_BOOT_TGT_NAME:
3783 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3785 case ISCSI_BOOT_TGT_IP_ADDR:
/* ip_type 0x1 denotes IPv4 here (same encoding as dest_ipaddr below). */
3786 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3787 rc = sprintf(buf, "%pI4\n",
3788 &boot_conn->dest_ipaddr.ip_address)
3790 rc = sprintf(str, "%pI6\n",
3791 &boot_conn->dest_ipaddr.ip_address);
3793 case ISCSI_BOOT_TGT_PORT:
3794 rc = sprintf(str, "%d\n", boot_conn->dest_port);
3796 case ISCSI_BOOT_TGT_CHAP_NAME:
/* CHAP strings are length-counted, not NUL-terminated: use %.*s. */
3797 rc = sprintf(str, "%.*s\n",
3798 boot_conn->chap.target_chap_name_length,
3799 (char *)&boot_conn->chap.target_chap_name);
3801 case ISCSI_BOOT_TGT_CHAP_SECRET:
3802 rc = sprintf(str, "%.*s\n",
3803 boot_conn->chap.target_secret_length,
3804 (char *)&boot_conn->chap.target_secret);
3806 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3807 rc = sprintf(str, "%.*s\n",
3808 boot_conn->chap.intr_chap_name_length,
3809 (char *)&boot_conn->chap.intr_chap_name);
3811 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3812 rc = sprintf(str, "%.*s\n",
3813 boot_conn->chap.intr_secret_length,
3814 (char *)&boot_conn->chap.intr_secret);
3816 case ISCSI_BOOT_TGT_FLAGS:
3817 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3819 case ISCSI_BOOT_TGT_NIC_ASSOC:
3820 rc = sprintf(str, "0\n");
3829 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3831 struct scsi_qla_host *ha = data;
3832 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3834 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3837 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3839 struct scsi_qla_host *ha = data;
3840 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3842 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
/* Visibility callback for boot-target sysfs attributes: all listed
 * target attributes are exposed (body truncated in this excerpt). */
3845 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
3850 case ISCSI_BOOT_TGT_NAME:
3851 case ISCSI_BOOT_TGT_IP_ADDR:
3852 case ISCSI_BOOT_TGT_PORT:
3853 case ISCSI_BOOT_TGT_CHAP_NAME:
3854 case ISCSI_BOOT_TGT_CHAP_SECRET:
3855 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3856 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3857 case ISCSI_BOOT_TGT_NIC_ASSOC:
3858 case ISCSI_BOOT_TGT_FLAGS:
3868 static void qla4xxx_boot_release(void *data)
3870 struct scsi_qla_host *ha = data;
3872 scsi_host_put(ha->host);
/* Read the firmware boot-target configuration and return the primary
 * and secondary DDB indices in @ddb_index (0xffff entries left as set
 * by the caller mean "not configured").
 * 40xx: read boot mode and target indices from NVRAM per PCI function.
 * 8022: read a 13-byte boot-parameter block from flash via DMA.
 * Also caches the indices in ha->pri_ddb_idx / ha->sec_ddb_idx. */
3875 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3878 uint32_t addr, pri_addr, sec_addr;
3882 uint8_t *buf = NULL;
3883 size_t size = 13 * sizeof(uint8_t);
3884 int ret = QLA_SUCCESS;
3886 func_num = PCI_FUNC(ha->pdev->devfn);
3888 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3889 __func__, ha->pdev->device, func_num);
3891 if (is_qla40XX(ha)) {
/* On 40xx, iSCSI is on PCI functions 1 (port 0) and 3 (port 1). */
3892 if (func_num == 1) {
3893 addr = NVRAM_PORT0_BOOT_MODE;
3894 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3895 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3896 } else if (func_num == 3) {
3897 addr = NVRAM_PORT1_BOOT_MODE;
3898 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3899 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3902 goto exit_boot_info;
3905 /* Check Boot Mode */
3906 val = rd_nvram_byte(ha, addr);
3907 if (!(val & 0x07)) {
3908 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3909 "options : 0x%x\n", __func__, val));
3911 goto exit_boot_info;
3914 /* get primary valid target index */
3915 val = rd_nvram_byte(ha, pri_addr);
3917 ddb_index[0] = (val & 0x7f);
3919 /* get secondary valid target index */
3920 val = rd_nvram_byte(ha, sec_addr);
3922 ddb_index[1] = (val & 0x7f);
3924 } else if (is_qla8022(ha)) {
3925 buf = dma_alloc_coherent(&ha->pdev->dev, size,
3926 &buf_dma, GFP_KERNEL);
3928 DEBUG2(ql4_printk(KERN_ERR, ha,
3929 "%s: Unable to allocate dma buffer\n",
3932 goto exit_boot_info;
3935 if (ha->port_num == 0)
3936 offset = BOOT_PARAM_OFFSET_PORT0;
3937 else if (ha->port_num == 1)
3938 offset = BOOT_PARAM_OFFSET_PORT1;
3941 goto exit_boot_info_free;
/* flt_iscsi_param is in 32-bit words; convert to a byte address. */
3943 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3945 if (qla4xxx_get_flash(ha, buf_dma, addr,
3946 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3947 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
3948 " failed\n", ha->host_no, __func__));
3950 goto exit_boot_info_free;
3952 /* Check Boot Mode */
3953 if (!(buf[1] & 0x07)) {
3954 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3955 " : 0x%x\n", buf[1]));
3957 goto exit_boot_info_free;
3960 /* get primary valid target index */
3962 ddb_index[0] = buf[2] & 0x7f;
3964 /* get secondary valid target index */
3965 if (buf[11] & BIT_7)
3966 ddb_index[1] = buf[11] & 0x7f;
3969 goto exit_boot_info;
3972 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3973 " target ID %d\n", __func__, ddb_index[0],
3976 exit_boot_info_free:
3977 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3979 ha->pri_ddb_idx = ddb_index[0];
3980 ha->sec_ddb_idx = ddb_index[1];
3985 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
3986 * @ha: pointer to adapter structure
3987 * @username: CHAP username to be returned
3988 * @password: CHAP password to be returned
3990 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
3991 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
3992 * So from the CHAP cache find the first BIDI CHAP entry and set it
3993 * to the boot record in sysfs.
3995 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
3998 int i, ret = -EINVAL;
3999 int max_chap_entries = 0;
4000 struct ql4_chap_table *chap_table;
/* 8xxx: the flash CHAP region is split in half between the two ports. */
4003 max_chap_entries = (ha->hw.flt_chap_size / 2) /
4004 sizeof(struct ql4_chap_table);
4006 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4008 if (!ha->chap_list) {
4009 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
/* Scan the cached table under chap_sem for the first valid,
 * non-local, BIDI entry. */
4013 mutex_lock(&ha->chap_sem);
4014 for (i = 0; i < max_chap_entries; i++) {
4015 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4016 if (chap_table->cookie !=
4017 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4021 if (chap_table->flags & BIT_7) /* local */
4024 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
/* NOTE(review): strncpy does not guarantee NUL termination if the
 * source fills the max length — assumes callers' buffers tolerate
 * length-limited strings; confirm against consumers. */
4027 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4028 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4032 mutex_unlock(&ha->chap_sem);
/* Populate @boot_sess (target name, IP, port, CHAP credentials) from
 * the flash DDB at @ddb_index, read via a DMA-coherent buffer.
 * Returns QLA_SUCCESS, or an error if the flash DDB lookup or CHAP
 * retrieval fails. */
4038 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4039 struct ql4_boot_session_info *boot_sess,
4042 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4043 struct dev_db_entry *fw_ddb_entry;
4044 dma_addr_t fw_ddb_entry_dma;
4047 int ret = QLA_SUCCESS;
4049 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4050 &fw_ddb_entry_dma, GFP_KERNEL);
4051 if (!fw_ddb_entry) {
4052 DEBUG2(ql4_printk(KERN_ERR, ha,
4053 "%s: Unable to allocate dma buffer.\n",
4059 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4060 fw_ddb_entry_dma, ddb_index)) {
4061 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4062 "index [%d]\n", __func__, ddb_index));
4064 goto exit_boot_target;
4067 /* Update target name and IP from DDB */
4068 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4069 min(sizeof(boot_sess->target_name),
4070 sizeof(fw_ddb_entry->iscsi_name)));
4072 options = le16_to_cpu(fw_ddb_entry->options);
4073 if (options & DDB_OPT_IPV6_DEVICE) {
4074 memcpy(&boot_conn->dest_ipaddr.ip_address,
4075 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
/* ip_type 0x1 marks IPv4 (matches the sysfs show path). */
4077 boot_conn->dest_ipaddr.ip_type = 0x1;
4078 memcpy(&boot_conn->dest_ipaddr.ip_address,
4079 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4082 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4084 /* update chap information */
4085 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
/* BIT_7 of iscsi_options => one-way CHAP is enabled on this DDB. */
4087 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4089 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4091 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4093 (char *)&boot_conn->chap.target_secret,
4096 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4098 goto exit_boot_target;
4101 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4102 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
/* BIT_4 of iscsi_options => bidirectional (mutual) CHAP. */
4105 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4107 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4109 ret = qla4xxx_get_bidi_chap(ha,
4110 (char *)&boot_conn->chap.intr_chap_name,
4111 (char *)&boot_conn->chap.intr_secret);
4114 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4116 goto exit_boot_target;
4119 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4120 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4124 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4125 fw_ddb_entry, fw_ddb_entry_dma);
/* Discover the firmware-configured boot targets: fetch the pri/sec DDB
 * indices via get_fw_boot_info(), then fill ha->boot_tgt with details
 * for each configured index (0xffff == not configured). Skipped when
 * sysfs boot export is disabled by module parameter. */
4129 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4131 uint16_t ddb_index[2];
4132 int ret = QLA_ERROR;
4135 memset(ddb_index, 0, sizeof(ddb_index));
/* Pre-mark both slots "not configured". */
4136 ddb_index[0] = 0xffff;
4137 ddb_index[1] = 0xffff;
4138 ret = get_fw_boot_info(ha, ddb_index);
4139 if (ret != QLA_SUCCESS) {
4140 DEBUG2(ql4_printk(KERN_INFO, ha,
4141 "%s: No boot target configured.\n", __func__));
4145 if (ql4xdisablesysfsboot)
4148 if (ddb_index[0] == 0xffff)
4151 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4153 if (rval != QLA_SUCCESS) {
4154 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4155 "configured\n", __func__));
4160 if (ddb_index[1] == 0xffff)
4161 goto exit_get_boot_info;
4163 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4165 if (rval != QLA_SUCCESS) {
4166 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4167 " configured\n", __func__));
/* Create the /sys/firmware/iscsi_boot# kset and its target/initiator/
 * ethernet kobjects for this host. Each kobject takes a Scsi_Host
 * reference (scsi_host_get) released by qla4xxx_boot_release. When
 * ql4xdisablesysfsboot is set, only log and skip the sysfs export. */
4175 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4177 struct iscsi_boot_kobj *boot_kobj;
4179 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4182 if (ql4xdisablesysfsboot) {
4183 ql4_printk(KERN_INFO, ha,
4184 "%s: syfsboot disabled - driver will trigger login "
4185 "and publish session for discovery .\n", __func__);
4190 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
/* Primary boot target kobject (index 0). */
4194 if (!scsi_host_get(ha->host))
4196 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4197 qla4xxx_show_boot_tgt_pri_info,
4198 qla4xxx_tgt_get_attr_visibility,
4199 qla4xxx_boot_release);
/* Secondary boot target kobject (index 1). */
4203 if (!scsi_host_get(ha->host))
4205 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4206 qla4xxx_show_boot_tgt_sec_info,
4207 qla4xxx_tgt_get_attr_visibility,
4208 qla4xxx_boot_release);
/* Initiator kobject. */
4212 if (!scsi_host_get(ha->host))
4214 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4215 qla4xxx_show_boot_ini_info,
4216 qla4xxx_ini_get_attr_visibility,
4217 qla4xxx_boot_release);
/* Boot NIC kobject. */
4221 if (!scsi_host_get(ha->host))
4223 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4224 qla4xxx_show_boot_eth_info,
4225 qla4xxx_eth_get_attr_visibility,
4226 qla4xxx_boot_release);
/* Error path: drop the host ref and destroy the kset. */
4233 scsi_host_put(ha->host);
4235 iscsi_boot_destroy_kset(ha->boot_kset);
4241 * qla4xxx_create chap_list - Create CHAP list from FLASH
4242 * @ha: pointer to adapter structure
4244 * Read flash and make a list of CHAP entries, during login when a CHAP entry
4245 * is received, it will be checked in this list. If entry exist then the CHAP
4246 * entry index is set in the DDB. If CHAP entry does not exist in this list
4247 * then a new entry is added in FLASH in CHAP table and the index obtained is
4250 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4253 uint8_t *chap_flash_data = NULL;
4255 dma_addr_t chap_dma;
4256 uint32_t chap_size = 0;
/* Size the cache: 40xx has a fixed entry count, 8xxx splits the flash
 * CHAP region evenly between the two ports. */
4259 chap_size = MAX_CHAP_ENTRIES_40XX *
4260 sizeof(struct ql4_chap_table);
4261 else /* Single region contains CHAP info for both
4262 * ports which is divided into half for each port.
4264 chap_size = ha->hw.flt_chap_size / 2;
4266 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4267 &chap_dma, GFP_KERNEL);
4268 if (!chap_flash_data) {
4269 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
/* Port 1 reads from the second half of the shared region. */
4273 offset = FLASH_CHAP_OFFSET;
4275 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4276 if (ha->port_num == 1)
4277 offset += chap_size;
4280 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4281 if (rval != QLA_SUCCESS)
4282 goto exit_chap_list;
/* Allocate the cache lazily; keep an existing cache on re-read. */
4284 if (ha->chap_list == NULL)
4285 ha->chap_list = vmalloc(chap_size);
4286 if (ha->chap_list == NULL) {
4287 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4288 goto exit_chap_list;
4291 memcpy(ha->chap_list, chap_flash_data, chap_size);
4294 dma_free_coherent(&ha->pdev->dev, chap_size,
4295 chap_flash_data, chap_dma);
/* Extract the comparison tuple (tpgt, port, iqn, IP string) for an
 * existing session/connection pair into @tddb, for duplicate-DDB
 * detection against firmware entries. */
4298 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4299 struct ql4_tuple_ddb *tddb)
4301 struct scsi_qla_host *ha;
4302 struct iscsi_cls_session *cls_sess;
4303 struct iscsi_cls_conn *cls_conn;
4304 struct iscsi_session *sess;
4305 struct iscsi_conn *conn;
4307 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4309 cls_sess = ddb_entry->sess;
4310 sess = cls_sess->dd_data;
4311 cls_conn = ddb_entry->conn;
4312 conn = cls_conn->dd_data;
4314 tddb->tpgt = sess->tpgt;
4315 tddb->port = conn->persistent_port;
/* NOTE(review): strncpy leaves the strings unterminated if the source
 * fills the buffer — assumes tddb is zeroed by the caller (vzalloc in
 * the visible call sites), which provides the terminator. */
4316 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4317 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
/* Build the comparison tuple from a firmware DDB entry: tpgt, iqn,
 * printable IP (v4 or v6 per the options bit), port and ISID. When
 * @flash_isid is non-NULL it overrides the entry's own ISID (used when
 * comparing against flash DDBs whose ISID was saved separately). */
4320 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4321 struct ql4_tuple_ddb *tddb,
4322 uint8_t *flash_isid)
4324 uint16_t options = 0;
4326 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4327 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4328 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4330 options = le16_to_cpu(fw_ddb_entry->options);
4331 if (options & DDB_OPT_IPV6_DEVICE)
4332 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4334 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4336 tddb->port = le16_to_cpu(fw_ddb_entry->port);
4338 if (flash_isid == NULL)
4339 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4340 sizeof(tddb->isid));
4342 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
/* Compare two DDB tuples. Returns success ("match") only when iqn,
 * IP and port all agree, and — when @is_isid_compare is set — the
 * ISIDs agree too. ISID comparison is skipped in the reset path (see
 * comment below) because driver- and firmware-generated ISIDs differ. */
4345 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4346 struct ql4_tuple_ddb *old_tddb,
4347 struct ql4_tuple_ddb *new_tddb,
4348 uint8_t is_isid_compare)
4350 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4353 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4356 if (old_tddb->port != new_tddb->port)
4359 /* For multi sessions, driver generates the ISID, so do not compare
4360 * ISID in reset path since it would be a comparision between the
4361 * driver generated ISID and firmware generated ISID. This could
4362 * lead to adding duplicated DDBs in the list as driver generated
4363 * ISID would not match firmware generated ISID.
4365 if (is_isid_compare) {
/* ISIDs are logged most-significant byte first (index 5..0). */
4366 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4367 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4368 __func__, old_tddb->isid[5], old_tddb->isid[4],
4369 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4370 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4371 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4372 new_tddb->isid[0]));
4374 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4375 sizeof(old_tddb->isid)))
4379 DEBUG2(ql4_printk(KERN_INFO, ha,
4380 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4381 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4382 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4383 new_tddb->ip_addr, new_tddb->iscsi_name));
/* Check whether a session matching @fw_ddb_entry already exists among
 * the driver's active DDBs. Compares the firmware entry's tuple against
 * every valid fw index without ISID comparison (false). Returns
 * QLA_SUCCESS if a duplicate is found, QLA_ERROR otherwise. */
4388 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4389 struct dev_db_entry *fw_ddb_entry)
4391 struct ddb_entry *ddb_entry;
4392 struct ql4_tuple_ddb *fw_tddb = NULL;
4393 struct ql4_tuple_ddb *tmp_tddb = NULL;
4395 int ret = QLA_ERROR;
/* vzalloc keeps the tuples zero-filled (string fields terminated). */
4397 fw_tddb = vzalloc(sizeof(*fw_tddb));
4399 DEBUG2(ql4_printk(KERN_WARNING, ha,
4400 "Memory Allocation failed.\n"));
4405 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4407 DEBUG2(ql4_printk(KERN_WARNING, ha,
4408 "Memory Allocation failed.\n"));
4413 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4415 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4416 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4417 if (ddb_entry == NULL)
4420 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4421 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4422 ret = QLA_SUCCESS; /* found */
4436 * qla4xxx_check_existing_isid - check if target with same isid exist
4438 * @list_nt: list of target
4439 * @isid: isid to check
4441 * This routine return QLA_SUCCESS if target with same isid exist
4443 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4445 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4446 struct dev_db_entry *fw_ddb_entry;
/* Linear scan of the NT list comparing raw ISID bytes. */
4448 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4449 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4451 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4452 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4460 * qla4xxx_update_isid - compare ddbs and updated isid
4461 * @ha: Pointer to host adapter structure.
4462 * @list_nt: list of nt target
4463 * @fw_ddb_entry: firmware ddb entry
4465 * This routine update isid if ddbs have same iqn, same isid and
4466 * different IP addr.
4467 * Return QLA_SUCCESS if isid is updated.
4469 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4470 struct list_head *list_nt,
4471 struct dev_db_entry *fw_ddb_entry)
4473 uint8_t base_value, i;
/* isid[1]: low 5 bits are preserved; the top 3 bits are a counter we
 * iterate (8 candidates) until a unique ISID is found in list_nt. */
4475 base_value = fw_ddb_entry->isid[1] & 0x1f;
4476 for (i = 0; i < 8; i++) {
4477 fw_ddb_entry->isid[1] = (base_value | (i << 5));
4478 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
/* Success only if the final candidate is actually unused. */
4482 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4489 * qla4xxx_should_update_isid - check if isid need to update
4490 * @ha: Pointer to host adapter structure.
4491 * @old_tddb: ddb tuple
4492 * @new_tddb: ddb tuple
4494 * Return QLA_SUCCESS if different IP, different PORT, same iqn,
4497 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4498 struct ql4_tuple_ddb *old_tddb,
4499 struct ql4_tuple_ddb *new_tddb)
/* Same IP and same port => same endpoint, no ISID update needed. */
4501 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4503 if (old_tddb->port == new_tddb->port)
/* Different iqn => different target, no update. */
4507 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4511 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4512 sizeof(old_tddb->isid)))
4513 /* different isid */
4520 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4521 * @ha: Pointer to host adapter structure.
4522 * @list_nt: list of nt target.
4523 * @fw_ddb_entry: firmware ddb entry.
4525 * This routine check if fw_ddb_entry already exists in list_nt to avoid
4526 * duplicate ddb in list_nt.
4527 * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
4528 * Note: This function also update isid of DDB if required.
4531 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4532 struct list_head *list_nt,
4533 struct dev_db_entry *fw_ddb_entry)
4535 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4536 struct ql4_tuple_ddb *fw_tddb = NULL;
4537 struct ql4_tuple_ddb *tmp_tddb = NULL;
4538 int rval, ret = QLA_ERROR;
/* vzalloc keeps tuples zero-filled (string fields terminated). */
4540 fw_tddb = vzalloc(sizeof(*fw_tddb));
4542 DEBUG2(ql4_printk(KERN_WARNING, ha,
4543 "Memory Allocation failed.\n"));
4548 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4550 DEBUG2(ql4_printk(KERN_WARNING, ha,
4551 "Memory Allocation failed.\n"));
4556 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
/* Pass 1: full-tuple compare (including ISID, using the saved flash
 * ISID for each list entry) to find an exact duplicate. */
4558 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4559 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4560 nt_ddb_idx->flash_isid);
4561 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4562 /* found duplicate ddb */
4563 if (ret == QLA_SUCCESS)
/* Pass 2: same iqn/ISID but different endpoint — rewrite the new
 * entry's ISID so both can coexist. */
4567 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4568 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4570 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4571 if (ret == QLA_SUCCESS) {
4572 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4573 if (rval == QLA_SUCCESS)
/* Unlink and free every qla_ddb_index element on @list_ddb (the free
 * call follows list_del_init on a line outside this excerpt). */
4590 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
4592 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4594 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4595 list_del_init(&ddb_idx->list);
/* Build a sockaddr (v4 or v6 per the DDB options) from a firmware DDB
 * entry and open an iscsi endpoint to it via qla4xxx_ep_connect.
 * Returns the endpoint (NULL handling is outside this excerpt). */
4600 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4601 struct dev_db_entry *fw_ddb_entry)
4603 struct iscsi_endpoint *ep;
4604 struct sockaddr_in *addr;
4605 struct sockaddr_in6 *addr6;
4606 struct sockaddr *dst_addr;
4609 /* TODO: need to destroy on unload iscsi_endpoint*/
4610 dst_addr = vmalloc(sizeof(*dst_addr));
4614 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4615 dst_addr->sa_family = AF_INET6;
4616 addr6 = (struct sockaddr_in6 *)dst_addr;
4617 ip = (char *)&addr6->sin6_addr;
4618 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4619 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4622 dst_addr->sa_family = AF_INET;
4623 addr = (struct sockaddr_in *)dst_addr;
4624 ip = (char *)&addr->sin_addr;
4625 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4626 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
/* Non-blocking connect (last arg 0). */
4629 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
/* Decide whether DDB @idx is a boot index: when sysfs boot export is
 * disabled, or @idx matches the cached pri/sec boot indices, it is
 * treated specially by the caller (return lines outside this excerpt). */
4634 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4636 if (ql4xdisablesysfsboot)
4638 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
/* Initialize a driver ddb_entry created from a flash DDB: mark it as
 * FLASH_DDB with no fw index yet, install the flash-specific unblock/
 * change callbacks, reset relogin counters, and derive the relogin
 * timeout from the DDB's def_timeout (clamped to sane bounds). */
4643 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4644 struct ddb_entry *ddb_entry)
4646 uint16_t def_timeout;
4648 ddb_entry->ddb_type = FLASH_DDB;
4649 ddb_entry->fw_ddb_index = INVALID_ENTRY;
4650 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4652 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4653 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4655 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4656 atomic_set(&ddb_entry->relogin_timer, 0);
4657 atomic_set(&ddb_entry->relogin_retry_count, 0);
4658 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
/* Accept def_timeout only within (LOGIN_TOV, LOGIN_TOV*10); else
 * fall back to LOGIN_TOV. */
4659 ddb_entry->default_relogin_timeout =
4660 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4661 def_timeout : LOGIN_TOV;
4662 ddb_entry->default_time2wait =
4663 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
/* Poll the firmware's per-interface IP state (up to IP_CONFIG_TOV
 * seconds, 1 s per pass) until each of the 4 IP interfaces reaches a
 * settled state; settled interfaces are marked -1 and skipped. Used
 * before logging into flash DDBs so connections don't race DHCP/ND. */
4666 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4669 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4670 uint32_t sts[MBOX_REG_COUNT];
4672 unsigned long wtime;
4675 wtime = jiffies + (HZ * IP_CONFIG_TOV);
4677 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4678 if (ip_idx[idx] == -1)
4681 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4683 if (ret == QLA_ERROR) {
/* IP state is packed in mailbox status word 1. */
4688 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4690 DEBUG2(ql4_printk(KERN_INFO, ha,
4691 "Waiting for IP state for idx = %d, state = 0x%x\n",
4692 ip_idx[idx], ip_state));
/* These states are terminal — stop polling this interface. */
4693 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4694 ip_state == IP_ADDRSTATE_INVALID ||
4695 ip_state == IP_ADDRSTATE_PREFERRED ||
4696 ip_state == IP_ADDRSTATE_DEPRICATED ||
4697 ip_state == IP_ADDRSTATE_DISABLING)
4701 /* Break if all IP states checked */
4702 if ((ip_idx[0] == -1) &&
4703 (ip_idx[1] == -1) &&
4704 (ip_idx[2] == -1) &&
4707 schedule_timeout_uninterruptible(HZ);
4708 } while (time_after(wtime, jiffies));
/* Walk all firmware DDB entries and collect the SendTargets (ST)
 * entries — those with an empty iscsi_name — onto @list_st as
 * qla_ddb_index nodes. Uses a dma_pool buffer for the fw reads. */
4711 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4712 struct list_head *list_st)
4714 struct qla_ddb_index *st_ddb_idx;
4717 struct dev_db_entry *fw_ddb_entry;
4718 dma_addr_t fw_ddb_dma;
4720 uint32_t idx = 0, next_idx = 0;
4721 uint32_t state = 0, conn_err = 0;
4722 uint16_t conn_id = 0;
4724 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4726 if (fw_ddb_entry == NULL) {
4727 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4731 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4733 fw_idx_size = sizeof(struct qla_ddb_index);
/* Firmware supplies next_idx, so iteration can skip holes. */
4735 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4736 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4737 NULL, &next_idx, &state,
4738 &conn_err, NULL, &conn_id);
4739 if (ret == QLA_ERROR)
4742 /* Ignore DDB if invalid state (unassigned) */
4743 if (state == DDB_DS_UNASSIGNED)
4744 goto continue_next_st;
4746 /* Check if ST, add to the list_st */
4747 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4748 goto continue_next_st;
4750 st_ddb_idx = vzalloc(fw_idx_size)
4754 st_ddb_idx->fw_ddb_idx = idx;
4756 list_add_tail(&st_ddb_idx->list, list_st);
4764 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4768 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4769 * @ha: pointer to adapter structure
4770 * @list_ddb: List from which failed ddb to be removed
4772 * Iterate over the list of DDBs and find and remove DDBs that are either in
4773 * no connection active state or failed state
4775 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4776 struct list_head *list_ddb)
4778 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4779 uint32_t next_idx = 0;
4780 uint32_t state = 0, conn_err = 0;
4783 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
/* Re-read only the state (NULL buffers) for each listed index. */
4784 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4785 NULL, 0, NULL, &next_idx, &state,
4786 &conn_err, NULL, NULL);
4787 if (ret == QLA_ERROR)
4790 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4791 state == DDB_DS_SESSION_FAILED) {
4792 list_del_init(&ddb_idx->list);
/* Create an iscsi class session + connection for a flash DDB entry:
 * sets up the ddb_entry embedded in the session, creates the sysfs
 * endpoint, copies fw parameters, and on adapter reset arms immediate
 * relogin via the DPC flags instead of waiting for the timer. */
4798 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4799 struct dev_db_entry *fw_ddb_entry,
4802 struct iscsi_cls_session *cls_sess;
4803 struct iscsi_session *sess;
4804 struct iscsi_cls_conn *cls_conn;
4805 struct iscsi_endpoint *ep;
4806 uint16_t cmds_max = 32;
4807 uint16_t conn_id = 0;
4808 uint32_t initial_cmdsn = 0;
4809 int ret = QLA_SUCCESS;
4811 struct ddb_entry *ddb_entry = NULL;
4813 /* Create session object, with INVALID_ENTRY,
4814 * the targer_id would get set when we issue the login
4816 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4817 cmds_max, sizeof(struct ddb_entry),
4818 sizeof(struct ql4_task_data),
4819 initial_cmdsn, INVALID_ENTRY);
/* iscsi_session_setup took a module reference we don't need for
 * driver-internal sessions — drop it so unload is not blocked. */
4826 * so calling module_put function to decrement the
4829 module_put(qla4xxx_iscsi_transport.owner);
4830 sess = cls_sess->dd_data;
4831 ddb_entry = sess->dd_data;
4832 ddb_entry->sess = cls_sess;
4834 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4835 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4836 sizeof(struct dev_db_entry));
4838 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4840 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4847 ddb_entry->conn = cls_conn;
4849 /* Setup ep, for displaying attributes in sysfs */
4850 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4852 ep->conn = cls_conn;
4855 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4860 /* Update sess/conn params */
4861 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
4863 if (is_reset == RESET_ADAPTER) {
4864 iscsi_block_session(cls_sess);
4865 /* Use the relogin path to discover new devices
4866 * by short-circuting the logic of setting
4867 * timer to relogin - instead set the flags
4868 * to initiate login right away.
4870 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4871 set_bit(DF_RELOGIN, &ddb_entry->flags);
/* qla4xxx_build_nt_list - walk firmware DDBs and collect "normal target"
 * (NT, i.e. non-sendtarget) entries.  On INIT_ADAPTER the entries are
 * appended to @list_nt for later processing; on RESET_ADAPTER sessions are
 * set up directly via qla4xxx_sess_conn_setup().
 * NOTE(review): gapped listing — braces, the 'ret'/'max_ddbs'/'fw_idx_size'
 * declarations, allocation-failure branches and continue_next_nt label are
 * elided below. */
4878 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4879 struct list_head *list_nt, int is_reset)
4881 struct dev_db_entry *fw_ddb_entry;
4882 dma_addr_t fw_ddb_dma;
4886 uint32_t idx = 0, next_idx = 0;
4887 uint32_t state = 0, conn_err = 0;
4888 uint16_t conn_id = 0;
4889 struct qla_ddb_index *nt_ddb_idx;
4891 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4893 if (fw_ddb_entry == NULL) {
4894 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4897 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4899 fw_idx_size = sizeof(struct qla_ddb_index);
/* Firmware chains DDBs: each lookup also returns the next index. */
4901 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4902 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4903 NULL, &next_idx, &state,
4904 &conn_err, NULL, &conn_id);
4905 if (ret == QLA_ERROR)
/* Skip the boot-target index; it is handled separately. */
4908 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4909 goto continue_next_nt;
4911 /* Check if NT, then add to list it */
4912 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4913 goto continue_next_nt;
4915 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4916 state == DDB_DS_SESSION_FAILED))
4917 goto continue_next_nt;
4919 DEBUG2(ql4_printk(KERN_INFO, ha,
4920 "Adding DDB to session = 0x%x\n", idx));
4921 if (is_reset == INIT_ADAPTER) {
4922 nt_ddb_idx = vmalloc(fw_idx_size);
4926 nt_ddb_idx->fw_ddb_idx = idx;
4928 /* Copy original isid as it may get updated in function
4929 * qla4xxx_update_isid(). We need original isid in
4930 * function qla4xxx_compare_tuple_ddb to find duplicate
4932 memcpy(&nt_ddb_idx->flash_isid[0],
4933 &fw_ddb_entry->isid[0],
4934 sizeof(nt_ddb_idx->flash_isid));
4936 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
4938 if (ret == QLA_SUCCESS) {
4939 /* free nt_ddb_idx and do not add to list_nt */
4941 goto continue_next_nt;
4944 /* Copy updated isid */
4945 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
4946 sizeof(struct dev_db_entry));
4948 list_add_tail(&nt_ddb_idx->list, list_nt);
4949 } else if (is_reset == RESET_ADAPTER) {
/* After a reset, do not recreate sessions that still exist. */
4950 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4952 goto continue_next_nt;
4955 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4956 if (ret == QLA_ERROR)
4966 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4970 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4971 * @ha: pointer to adapter structure
4972 * @is_reset: Is this init path or reset path
4974 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
4975 * using connection open, then create the list of normal targets (nt)
4976 * from firmware DDBs. Based on the list of nt setup session and connection
/* NOTE(review): gapped listing — braces, the 'tmo' declaration and the
 * do/break skeleton of the wait loop are partially elided below. */
4979 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4982 struct list_head list_st, list_nt;
4983 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4984 unsigned long wtime;
/* If the link is not up yet, defer the whole build to the DPC path. */
4986 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4987 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4988 ha->is_reset = is_reset;
4992 INIT_LIST_HEAD(&list_st);
4993 INIT_LIST_HEAD(&list_nt);
4995 qla4xxx_build_st_list(ha, &list_st);
4997 /* Before issuing conn open mbox, ensure all IPs states are configured
4998 * Note, conn open fails if IPs are not configured
5000 qla4xxx_wait_for_ip_configuration(ha);
5002 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
5003 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
5004 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
5007 /* Wait to ensure all sendtargets are done for min 12 sec wait */
/* Clamp the wait to [LOGIN_TOV, LOGIN_TOV*10) using the firmware default. */
5008 tmo = ((ha->def_timeout > LOGIN_TOV) &&
5009 (ha->def_timeout < LOGIN_TOV * 10) ?
5010 ha->def_timeout : LOGIN_TOV);
5012 DEBUG2(ql4_printk(KERN_INFO, ha,
5013 "Default time to wait for build ddb %d\n", tmo));
5015 wtime = jiffies + (HZ * tmo);
/* Poll until every sendtarget has completed or the timeout expires. */
5017 if (list_empty(&list_st))
5020 qla4xxx_remove_failed_ddb(ha, &list_st);
5021 schedule_timeout_uninterruptible(HZ / 10);
5022 } while (time_after(wtime, jiffies));
5024 /* Free up the sendtargets list */
5025 qla4xxx_free_ddb_list(&list_st);
5027 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
5029 qla4xxx_free_ddb_list(&list_nt);
5031 qla4xxx_free_ddb_index(ha);
5035 * qla4xxx_probe_adapter - callback function to probe HBA
5036 * @pdev: pointer to pci_dev structure
5037 * @pci_device_id: pointer to pci_device entry
5039 * This routine will probe for Qlogic 4xxx iSCSI host adapters.
5040 * It returns zero if successful. It also initializes all data necessary for
/* NOTE(review): gapped listing — many error-path gotos, the 'buf'/'dev_state'
 * declarations, several if-conditions and the success return are elided
 * below; the visible lines show only the main flow and unwind labels. */
5043 static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5044 const struct pci_device_id *ent)
5046 int ret = -ENODEV, status;
5047 struct Scsi_Host *host;
5048 struct scsi_qla_host *ha;
5049 uint8_t init_retry_count = 0;
5051 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
5054 if (pci_enable_device(pdev))
5057 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
5060 "qla4xxx: Couldn't allocate host from scsi layer!\n");
5061 goto probe_disable_device;
5064 /* Clear our data area */
5065 ha = to_qla_host(host);
5066 memset(ha, 0, sizeof(*ha));
5068 /* Save the information from PCI BIOS. */
5071 ha->host_no = host->host_no;
5073 pci_enable_pcie_error_reporting(pdev);
5075 /* Setup Runtime configurable options */
/* ISP82xx parts use their own ops table and legacy-interrupt mapping
 * selected by PCI function number; everything else uses qla4xxx ops. */
5076 if (is_qla8022(ha)) {
5077 ha->isp_ops = &qla4_8xxx_isp_ops;
5078 rwlock_init(&ha->hw_lock);
5079 ha->qdr_sn_window = -1;
5080 ha->ddr_mn_window = -1;
5081 ha->curr_window = 255;
5082 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5083 nx_legacy_intr = &legacy_intr[ha->func_num];
5084 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5085 ha->nx_legacy_intr.tgt_status_reg =
5086 nx_legacy_intr->tgt_status_reg;
5087 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5088 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5090 ha->isp_ops = &qla4xxx_isp_ops;
5093 /* Set EEH reset type to fundamental if required by hba */
5095 pdev->needs_freset = 1;
5097 /* Configure PCI I/O space. */
5098 ret = ha->isp_ops->iospace_config(ha);
5100 goto probe_failed_ioconfig;
5102 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
5103 pdev->device, pdev->irq, ha->reg);
5105 qla4xxx_config_dma_addressing(ha);
5107 /* Initialize lists and spinlocks. */
5108 INIT_LIST_HEAD(&ha->free_srb_q);
5110 mutex_init(&ha->mbox_sem);
5111 mutex_init(&ha->chap_sem);
5112 init_completion(&ha->mbx_intr_comp);
5113 init_completion(&ha->disable_acb_comp);
5115 spin_lock_init(&ha->hardware_lock);
5116 spin_lock_init(&ha->work_lock);
5118 /* Initialize work list */
5119 INIT_LIST_HEAD(&ha->work_list);
5121 /* Allocate dma buffers */
5122 if (qla4xxx_mem_alloc(ha)) {
5123 ql4_printk(KERN_WARNING, ha,
5124 "[ERROR] Failed to allocate memory for adapter\n");
/* Advertise SCSI-host limits before registering with the midlayer. */
5130 host->cmd_per_lun = 3;
5131 host->max_channel = 0;
5132 host->max_lun = MAX_LUNS - 1;
5133 host->max_id = MAX_TARGETS;
5134 host->max_cmd_len = IOCB_MAX_CDB_LEN;
5135 host->can_queue = MAX_SRBS ;
5136 host->transportt = qla4xxx_scsi_transport;
5138 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
5140 ql4_printk(KERN_WARNING, ha,
5141 "%s: scsi_init_shared_tag_map failed\n", __func__);
5145 pci_set_drvdata(pdev, ha);
5147 ret = scsi_add_host(host, &pdev->dev);
5152 (void) qla4_8xxx_get_flash_info(ha);
5155 * Initialize the Host adapter request/response queues and
5157 * NOTE: interrupts enabled upon successful completion
5159 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
/* Retry initialization up to MAX_INIT_RETRIES until the adapter reports
 * AF_ONLINE, resetting the chip between attempts. */
5160 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5161 init_retry_count++ < MAX_INIT_RETRIES) {
5163 if (is_qla8022(ha)) {
5164 qla4_82xx_idc_lock(ha);
5165 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
5166 qla4_82xx_idc_unlock(ha);
5167 if (dev_state == QLA82XX_DEV_FAILED) {
5168 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5169 "initialize adapter. H/W is in failed state\n",
5174 DEBUG2(printk("scsi: %s: retrying adapter initialization "
5175 "(%d)\n", __func__, init_retry_count));
5177 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
5180 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5183 if (!test_bit(AF_ONLINE, &ha->flags)) {
5184 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
5186 if (is_qla8022(ha) && ql4xdontresethba) {
5187 /* Put the device in failed state. */
5188 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5189 qla4_82xx_idc_lock(ha);
5190 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5191 QLA82XX_DEV_FAILED);
5192 qla4_82xx_idc_unlock(ha);
5198 /* Startup the kernel thread for this host adapter. */
5199 DEBUG2(printk("scsi: %s: Starting kernel thread for "
5200 "qla4xxx_dpc\n", __func__));
5201 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
5202 ha->dpc_thread = create_singlethread_workqueue(buf);
5203 if (!ha->dpc_thread) {
5204 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
5208 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
5210 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
5211 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
5213 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5218 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5219 * (which is called indirectly by qla4xxx_initialize_adapter),
5220 * so that irqs will be registered after crbinit but before
5223 if (!is_qla8022(ha)) {
5224 ret = qla4xxx_request_irqs(ha);
5226 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5227 "interrupt %d already in use.\n", pdev->irq);
5232 pci_save_state(ha->pdev);
5233 ha->isp_ops->enable_intrs(ha);
5235 /* Start timer thread. */
5236 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5238 set_bit(AF_INIT_DONE, &ha->flags);
5240 qla4_8xxx_alloc_sysfs_attr(ha);
5243 " QLogic iSCSI HBA Driver version: %s\n"
5244 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5245 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5246 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5247 ha->patch_number, ha->build_number);
5249 if (qla4xxx_setup_boot_info(ha))
5250 ql4_printk(KERN_ERR, ha,
5251 "%s: No iSCSI boot target configured\n", __func__);
5253 /* Perform the build ddb list and login to each */
5254 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5255 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5257 qla4xxx_create_chap_list(ha);
5259 qla4xxx_create_ifaces(ha);
/* Error unwind: undo resources in reverse order of acquisition. */
5263 scsi_remove_host(ha->host);
5266 qla4xxx_free_adapter(ha);
5268 probe_failed_ioconfig:
5269 pci_disable_pcie_error_reporting(pdev);
5270 scsi_host_put(ha->host);
5272 probe_disable_device:
5273 pci_disable_device(pdev);
5279 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
5280 * @ha: pointer to adapter structure
5282 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5283 * so that the other port will not re-initialize while in the process of
5284 * removing the ha due to driver unload or hba hotplug.
/* NOTE(review): gapped listing — braces and some null checks elided. */
5286 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5288 struct scsi_qla_host *other_ha = NULL;
5289 struct pci_dev *other_pdev = NULL;
5290 int fn = ISP4XXX_PCI_FN_2;
5292 /*iscsi function numbers for ISP4xxx is 1 and 3*/
5293 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5294 fn = ISP4XXX_PCI_FN_1;
/* Look up the sibling function on the same bus/slot. */
5297 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5298 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5301 /* Get other_ha if other_pdev is valid and state is enable*/
5303 if (atomic_read(&other_pdev->enable_cnt)) {
5304 other_ha = pci_get_drvdata(other_pdev);
5306 set_bit(AF_HA_REMOVAL, &other_ha->flags);
5307 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5308 "Prevent %s reinit\n", __func__,
5309 dev_name(&other_ha->pdev->dev)));
/* Balance the reference taken by pci_get_domain_bus_and_slot(). */
5312 pci_dev_put(other_pdev);
/* qla4xxx_destroy_fw_ddb_session - log out and tear down every flash-DDB
 * session on the adapter (used during remove/unload).
 * NOTE(review): gapped listing — braces, 'options'/'idx' declarations and
 * parts of the error print are elided below. */
5316 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5318 struct ddb_entry *ddb_entry;
5322 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5324 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
/* Only flash-DDB sessions were created by the driver itself. */
5325 if ((ddb_entry != NULL) &&
5326 (ddb_entry->ddb_type == FLASH_DDB)) {
5328 options = LOGOUT_OPTION_CLOSE_SESSION;
5329 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5331 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5334 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
/* Re-take the module reference dropped in qla4xxx_sess_conn_setup()
 * so teardown accounting stays balanced. */
5336 * we have decremented the reference count of the driver
5337 * when we setup the session to have the driver unload
5338 * to be seamless without actually destroying the
5341 try_module_get(qla4xxx_iscsi_transport.owner);
5342 iscsi_destroy_endpoint(ddb_entry->conn->ep);
5343 qla4xxx_free_ddb(ha, ddb_entry);
5344 iscsi_session_teardown(ddb_entry->sess);
5349 * qla4xxx_remove_adapter - calback function to remove adapter.
5350 * @pci_dev: PCI device pointer
/* NOTE(review): gapped listing — braces elided.  Teardown order mirrors
 * the probe path in reverse. */
5352 static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5354 struct scsi_qla_host *ha;
5356 ha = pci_get_drvdata(pdev);
/* Only ISP-4xxx has a sibling port to protect from re-init. */
5358 if (!is_qla8022(ha))
5359 qla4xxx_prevent_other_port_reinit(ha);
5361 /* destroy iface from sysfs */
5362 qla4xxx_destroy_ifaces(ha);
5364 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
5365 iscsi_boot_destroy_kset(ha->boot_kset);
5367 qla4xxx_destroy_fw_ddb_session(ha);
5368 qla4_8xxx_free_sysfs_attr(ha);
5370 scsi_remove_host(ha->host);
5372 qla4xxx_free_adapter(ha);
5374 scsi_host_put(ha->host);
5376 pci_disable_pcie_error_reporting(pdev);
5377 pci_disable_device(pdev);
5378 pci_set_drvdata(pdev, NULL);
5382 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5385 * At exit, the @ha's flags.enable_64bit_addressing set to indicated
5386 * supported addressing method.
/* NOTE(review): gapped listing — braces, the 'retval' declaration and the
 * 32-bit fallback mask argument are elided below. */
5388 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
5392 /* Update our PCI device dma_mask for full 64 bit mask */
5393 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
/* 64-bit streaming DMA works; fall back for the coherent mask only. */
5394 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
5395 dev_dbg(&ha->pdev->dev,
5396 "Failed to set 64 bit PCI consistent mask; "
5398 retval = pci_set_consistent_dma_mask(ha->pdev,
5402 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
/* qla4xxx_slave_alloc - SCSI midlayer slave_alloc hook: attach the session's
 * ddb_entry to the sdev and activate tagged queuing with the configured
 * depth.  NOTE(review): gapped listing — braces and return elided. */
5405 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5407 struct iscsi_cls_session *cls_sess;
5408 struct iscsi_session *sess;
5409 struct ddb_entry *ddb;
5410 int queue_depth = QL4_DEF_QDEPTH;
5412 cls_sess = starget_to_session(sdev->sdev_target);
5413 sess = cls_sess->dd_data;
5414 ddb = sess->dd_data;
5416 sdev->hostdata = ddb;
5417 sdev->tagged_supported = 1;
/* Honor the ql4xmaxqdepth module parameter when it is a sane 16-bit
 * value; otherwise keep the QL4_DEF_QDEPTH default. */
5419 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5420 queue_depth = ql4xmaxqdepth;
5422 scsi_activate_tcq(sdev, queue_depth);
/* qla4xxx_slave_configure - slave_configure hook; only flags tagged-command
 * support.  NOTE(review): gapped listing — braces/return elided. */
5426 static int qla4xxx_slave_configure(struct scsi_device *sdev)
5428 sdev->tagged_supported = 1;
/* qla4xxx_slave_destroy - slave_destroy hook; drop tagged queuing back to
 * depth 1.  NOTE(review): gapped listing — braces elided. */
5432 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5434 scsi_deactivate_tcq(sdev, 1);
/* qla4xxx_change_queue_depth - defer queue-depth changes to the iscsi layer
 * unless queue-full tracking is disabled via ql4xqfulltracking.
 * NOTE(review): gapped listing — the early-return value and braces elided. */
5437 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5440 if (!ql4xqfulltracking)
5443 return iscsi_change_queue_depth(sdev, qdepth, reason);
5447 * qla4xxx_del_from_active_array - returns an active srb
5448 * @ha: Pointer to host adapter structure.
5449 * @index: index into the active_array
5451 * This routine removes and returns the srb at the specified index
/* NOTE(review): gapped listing — braces, null checks and the return
 * statement are elided below. */
5453 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5456 struct srb *srb = NULL;
5457 struct scsi_cmnd *cmd = NULL;
/* The srb is recovered via the SCSI command stored at this tag. */
5459 cmd = scsi_host_find_tag(ha->host, index);
5463 srb = (struct srb *)CMD_SP(cmd);
5467 /* update counters */
5468 if (srb->flags & SRB_DMA_VALID) {
/* Return the request-queue slots this command consumed. */
5469 ha->req_q_count += srb->iocb_cnt;
5470 ha->iocb_cnt -= srb->iocb_cnt;
5472 srb->cmd->host_scribble =
5473 (unsigned char *)(unsigned long) MAX_SRBS;
5479 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
5480 * @ha: Pointer to host adapter structure.
5481 * @cmd: Scsi Command to wait on.
5483 * This routine waits for the command to be returned by the Firmware
5484 * for some max time.
/* NOTE(review): gapped listing — braces, the 'done'/'rp' declarations, the
 * per-iteration sleep and the return are elided below. */
5486 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5487 struct scsi_cmnd *cmd)
5491 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
5494 /* Dont wait on command if PCI error is being handled
5497 if (unlikely(pci_channel_offline(ha->pdev)) ||
5498 (test_bit(AF_EEH_BUSY, &ha->flags))) {
5499 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5500 ha->host_no, __func__);
5505 /* Checking to see if its returned to OS */
/* A NULL CMD_SP means the firmware has completed the command. */
5506 rp = (struct srb *) CMD_SP(cmd);
5513 } while (max_wait_time--);
5519 * qla4xxx_wait_for_hba_online - waits for HBA to come online
5520 * @ha: Pointer to host adapter structure
/* NOTE(review): gapped listing — the loop body (online test + sleep) and
 * the return values are elided below. */
5522 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5524 unsigned long wait_online;
/* Poll for up to HBA_ONLINE_TOV seconds. */
5526 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
5527 while (time_before(jiffies, wait_online)) {
5539 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
5540 * @ha: pointer to HBA
5544 * This function waits for all outstanding commands to a lun to complete. It
5545 * returns 0 if all pending commands are returned and 1 otherwise.
/* NOTE(review): gapped listing — braces, the 'status'/'cnt' declarations
 * and the status update/return are elided below. */
5547 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5548 struct scsi_target *stgt,
5549 struct scsi_device *sdev)
5553 struct scsi_cmnd *cmd;
5556 * Waiting for all commands for the designated target or dev
5557 * in the active array
5559 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5560 cmd = scsi_host_find_tag(ha->host, cnt);
/* Match the target; a NULL sdev means "any lun on this target". */
5561 if (cmd && stgt == scsi_target(cmd->device) &&
5562 (!sdev || sdev == cmd->device)) {
5563 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5573 * qla4xxx_eh_abort - callback for abort task.
5574 * @cmd: Pointer to Linux's SCSI command structure
5576 * This routine is called by the Linux OS to abort the specified
/* NOTE(review): gapped listing — braces, the 'ret'/'wait' declarations and
 * parts of the success/failure bookkeeping are elided below. */
5579 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5581 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5582 unsigned int id = cmd->device->id;
5583 unsigned int lun = cmd->device->lun;
5584 unsigned long flags;
5585 struct srb *srb = NULL;
5589 ql4_printk(KERN_INFO, ha,
5590 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5591 ha->host_no, id, lun, cmd);
/* Take a reference on the srb under hardware_lock so it cannot be
 * completed/freed while the abort mailbox command runs. */
5593 spin_lock_irqsave(&ha->hardware_lock, flags);
5594 srb = (struct srb *) CMD_SP(cmd);
5596 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5599 kref_get(&srb->srb_ref);
5600 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5602 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5603 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5604 ha->host_no, id, lun));
5607 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5608 ha->host_no, id, lun));
/* Drop the extra reference taken above. */
5612 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5614 /* Wait for command to complete */
5616 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5617 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5618 ha->host_no, id, lun));
5623 ql4_printk(KERN_INFO, ha,
5624 "scsi%ld:%d:%d: Abort command - %s\n",
5625 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
5631 * qla4xxx_eh_device_reset - callback for target reset.
5632 * @cmd: Pointer to Linux's SCSI command structure
5634 * This routine is called by the Linux OS to reset all luns on the
/* NOTE(review): gapped listing — braces, the ddb_entry null check, the
 * SUCCESS assignment and the eh_dev_reset_done label/return are elided. */
5637 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5639 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5640 struct ddb_entry *ddb_entry = cmd->device->hostdata;
5641 int ret = FAILED, stat;
/* Let the iscsi transport block/unblock the session around EH. */
5646 ret = iscsi_block_scsi_eh(cmd);
5651 ql4_printk(KERN_INFO, ha,
5652 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5653 cmd->device->channel, cmd->device->id, cmd->device->lun);
5655 DEBUG2(printk(KERN_INFO
5656 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5657 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
5658 cmd, jiffies, cmd->request->timeout / HZ,
5659 ha->dpc_flags, cmd->result, cmd->allowed));
5661 /* FIXME: wait for hba to go online */
5662 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5663 if (stat != QLA_SUCCESS) {
5664 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
5665 goto eh_dev_reset_done;
/* Ensure all outstanding commands on this lun have drained. */
5668 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5670 ql4_printk(KERN_INFO, ha,
5671 "DEVICE RESET FAILED - waiting for "
5673 goto eh_dev_reset_done;
/* Send marker IOCB so firmware resumes I/O after the lun reset. */
5677 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5678 MM_LUN_RESET) != QLA_SUCCESS)
5679 goto eh_dev_reset_done;
5681 ql4_printk(KERN_INFO, ha,
5682 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5683 ha->host_no, cmd->device->channel, cmd->device->id,
5694 * qla4xxx_eh_target_reset - callback for target reset.
5695 * @cmd: Pointer to Linux's SCSI command structure
5697 * This routine is called by the Linux OS to reset the target.
/* NOTE(review): gapped listing — braces, 'stat'/'ret' declarations, the
 * ddb_entry null check and the exit label/return are elided below. */
5699 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5701 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5702 struct ddb_entry *ddb_entry = cmd->device->hostdata;
5708 ret = iscsi_block_scsi_eh(cmd);
5712 starget_printk(KERN_INFO, scsi_target(cmd->device),
5713 "WARM TARGET RESET ISSUED.\n");
5715 DEBUG2(printk(KERN_INFO
5716 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5717 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
5718 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
5719 ha->dpc_flags, cmd->result, cmd->allowed));
5721 stat = qla4xxx_reset_target(ha, ddb_entry);
5722 if (stat != QLA_SUCCESS) {
5723 starget_printk(KERN_INFO, scsi_target(cmd->device),
5724 "WARM TARGET RESET FAILED.\n");
/* Drain all outstanding commands on every lun of this target. */
5728 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5730 starget_printk(KERN_INFO, scsi_target(cmd->device),
5731 "WARM TARGET DEVICE RESET FAILED - "
5732 "waiting for commands.\n");
5737 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5738 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5739 starget_printk(KERN_INFO, scsi_target(cmd->device),
5740 "WARM TARGET DEVICE RESET FAILED - "
5741 "marker iocb failed.\n");
5745 starget_printk(KERN_INFO, scsi_target(cmd->device),
5746 "WARM TARGET RESET SUCCEEDED.\n");
5751 * qla4xxx_is_eh_active - check if error handler is running
5752 * @shost: Pointer to SCSI Host struct
5754 * This routine finds that if reset host is called in EH
5755 * scenario or from some application like sg_reset
/* NOTE(review): gapped listing — braces and the return values elided;
 * only the SHOST_RECOVERY test is visible. */
5757 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5759 if (shost->shost_state == SHOST_RECOVERY)
5765 * qla4xxx_eh_host_reset - kernel callback
5766 * @cmd: Pointer to Linux's SCSI command structure
5768 * This routine is invoked by the Linux kernel to perform fatal error
5769 * recovery on the specified adapter.
/* NOTE(review): gapped listing — braces, early returns and the is_qla8022
 * condition that selects the FW-context reset flag are elided below. */
5771 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5773 int return_status = FAILED;
5774 struct scsi_qla_host *ha;
5776 ha = to_qla_host(cmd->device->host);
/* Honor the ql4xdontresethba module parameter: abort outstanding
 * commands but do not actually reset the HBA. */
5778 if (ql4xdontresethba) {
5779 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5780 ha->host_no, __func__));
5782 /* Clear outstanding srb in queues */
5783 if (qla4xxx_is_eh_active(cmd->device->host))
5784 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5789 ql4_printk(KERN_INFO, ha,
5790 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
5791 cmd->device->channel, cmd->device->id, cmd->device->lun);
5793 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5794 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
5795 "DEAD.\n", ha->host_no, cmd->device->channel,
/* Request the reset via DPC flags; the DPC thread performs it. */
5801 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5803 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5805 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5808 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
5809 return_status = SUCCESS;
5811 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
5812 return_status == FAILED ? "FAILED" : "SUCCEEDED");
5814 return return_status;
/* qla4xxx_context_reset - reset the firmware context only: save the primary
 * ACB, disable it, wait for the disable AEN, then restore it.
 * NOTE(review): gapped listing — braces, the 'acb_dma' declaration, the
 * rval assignment on alloc failure and exit-label returns are elided. */
5817 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5819 uint32_t mbox_cmd[MBOX_REG_COUNT];
5820 uint32_t mbox_sts[MBOX_REG_COUNT];
5821 struct addr_ctrl_blk_def *acb = NULL;
5822 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5823 int rval = QLA_SUCCESS;
5826 acb = dma_alloc_coherent(&ha->pdev->dev,
5827 sizeof(struct addr_ctrl_blk_def),
5828 &acb_dma, GFP_KERNEL);
5830 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5833 goto exit_port_reset;
5836 memset(acb, 0, acb_len);
/* Snapshot the current primary ACB so it can be restored below. */
5838 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5839 if (rval != QLA_SUCCESS) {
5844 rval = qla4xxx_disable_acb(ha);
5845 if (rval != QLA_SUCCESS) {
/* Wait (bounded) for the disable-ACB completion from the ISR path. */
5850 wait_for_completion_timeout(&ha->disable_acb_comp,
5851 DISABLE_ACB_TOV * HZ);
5853 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5854 if (rval != QLA_SUCCESS) {
5860 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5863 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5864 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
/* qla4xxx_host_reset - iscsi transport host-reset entry point; dispatches
 * SCSI_ADAPTER_RESET to a full adapter recovery and SCSI_FIRMWARE_RESET to
 * a firmware-context reset.
 * NOTE(review): gapped listing — braces, return-value mapping, the
 * is_qla8022 branch of SCSI_FIRMWARE_RESET, 'break's, the default case and
 * the recover_adapter label/return are elided below. */
5868 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5870 struct scsi_qla_host *ha = to_qla_host(shost);
5871 int rval = QLA_SUCCESS;
5873 if (ql4xdontresethba) {
5874 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5877 goto exit_host_reset;
5880 rval = qla4xxx_wait_for_hba_online(ha);
5881 if (rval != QLA_SUCCESS) {
5882 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5883 "adapter\n", __func__));
5885 goto exit_host_reset;
/* A reset already pending? Just run recovery. */
5888 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5889 goto recover_adapter;
5891 switch (reset_type) {
5892 case SCSI_ADAPTER_RESET:
5893 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5895 case SCSI_FIRMWARE_RESET:
5896 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5898 /* set firmware context reset */
5899 set_bit(DPC_RESET_HA_FW_CONTEXT,
5902 rval = qla4xxx_context_reset(ha);
5903 goto exit_host_reset;
5910 rval = qla4xxx_recover_adapter(ha);
5911 if (rval != QLA_SUCCESS) {
5912 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5921 /* PCI AER driver recovers from all correctable errors w/o
5922 * driver intervention. For uncorrectable errors PCI AER
5923 * driver calls the following device driver's callbacks
5925 * - Fatal Errors - link_reset
5926 * - Non-Fatal Errors - driver's pci_error_detected() which
5927 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5929 * PCI AER driver calls
5930 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
5931 * returns RECOVERED or NEED_RESET if fw_hung
5932 * NEED_RESET - driver's slot_reset()
5933 * DISCONNECT - device is dead & cannot recover
5934 * RECOVERED - driver's pci_resume()
/* NOTE(review): gapped listing — braces and the switch keyword line are
 * elided below (the case labels imply a switch on 'state'). */
5936 static pci_ers_result_t
5937 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5939 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5941 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5942 ha->host_no, __func__, state);
5944 if (!is_aer_supported(ha))
5945 return PCI_ERS_RESULT_NONE;
5948 case pci_channel_io_normal:
5949 clear_bit(AF_EEH_BUSY, &ha->flags);
5950 return PCI_ERS_RESULT_CAN_RECOVER;
5951 case pci_channel_io_frozen:
/* I/O is frozen: fail pending mailbox ops, release IRQs and flush
 * outstanding commands back to the midlayer before the slot reset. */
5952 set_bit(AF_EEH_BUSY, &ha->flags);
5953 qla4xxx_mailbox_premature_completion(ha);
5954 qla4xxx_free_irqs(ha);
5955 pci_disable_device(pdev);
5956 /* Return back all IOs */
5957 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5958 return PCI_ERS_RESULT_NEED_RESET;
5959 case pci_channel_io_perm_failure:
5960 set_bit(AF_EEH_BUSY, &ha->flags);
5961 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5962 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5963 return PCI_ERS_RESULT_DISCONNECT;
5965 return PCI_ERS_RESULT_NEED_RESET;
5969 * qla4xxx_pci_mmio_enabled() gets called if
5970 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5971 * and read/write to the device still works.
/* NOTE(review): gapped listing — braces elided. */
5973 static pci_ers_result_t
5974 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5976 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5978 if (!is_aer_supported(ha))
5979 return PCI_ERS_RESULT_NONE;
5981 return PCI_ERS_RESULT_RECOVERED;
/* qla4_8xxx_error_recovery - ISP82xx AER recovery: determine whether this
 * function is the "reset owner" (lowest enabled PCI function on the card);
 * the owner reinitializes firmware and drives the IDC device state, other
 * functions just reinitialize their own context once the device is READY.
 * NOTE(review): gapped listing — braces, the 'fn'/'ret' declarations, the
 * while-loop over lower function numbers, fn decrement and the final
 * return are elided below. */
5984 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
5986 uint32_t rval = QLA_ERROR;
5989 struct pci_dev *other_pdev = NULL;
5991 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
5993 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5995 if (test_bit(AF_ONLINE, &ha->flags)) {
5996 clear_bit(AF_ONLINE, &ha->flags);
5997 clear_bit(AF_LINK_UP, &ha->flags);
5998 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
5999 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
6002 fn = PCI_FUNC(ha->pdev->devfn);
/* Scan lower-numbered functions: if any is enabled, we are not owner. */
6005 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
6006 "func %x\n", ha->host_no, __func__, fn);
6007 /* Get the pci device given the domain, bus,
6008 * slot/function number */
6010 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
6011 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
6017 if (atomic_read(&other_pdev->enable_cnt)) {
6018 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
6019 "func in enabled state%x\n", ha->host_no,
6021 pci_dev_put(other_pdev);
6024 pci_dev_put(other_pdev);
6027 /* The first function on the card, the reset owner will
6028 * start & initialize the firmware. The other functions
6029 * on the card will reset the firmware context
6032 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
6033 "0x%x is the owner\n", ha->host_no, __func__,
/* Owner path: take IDC lock, advance device state, bump IDC version,
 * then reinitialize the adapter. */
6036 qla4_82xx_idc_lock(ha);
6037 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6040 qla4_82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
6041 QLA82XX_IDC_VERSION);
6043 qla4_82xx_idc_unlock(ha);
6044 clear_bit(AF_FW_RECOVERY, &ha->flags);
6045 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6046 qla4_82xx_idc_lock(ha);
6048 if (rval != QLA_SUCCESS) {
6049 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6050 "FAILED\n", ha->host_no, __func__);
6051 qla4_8xxx_clear_drv_active(ha);
6052 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6053 QLA82XX_DEV_FAILED);
6055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6056 "READY\n", ha->host_no, __func__);
6057 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6059 /* Clear driver state register */
6060 qla4_82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
6061 qla4_8xxx_set_drv_active(ha);
6062 ret = qla4xxx_request_irqs(ha);
6064 ql4_printk(KERN_WARNING, ha, "Failed to "
6065 "reserve interrupt %d already in use.\n",
6069 ha->isp_ops->enable_intrs(ha);
6073 qla4_82xx_idc_unlock(ha);
/* Non-owner path: wait for READY, then reinit own context only. */
6075 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6076 "the reset owner\n", ha->host_no, __func__,
6078 if ((qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
6079 QLA82XX_DEV_READY)) {
6080 clear_bit(AF_FW_RECOVERY, &ha->flags);
6081 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6082 if (rval == QLA_SUCCESS) {
6083 ret = qla4xxx_request_irqs(ha);
6085 ql4_printk(KERN_WARNING, ha, "Failed to"
6086 " reserve interrupt %d already in"
6087 " use.\n", ha->pdev->irq);
6090 ha->isp_ops->enable_intrs(ha);
6094 qla4_82xx_idc_lock(ha);
6095 qla4_8xxx_set_drv_active(ha);
6096 qla4_82xx_idc_unlock(ha);
6099 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
/* qla4xxx_pci_slot_reset - AER slot_reset callback: restore PCI state,
 * re-enable the device and run the 8xxx recovery path.
 * NOTE(review): gapped listing — braces, pci_set_master and the exit
 * label/return are elided below. */
6103 static pci_ers_result_t
6104 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6106 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
6107 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6110 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
6111 ha->host_no, __func__);
6113 if (!is_aer_supported(ha))
6114 return PCI_ERS_RESULT_NONE;
6116 /* Restore the saved state of PCIe device -
6117 * BAR registers, PCI Config space, PCIX, MSI,
6120 pci_restore_state(pdev);
6122 /* pci_restore_state() clears the saved_state flag of the device
6123 * save restored state which resets saved_state flag
6125 pci_save_state(pdev);
6127 /* Initialize device or resume if in suspended state */
6128 rc = pci_enable_device(pdev);
6130 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
6131 "device after reset\n", ha->host_no, __func__);
6132 goto exit_slot_reset;
/* Keep interrupts off until recovery decides to re-enable them. */
6135 ha->isp_ops->disable_intrs(ha);
6137 if (is_qla8022(ha)) {
6138 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6139 ret = PCI_ERS_RESULT_RECOVERED;
6140 goto exit_slot_reset;
6142 goto exit_slot_reset;
6146 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
6147 "device after reset\n", ha->host_no, __func__, ret);
6152 qla4xxx_pci_resume(struct pci_dev *pdev)
6154 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
6157 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
6158 ha->host_no, __func__);
6160 ret = qla4xxx_wait_for_hba_online(ha);
6161 if (ret != QLA_SUCCESS) {
6162 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
6163 "resume I/O from slot/link_reset\n", ha->host_no,
6167 pci_cleanup_aer_uncorrect_error_status(pdev);
6168 clear_bit(AF_EEH_BUSY, &ha->flags);
6171 static struct pci_error_handlers qla4xxx_err_handler = {
6172 .error_detected = qla4xxx_pci_error_detected,
6173 .mmio_enabled = qla4xxx_pci_mmio_enabled,
6174 .slot_reset = qla4xxx_pci_slot_reset,
6175 .resume = qla4xxx_pci_resume,
6178 static struct pci_device_id qla4xxx_pci_tbl[] = {
6180 .vendor = PCI_VENDOR_ID_QLOGIC,
6181 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
6182 .subvendor = PCI_ANY_ID,
6183 .subdevice = PCI_ANY_ID,
6186 .vendor = PCI_VENDOR_ID_QLOGIC,
6187 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
6188 .subvendor = PCI_ANY_ID,
6189 .subdevice = PCI_ANY_ID,
6192 .vendor = PCI_VENDOR_ID_QLOGIC,
6193 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
6194 .subvendor = PCI_ANY_ID,
6195 .subdevice = PCI_ANY_ID,
6198 .vendor = PCI_VENDOR_ID_QLOGIC,
6199 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
6200 .subvendor = PCI_ANY_ID,
6201 .subdevice = PCI_ANY_ID,
6205 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
6207 static struct pci_driver qla4xxx_pci_driver = {
6208 .name = DRIVER_NAME,
6209 .id_table = qla4xxx_pci_tbl,
6210 .probe = qla4xxx_probe_adapter,
6211 .remove = qla4xxx_remove_adapter,
6212 .err_handler = &qla4xxx_err_handler,
6215 static int __init qla4xxx_module_init(void)
6219 /* Allocate cache for SRBs. */
6220 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
6221 SLAB_HWCACHE_ALIGN, NULL);
6222 if (srb_cachep == NULL) {
6224 "%s: Unable to allocate SRB cache..."
6225 "Failing load!\n", DRIVER_NAME);
6230 /* Derive version string. */
6231 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
6232 if (ql4xextended_error_logging)
6233 strcat(qla4xxx_version_str, "-debug");
6235 qla4xxx_scsi_transport =
6236 iscsi_register_transport(&qla4xxx_iscsi_transport);
6237 if (!qla4xxx_scsi_transport){
6239 goto release_srb_cache;
6242 ret = pci_register_driver(&qla4xxx_pci_driver);
6244 goto unregister_transport;
6246 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6249 unregister_transport:
6250 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6252 kmem_cache_destroy(srb_cachep);
6257 static void __exit qla4xxx_module_exit(void)
6259 pci_unregister_driver(&qla4xxx_pci_driver);
6260 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6261 kmem_cache_destroy(srb_cachep);
/* Module entry/exit hooks and standard module metadata. */
module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);