/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
	struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
		iocbs += (dsds - 3) / 7;
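/*
 * Sketch of the complete calculation (the elided remainder handling is
 * inferred from the DSD capacities noted in the IOCB builders below): the
 * Command Type 2 IOCB itself carries up to 3 DSDs and each Continuation
 * Type 0 IOCB carries up to 7 more, so iocbs = 1 + ceil((dsds - 3) / 7)
 * whenever dsds > 3.  Example: dsds = 12 gives 1 + ceil(9 / 7) = 3 entries.
 */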
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
		iocbs += (dsds - 2) / 5;
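/*
 * Sketch of the complete calculation, on the same pattern as above: the
 * Command Type 3 IOCB carries 2 DSDs and each Continuation Type 1 IOCB
 * carries 5, so iocbs = 1 + ceil((dsds - 2) / 5) whenever dsds > 2.
 * Example: dsds = 12 gives 1 + ceil(10 / 5) = 3 IOCB entries.
 */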
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
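/*
 * Both prep helpers consume one entry from vha->req, so callers must already
 * have accounted for continuation IOCBs in their request-entry count (see the
 * calc_iocbs helpers above) before claiming ring space.
 */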
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}
		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
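/*
 * Worked example of the DSD distribution (a sketch based on the capacities
 * above): an 8-entry scatterlist places 3 DSDs in the Command Type 2 IOCB and
 * the remaining 5 in a single Continuation Type 0 IOCB, which matches
 * qla2x00_calc_iocbs_32(8) == 2.
 */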
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
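/*
 * Note that the 64-bit variant writes three words per DSD (address low,
 * address high, length).  Example, as a sketch: a 7-entry scatterlist uses
 * 2 DSDs in the Command Type 3 IOCB and 5 in one Continuation Type 1 IOCB,
 * matching qla2x00_calc_iocbs_64(7) == 2.
 */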
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	cmd_entry_t *cmd_pkt;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;
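	/*
	 * Worked example of the free-slot math above (a sketch): with a
	 * 128-entry ring, ring_index 120 and a hardware out pointer of 8, the
	 * driver computes req->cnt = 128 - (120 - 8) = 16 free entries, so a
	 * command needing req_cnt + 2 = 5 entries is accepted; the "+ 2"
	 * appears to keep a small reserve so the ring never fills completely.
	 */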
	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number. */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));
		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
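/*
 * Typical use, as a sketch (callers and modifiers vary): after a reset the
 * driver sets vha->marker_needed, and the next qla2x00_start_scsi() or
 * qla24xx_start_scsi() call issues qla2x00_marker(vha, req, rsp, 0, 0,
 * MK_SYNC_ALL) before queuing new commands, as both start routines above show.
 */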
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else if (IS_QLA82XX(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp82.req_q_out);
			else if (IS_FWI2_CAPABLE(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}
		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
			(unsigned long __iomem *)ha->nxdb_wr_ptr,
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD((unsigned long __iomem *)
			    ha->nxdb_wr_ptr, dbval);
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
		iocbs += (dsds - 1) / 5;
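/*
 * Sketch of the complete calculation (the elided remainder handling follows
 * the same pattern as the 32-bit helper above): the Command Type 7 IOCB
 * carries a single DSD and each Continuation Type 1 IOCB carries 5, so
 * iocbs = 1 + ceil((dsds - 1) / 5) whenever dsds > 1.  Example: dsds = 12
 * gives 1 + ceil(11 / 5) = 4 IOCB entries.
 */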
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * and Continuation Type 1 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	struct cmd_type_7 *cmd_pkt;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
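/*
 * Example (a sketch): with cpu_affinity_enabled set and max_rsp_queues = 4, a
 * command issued from CPU 1 completes on rsp_q_map[2]; a negative affinity or
 * a CPU at or beyond max_rsp_queues - 1 falls back to the default queue,
 * rsp_q_map[0].
 */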
/* Generic Control-SRB manipulation functions. */

static request_t *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt = NULL;
	uint16_t cnt, req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}
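/*
 * The handle stored in the packet is the index into req->outstanding_cmds[],
 * which is how the response path later maps a completed IOCB back to its
 * srb_t; qla2x00_start_scsi() and qla24xx_start_scsi() above use the same
 * scheme for SCSI commands.
 */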
static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(sp);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_QLA82XX(ha)) {
			qla82xx_start_iocbs(sp);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_logio *lio = sp->ctx;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_logio *lio = sp->ctx;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
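/*
 * Example of the mailbox packing above (a sketch): without extended IDs the
 * loop ID and option bits share mb1, so loop_id 0x81 with the conditional-
 * PLOGI option (BIT_0) yields mb1 = 0x8101; with extended IDs the loop ID
 * goes in mb1 and the options in mb10.
 */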
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count =
	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_bsg *)sp->ctx)->bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		qla24xx_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	default:
		break;
	}

	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}