rk fb: from rk3368 fb only need to reserved 1 framebuffer
[firefly-linux-kernel-4.4.55.git] / drivers / scsi / qla2xxx / qla_iocb.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12
13 #include <scsi/scsi_tcq.h>
14
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
16 /**
17  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
18  * @cmd: SCSI command
19  *
20  * Returns the proper CF_* direction based on CDB.
21  */
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
24 {
25         uint16_t cflags;
26         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27         struct scsi_qla_host *vha = sp->fcport->vha;
28
29         cflags = 0;
30
31         /* Set transfer direction */
32         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33                 cflags = CF_WRITE;
34                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36                 cflags = CF_READ;
37                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38         }
39         return (cflags);
40 }
41
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t entries = 1;

	/*
	 * The Command Type 2 IOCB carries 3 DSDs; each Continuation
	 * Type 0 IOCB carries 7 more.  Round up for a partial final
	 * continuation entry.
	 */
	if (dsds > 3)
		entries += ((dsds - 3) + 6) / 7;

	return entries;
}
63
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t entries = 1;

	/*
	 * The Command Type 3 IOCB carries 2 DSDs; each Continuation
	 * Type 1 IOCB carries 5 more.  Round up for a partial final
	 * continuation entry.
	 */
	if (dsds > 2)
		entries += ((dsds - 2) + 4) / 5;

	return entries;
}
85
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: SCSI driver HA context
 *
 * Advances the base request-queue ring by one slot (wrapping at the end
 * of the ring) and stamps the new entry as CONTINUE_TYPE.  Caller must
 * already have reserved room on the ring.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index: wrap to the start of the ring if needed. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults (entry type, little-endian on the wire). */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
114
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: SCSI driver HA context
 * @req: request queue whose ring is advanced
 *
 * Advances @req's ring by one slot (wrapping at the end of the ring) and
 * stamps the new entry with the continuation type appropriate to the
 * adapter (ISPFX00 uses a distinct entry type code).
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index: wrap to the start of the ring if needed. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
144
145 static inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 {
148         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149         uint8_t guard = scsi_host_get_guard(cmd->device->host);
150
151         /* We always use DIFF Bundling for best performance */
152         *fw_prot_opts = 0;
153
154         /* Translate SCSI opcode to a protection opcode */
155         switch (scsi_get_prot_op(cmd)) {
156         case SCSI_PROT_READ_STRIP:
157                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158                 break;
159         case SCSI_PROT_WRITE_INSERT:
160                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161                 break;
162         case SCSI_PROT_READ_INSERT:
163                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164                 break;
165         case SCSI_PROT_WRITE_STRIP:
166                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167                 break;
168         case SCSI_PROT_READ_PASS:
169         case SCSI_PROT_WRITE_PASS:
170                 if (guard & SHOST_DIX_GUARD_IP)
171                         *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172                 else
173                         *fw_prot_opts |= PO_MODE_DIF_PASS;
174                 break;
175         default:        /* Normal Request */
176                 *fw_prot_opts |= PO_MODE_DIF_PASS;
177                 break;
178         }
179
180         return scsi_prot_sg_count(cmd);
181 }
182
/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Fills @cmd_pkt's three DSD slots, spilling further segments into
 * Continuation Type 0 IOCBs on the base request ring.
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		/* 32-bit DSD: DMA address low dword, then length. */
		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
240
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Fills @cmd_pkt's two DSD slots, spilling further segments into
 * Continuation Type 1 IOCBs on the base request ring.
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		/* 64-bit DSD: address low dword, address high dword, length. */
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
301
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 2/3 IOCB on the base request queue (queue 0)
 * under the hardware lock, rings the chip, and — when ZIO/RIO response
 * processing is enabled — drains any responses already posted.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/*
	 * Check for room in outstanding command list.  Handle 0 is never
	 * issued, so the search covers slots 1..num_outstanding_cmds-1,
	 * starting just past the most recently used handle.
	 */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/*
	 * Calculate the number of request entries needed; require two
	 * extra entries of headroom beyond the command itself.
	 */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		/* Re-read the chip's consumer index to refresh free count. */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet (past entry_type/handle). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB globally visible before ringing the doorbell. */
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	/* Undo the DMA mapping if the sg table was mapped above. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
467
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: SCSI driver HA context
 * @req: request queue whose ring index is advanced
 *
 * Bumps the request-queue ring index (wrapping at the end of the ring)
 * and notifies the chip through the doorbell mechanism appropriate to
 * the adapter generation.
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		/* ISP82xx has its own request-queue kick path. */
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/*
		 * Set chip new ring index; each branch follows the write
		 * with a read so the PCI write is posted before returning.
		 */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
506
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: SCSI driver HA context
 * @req: request queue (NOTE: overridden below with the base queue)
 * @rsp: response queue (unused here; kept for interface symmetry)
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier (MK_SYNC_*)
 *
 * Must be called with the hardware lock held; see qla2x00_marker() for
 * the locking wrapper.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct mrk_entry_fx00 *mrkfx = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Markers always go on the base request queue. */
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	/* MK_SYNC_ALL needs no target/LUN addressing. */
	if (type != MK_SYNC_ALL) {
		if (IS_QLAFX00(ha)) {
			mrkfx = (struct mrk_entry_fx00 *) mrk;
			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
			mrkfx->handle_hi = 0;
			mrkfx->tgt_id = cpu_to_le16(loop_id);
			mrkfx->lun[1] = LSB(lun);
			mrkfx->lun[2] = MSB(lun);
			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
		} else if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	/* Make the marker IOCB visible before ringing the doorbell. */
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
569
570 int
571 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
572                 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
573                 uint8_t type)
574 {
575         int ret;
576         unsigned long flags = 0;
577
578         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
579         ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
580         spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
581
582         return (ret);
583 }
584
585 /*
586  * qla2x00_issue_marker
587  *
588  * Issue marker
589  * Caller CAN have hardware lock held as specified by ha_locked parameter.
590  * Might release it, then reaquire.
591  */
592 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
593 {
594         if (ha_locked) {
595                 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
596                                         MK_SYNC_ALL) != QLA_SUCCESS)
597                         return QLA_FUNCTION_FAILED;
598         } else {
599                 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
600                                         MK_SYNC_ALL) != QLA_SUCCESS)
601                         return QLA_FUNCTION_FAILED;
602         }
603         vha->marker_needed = 0;
604
605         return QLA_SUCCESS;
606 }
607
/*
 * qla24xx_build_scsi_type_6_iocbs() - Load the data segments for a
 * Command Type 6 IOCB using externally allocated DSD lists.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 6 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * DSD list buffers are taken from the pre-allocated ha->gbl_dsd_list
 * pool and tracked on the command's ctx->dsd_list for release at
 * completion.  Caller is expected to have reserved enough pool entries.
 *
 * Returns 0.
 */
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct	scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		/* Each DSD list holds at most QLA_DSDS_PER_IOCB entries. */
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		/* +1 leaves room for the terminator/chain entry. */
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		/* Move one DSD buffer from the global pool to this command. */
		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			/* First list is referenced directly from the IOCB. */
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			/* Later lists are chained from the previous list. */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			/* 64-bit DSD: address low, address high, length. */
			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ =  0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
700
701 /*
702  * qla24xx_calc_dsd_lists() - Determine number of DSD list required
703  * for Command Type 6.
704  *
705  * @dsds: number of data segment decriptors needed
706  *
707  * Returns the number of dsd list needed to store @dsds.
708  */
709 inline uint16_t
710 qla24xx_calc_dsd_lists(uint16_t dsds)
711 {
712         uint16_t dsd_lists = 0;
713
714         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
715         if (dsds % QLA_DSDS_PER_IOCB)
716                 dsd_lists++;
717         return dsd_lists;
718 }
719
720
721 /**
722  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
723  * IOCB types.
724  *
725  * @sp: SRB command to process
726  * @cmd_pkt: Command type 3 IOCB
727  * @tot_dsds: Total number of segments to transfer
728  */
729 inline void
730 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
731     uint16_t tot_dsds)
732 {
733         uint16_t        avail_dsds;
734         uint32_t        *cur_dsd;
735         scsi_qla_host_t *vha;
736         struct scsi_cmnd *cmd;
737         struct scatterlist *sg;
738         int i;
739         struct req_que *req;
740
741         cmd = GET_CMD_SP(sp);
742
743         /* Update entry type to indicate Command Type 3 IOCB */
744         *((uint32_t *)(&cmd_pkt->entry_type)) =
745             __constant_cpu_to_le32(COMMAND_TYPE_7);
746
747         /* No data transfer */
748         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
749                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
750                 return;
751         }
752
753         vha = sp->fcport->vha;
754         req = vha->req;
755
756         /* Set transfer direction */
757         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
758                 cmd_pkt->task_mgmt_flags =
759                     __constant_cpu_to_le16(TMF_WRITE_DATA);
760                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
761         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
762                 cmd_pkt->task_mgmt_flags =
763                     __constant_cpu_to_le16(TMF_READ_DATA);
764                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
765         }
766
767         /* One DSD is available in the Command Type 3 IOCB */
768         avail_dsds = 1;
769         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
770
771         /* Load data segments */
772
773         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
774                 dma_addr_t      sle_dma;
775                 cont_a64_entry_t *cont_pkt;
776
777                 /* Allocate additional continuation packets? */
778                 if (avail_dsds == 0) {
779                         /*
780                          * Five DSDs are available in the Continuation
781                          * Type 1 IOCB.
782                          */
783                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
784                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
785                         avail_dsds = 5;
786                 }
787
788                 sle_dma = sg_dma_address(sg);
789                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
790                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
791                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
792                 avail_dsds--;
793         }
794 }
795
/*
 * T10-DIF protection context carried in CRC-enabled command IOCBs;
 * populated by qla24xx_set_t10dif_tags().
 */
struct fw_dif_context {
	uint32_t ref_tag;		/* reference tag (seeded from LBA) */
	uint16_t app_tag;		/* application tag */
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
802
/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI
 * command and load them into the firmware DIF context.
 *
 * @sp: SRB command to process
 * @pkt: firmware DIF context to fill
 * @protcnt: protection segment count (currently unused here —
 *           NOTE(review): kept for interface symmetry; confirm callers)
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		/* App tag is not checked for Type 2. */
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
882
/*
 * Cursor state for walking a SCSI data s/g list one protection-interval
 * (sector-sized) chunk at a time; consumed by qla24xx_get_one_block_sg().
 * Zero the whole structure before the first call.
 */
struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT: DMA address of current chunk */
	uint32_t		dma_len;	/* OUT: length of current chunk */

	uint32_t		tot_bytes;	/* IN: total transfer length (scsi_bufflen) */
	struct scatterlist	*cur_sg;	/* IN: s/g element currently being consumed */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;	/* bytes already used from cur_sg */
	uint32_t		num_bytes;	/* bytes emitted as whole blocks so far */
	uint32_t		tot_partial;	/* bytes carried into the in-progress block */

	/* for debugging */
	uint32_t		num_sg;		/* count of fully-consumed s/g elements */
	srb_t			*sp;		/* owning SRB (never read; debug aid only) */
};
899
900 static int
901 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
902         uint32_t *partial)
903 {
904         struct scatterlist *sg;
905         uint32_t cumulative_partial, sg_len;
906         dma_addr_t sg_dma_addr;
907
908         if (sgx->num_bytes == sgx->tot_bytes)
909                 return 0;
910
911         sg = sgx->cur_sg;
912         cumulative_partial = sgx->tot_partial;
913
914         sg_dma_addr = sg_dma_address(sg);
915         sg_len = sg_dma_len(sg);
916
917         sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
918
919         if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
920                 sgx->dma_len = (blk_sz - cumulative_partial);
921                 sgx->tot_partial = 0;
922                 sgx->num_bytes += blk_sz;
923                 *partial = 0;
924         } else {
925                 sgx->dma_len = sg_len - sgx->bytes_consumed;
926                 sgx->tot_partial += sgx->dma_len;
927                 *partial = 1;
928         }
929
930         sgx->bytes_consumed += sgx->dma_len;
931
932         if (sg_len == sgx->bytes_consumed) {
933                 sg = sg_next(sg);
934                 sgx->num_sg++;
935                 sgx->cur_sg = sg;
936                 sgx->bytes_consumed = 0;
937         }
938
939         return 1;
940 }
941
/*
 * qla24xx_walk_and_build_sglist_no_difb() - Build the DSD list for a
 * protection-capable command when DIF bundling is not in effect.
 *
 * The data s/g list is walked one protection interval (sector) at a
 * time via qla24xx_get_one_block_sg(); after each completed interval,
 * 8 bytes taken from the protection s/g list are interleaved into the
 * same DSD stream. Continuation DSD lists are allocated from
 * ha->dl_dma_pool as needed and chained onto the CRC context.
 *
 * @ha: HBA owning the DSD DMA pool
 * @sp: SRB being built (its crc_context tracks the allocated lists)
 * @dsd: first DSD slot to fill
 * @tot_dsds: total number of data segments required
 *
 * Returns 0 on success, 1 on allocation failure; partially-chained
 * lists are reclaimed later by sp_free_dma().
 */
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int;	/* protection interval = device sector size */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			/* Each DSD is 12 bytes; +1 slot for the chain entry. */
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;	/* 8 bytes of protection data per interval */

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
1038
1039 static int
1040 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1041         uint16_t tot_dsds)
1042 {
1043         void *next_dsd;
1044         uint8_t avail_dsds = 0;
1045         uint32_t dsd_list_len;
1046         struct dsd_dma *dsd_ptr;
1047         struct scatterlist *sg;
1048         uint32_t *cur_dsd = dsd;
1049         int     i;
1050         uint16_t        used_dsds = tot_dsds;
1051         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1052
1053         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1054                 dma_addr_t      sle_dma;
1055
1056                 /* Allocate additional continuation packets? */
1057                 if (avail_dsds == 0) {
1058                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1059                                         QLA_DSDS_PER_IOCB : used_dsds;
1060                         dsd_list_len = (avail_dsds + 1) * 12;
1061                         used_dsds -= avail_dsds;
1062
1063                         /* allocate tracking DS */
1064                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1065                         if (!dsd_ptr)
1066                                 return 1;
1067
1068                         /* allocate new list */
1069                         dsd_ptr->dsd_addr = next_dsd =
1070                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1071                                 &dsd_ptr->dsd_list_dma);
1072
1073                         if (!next_dsd) {
1074                                 /*
1075                                  * Need to cleanup only this dsd_ptr, rest
1076                                  * will be done by sp_free_dma()
1077                                  */
1078                                 kfree(dsd_ptr);
1079                                 return 1;
1080                         }
1081
1082                         list_add_tail(&dsd_ptr->list,
1083                             &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1084
1085                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1086
1087                         /* add new list to cmd iocb or last list */
1088                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1089                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1090                         *cur_dsd++ = dsd_list_len;
1091                         cur_dsd = (uint32_t *)next_dsd;
1092                 }
1093                 sle_dma = sg_dma_address(sg);
1094
1095                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1096                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1097                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1098                 avail_dsds--;
1099
1100         }
1101         /* Null termination */
1102         *cur_dsd++ = 0;
1103         *cur_dsd++ = 0;
1104         *cur_dsd++ = 0;
1105         return 0;
1106 }
1107
/*
 * qla24xx_walk_and_build_prot_sglist() - Emit one DSD per protection
 * s/g element, chaining continuation DSD lists as needed.
 *
 * Mirrors qla24xx_walk_and_build_sglist() but walks the command's
 * protection scatter/gather list instead of the data list.
 *
 * @ha: HBA owning the DSD DMA pool
 * @sp: SRB being built (its crc_context tracks the allocated lists)
 * @dsd: first DSD slot to fill
 * @tot_dsds: number of protection segments to emit
 *
 * Returns 0 on success, 1 on allocation failure; already-chained lists
 * are reclaimed later by sp_free_dma().
 */
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
							uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			/* Each DSD is 12 bytes; +1 slot for the chain entry. */
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		/* One DSD (addr lo, addr hi, len) per protection element. */
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
1178
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *							Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB to fill in
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Number of protection segments within @tot_dsds
 * @fw_prot_opts: Firmware protection options (tag-checking control bits)
 *
 * Allocates and fills the CRC context (DIF tags, FCP_CMND, DSD lists)
 * from ha->dl_dma_pool. Returns QLA_SUCCESS or QLA_FUNCTION_FAILED; on
 * failure cleanup is left to the caller.
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	int			sgc;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	/* No DIF bundling for INSERT/STRIP protection operations. */
	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	/* Fill in the DIF reference/application tags and masks. */
	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	/* Low bits of additional_cdb_len carry the direction. */
	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
		    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
		    break;
		case ORDERED_QUEUE_TAG:
		    fcp_cmnd->task_attribute = TSK_ORDERED;
		    break;
		default:
		    fcp_cmnd->task_attribute = TSK_SIMPLE;
		    break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8; /* 8 bytes per block */

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	    /* Protection exists only on the host side. */
	    total_bytes = data_bytes;
	    data_bytes += dif_bytes;
	    break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
	    /* Protection travels on the wire. */
	    total_bytes = data_bytes + dif_bytes;
	    break;
	default:
	    BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	/*
	 * NOTE(review): data_bytes was already verified non-zero at the top
	 * and can only have grown since, so this re-check appears to be
	 * unreachable -- confirm before relying on it.
	 */
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
1416
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 7 IOCB on the request ring under the hardware
 * lock and rings the request-queue doorbell.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	/* Search wraps past the last slot back to 1; slot 0 is never used. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	/* Refresh the cached free-entry count from the hardware out-pointer
	 * if it looks too small; +2 keeps slack on the ring. */
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		default:
		    cmd_pkt->task = TSK_SIMPLE;
		    break;
		}
	} else {
		cmd_pkt->task = TSK_SIMPLE;
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen.
	 * NOTE(review): entry_status appears to be reused to carry the rsp
	 * queue id on the submission path -- confirm against IOCB layout. */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* Unmap only if we mapped earlier in this function. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
1584
1585 /**
1586  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1587  * @sp: command to send to the ISP
1588  *
1589  * Returns non-zero if a failure occurred, else zero.
1590  */
1591 int
1592 qla24xx_dif_start_scsi(srb_t *sp)
1593 {
1594         int                     nseg;
1595         unsigned long           flags;
1596         uint32_t                *clr_ptr;
1597         uint32_t                index;
1598         uint32_t                handle;
1599         uint16_t                cnt;
1600         uint16_t                req_cnt = 0;
1601         uint16_t                tot_dsds;
1602         uint16_t                tot_prot_dsds;
1603         uint16_t                fw_prot_opts = 0;
1604         struct req_que          *req = NULL;
1605         struct rsp_que          *rsp = NULL;
1606         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1607         struct scsi_qla_host    *vha = sp->fcport->vha;
1608         struct qla_hw_data      *ha = vha->hw;
1609         struct cmd_type_crc_2   *cmd_pkt;
1610         uint32_t                status = 0;
1611
1612 #define QDSS_GOT_Q_SPACE        BIT_0
1613
1614         /* Only process protection or >16 cdb in this routine */
1615         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1616                 if (cmd->cmd_len <= 16)
1617                         return qla24xx_start_scsi(sp);
1618         }
1619
1620         /* Setup device pointers. */
1621
1622         qla25xx_set_que(sp, &rsp);
1623         req = vha->req;
1624
1625         /* So we know we haven't pci_map'ed anything yet */
1626         tot_dsds = 0;
1627
1628         /* Send marker if required */
1629         if (vha->marker_needed != 0) {
1630                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1631                     QLA_SUCCESS)
1632                         return QLA_FUNCTION_FAILED;
1633                 vha->marker_needed = 0;
1634         }
1635
1636         /* Acquire ring specific lock */
1637         spin_lock_irqsave(&ha->hardware_lock, flags);
1638
1639         /* Check for room in outstanding command list. */
1640         handle = req->current_outstanding_cmd;
1641         for (index = 1; index < req->num_outstanding_cmds; index++) {
1642                 handle++;
1643                 if (handle == req->num_outstanding_cmds)
1644                         handle = 1;
1645                 if (!req->outstanding_cmds[handle])
1646                         break;
1647         }
1648
1649         if (index == req->num_outstanding_cmds)
1650                 goto queuing_error;
1651
1652         /* Compute number of required data segments */
1653         /* Map the sg table so we have an accurate count of sg entries needed */
1654         if (scsi_sg_count(cmd)) {
1655                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1656                     scsi_sg_count(cmd), cmd->sc_data_direction);
1657                 if (unlikely(!nseg))
1658                         goto queuing_error;
1659                 else
1660                         sp->flags |= SRB_DMA_VALID;
1661
1662                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1663                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1664                         struct qla2_sgx sgx;
1665                         uint32_t        partial;
1666
1667                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1668                         sgx.tot_bytes = scsi_bufflen(cmd);
1669                         sgx.cur_sg = scsi_sglist(cmd);
1670                         sgx.sp = sp;
1671
1672                         nseg = 0;
1673                         while (qla24xx_get_one_block_sg(
1674                             cmd->device->sector_size, &sgx, &partial))
1675                                 nseg++;
1676                 }
1677         } else
1678                 nseg = 0;
1679
1680         /* number of required data segments */
1681         tot_dsds = nseg;
1682
1683         /* Compute number of required protection segments */
1684         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1685                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1686                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1687                 if (unlikely(!nseg))
1688                         goto queuing_error;
1689                 else
1690                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1691
1692                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1693                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1694                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1695                 }
1696         } else {
1697                 nseg = 0;
1698         }
1699
1700         req_cnt = 1;
1701         /* Total Data and protection sg segment(s) */
1702         tot_prot_dsds = nseg;
1703         tot_dsds += nseg;
1704         if (req->cnt < (req_cnt + 2)) {
1705                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1706
1707                 if (req->ring_index < cnt)
1708                         req->cnt = cnt - req->ring_index;
1709                 else
1710                         req->cnt = req->length -
1711                                 (req->ring_index - cnt);
1712                 if (req->cnt < (req_cnt + 2))
1713                         goto queuing_error;
1714         }
1715
1716         status |= QDSS_GOT_Q_SPACE;
1717
1718         /* Build header part of command packet (excluding the OPCODE). */
1719         req->current_outstanding_cmd = handle;
1720         req->outstanding_cmds[handle] = sp;
1721         sp->handle = handle;
1722         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1723         req->cnt -= req_cnt;
1724
1725         /* Fill-in common area */
1726         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1727         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1728
1729         clr_ptr = (uint32_t *)cmd_pkt + 2;
1730         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1731
1732         /* Set NPORT-ID and LUN number*/
1733         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1734         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1735         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1736         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1737
1738         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1739         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1740
1741         /* Total Data and protection segment(s) */
1742         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1743
1744         /* Build IOCB segments and adjust for data protection segments */
1745         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1746             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1747                 QLA_SUCCESS)
1748                 goto queuing_error;
1749
1750         cmd_pkt->entry_count = (uint8_t)req_cnt;
1751         /* Specify response queue number where completion should happen */
1752         cmd_pkt->entry_status = (uint8_t) rsp->id;
1753         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1754         wmb();
1755
1756         /* Adjust ring index. */
1757         req->ring_index++;
1758         if (req->ring_index == req->length) {
1759                 req->ring_index = 0;
1760                 req->ring_ptr = req->ring;
1761         } else
1762                 req->ring_ptr++;
1763
1764         /* Set chip new ring index. */
1765         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1766         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1767
1768         /* Manage unprocessed RIO/ZIO commands in response queue. */
1769         if (vha->flags.process_response_queue &&
1770             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1771                 qla24xx_process_response_queue(vha, rsp);
1772
1773         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1774
1775         return QLA_SUCCESS;
1776
1777 queuing_error:
1778         if (status & QDSS_GOT_Q_SPACE) {
1779                 req->outstanding_cmds[handle] = NULL;
1780                 req->cnt += req_cnt;
1781         }
1782         /* Cleanup will be performed by the caller (queuecommand) */
1783
1784         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1785         return QLA_FUNCTION_FAILED;
1786 }
1787
1788
1789 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1790 {
1791         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1792         struct qla_hw_data *ha = sp->fcport->vha->hw;
1793         int affinity = cmd->request->cpu;
1794
1795         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1796                 affinity < ha->max_rsp_queues - 1)
1797                 *rsp = ha->rsp_q_map[affinity + 1];
1798          else
1799                 *rsp = ha->rsp_q_map[0];
1800 }
1801
1802 /* Generic Control-SRB manipulation functions. */
1803 void *
1804 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1805 {
1806         struct qla_hw_data *ha = vha->hw;
1807         struct req_que *req = ha->req_q_map[0];
1808         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1809         uint32_t index, handle;
1810         request_t *pkt;
1811         uint16_t cnt, req_cnt;
1812
1813         pkt = NULL;
1814         req_cnt = 1;
1815         handle = 0;
1816
1817         if (!sp)
1818                 goto skip_cmd_array;
1819
1820         /* Check for room in outstanding command list. */
1821         handle = req->current_outstanding_cmd;
1822         for (index = 1; req->num_outstanding_cmds; index++) {
1823                 handle++;
1824                 if (handle == req->num_outstanding_cmds)
1825                         handle = 1;
1826                 if (!req->outstanding_cmds[handle])
1827                         break;
1828         }
1829         if (index == req->num_outstanding_cmds) {
1830                 ql_log(ql_log_warn, vha, 0x700b,
1831                     "No room on outstanding cmd array.\n");
1832                 goto queuing_error;
1833         }
1834
1835         /* Prep command array. */
1836         req->current_outstanding_cmd = handle;
1837         req->outstanding_cmds[handle] = sp;
1838         sp->handle = handle;
1839
1840         /* Adjust entry-counts as needed. */
1841         if (sp->type != SRB_SCSI_CMD)
1842                 req_cnt = sp->iocbs;
1843
1844 skip_cmd_array:
1845         /* Check for room on request queue. */
1846         if (req->cnt < req_cnt) {
1847                 if (ha->mqenable || IS_QLA83XX(ha))
1848                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1849                 else if (IS_QLA82XX(ha))
1850                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1851                 else if (IS_FWI2_CAPABLE(ha))
1852                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1853                 else if (IS_QLAFX00(ha))
1854                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1855                 else
1856                         cnt = qla2x00_debounce_register(
1857                             ISP_REQ_Q_OUT(ha, &reg->isp));
1858
1859                 if  (req->ring_index < cnt)
1860                         req->cnt = cnt - req->ring_index;
1861                 else
1862                         req->cnt = req->length -
1863                             (req->ring_index - cnt);
1864         }
1865         if (req->cnt < req_cnt)
1866                 goto queuing_error;
1867
1868         /* Prep packet */
1869         req->cnt -= req_cnt;
1870         pkt = req->ring_ptr;
1871         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1872         if (IS_QLAFX00(ha)) {
1873                 WRT_REG_BYTE(&pkt->entry_count, req_cnt);
1874                 WRT_REG_WORD(&pkt->handle, handle);
1875         } else {
1876                 pkt->entry_count = req_cnt;
1877                 pkt->handle = handle;
1878         }
1879
1880 queuing_error:
1881         return pkt;
1882 }
1883
1884 static void
1885 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1886 {
1887         struct srb_iocb *lio = &sp->u.iocb_cmd;
1888
1889         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1890         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1891         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1892                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1893         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1894                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1895         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1896         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1897         logio->port_id[1] = sp->fcport->d_id.b.area;
1898         logio->port_id[2] = sp->fcport->d_id.b.domain;
1899         logio->vp_index = sp->fcport->vha->vp_idx;
1900 }
1901
1902 static void
1903 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1904 {
1905         struct qla_hw_data *ha = sp->fcport->vha->hw;
1906         struct srb_iocb *lio = &sp->u.iocb_cmd;
1907         uint16_t opts;
1908
1909         mbx->entry_type = MBX_IOCB_TYPE;
1910         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1911         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1912         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1913         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1914         if (HAS_EXTENDED_IDS(ha)) {
1915                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1916                 mbx->mb10 = cpu_to_le16(opts);
1917         } else {
1918                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1919         }
1920         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1921         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1922             sp->fcport->d_id.b.al_pa);
1923         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1924 }
1925
1926 static void
1927 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1928 {
1929         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1930         logio->control_flags =
1931             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1932         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1933         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1934         logio->port_id[1] = sp->fcport->d_id.b.area;
1935         logio->port_id[2] = sp->fcport->d_id.b.domain;
1936         logio->vp_index = sp->fcport->vha->vp_idx;
1937 }
1938
1939 static void
1940 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1941 {
1942         struct qla_hw_data *ha = sp->fcport->vha->hw;
1943
1944         mbx->entry_type = MBX_IOCB_TYPE;
1945         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1946         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1947         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1948             cpu_to_le16(sp->fcport->loop_id):
1949             cpu_to_le16(sp->fcport->loop_id << 8);
1950         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1951         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1952             sp->fcport->d_id.b.al_pa);
1953         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1954         /* Implicit: mbx->mbx10 = 0. */
1955 }
1956
1957 static void
1958 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1959 {
1960         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1961         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1962         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1963         logio->vp_index = sp->fcport->vha->vp_idx;
1964 }
1965
1966 static void
1967 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1968 {
1969         struct qla_hw_data *ha = sp->fcport->vha->hw;
1970
1971         mbx->entry_type = MBX_IOCB_TYPE;
1972         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1973         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1974         if (HAS_EXTENDED_IDS(ha)) {
1975                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1976                 mbx->mb10 = cpu_to_le16(BIT_0);
1977         } else {
1978                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1979         }
1980         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1981         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1982         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1983         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1984         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1985 }
1986
1987 static void
1988 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1989 {
1990         uint32_t flags;
1991         unsigned int lun;
1992         struct fc_port *fcport = sp->fcport;
1993         scsi_qla_host_t *vha = fcport->vha;
1994         struct qla_hw_data *ha = vha->hw;
1995         struct srb_iocb *iocb = &sp->u.iocb_cmd;
1996         struct req_que *req = vha->req;
1997
1998         flags = iocb->u.tmf.flags;
1999         lun = iocb->u.tmf.lun;
2000
2001         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2002         tsk->entry_count = 1;
2003         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2004         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2005         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2006         tsk->control_flags = cpu_to_le32(flags);
2007         tsk->port_id[0] = fcport->d_id.b.al_pa;
2008         tsk->port_id[1] = fcport->d_id.b.area;
2009         tsk->port_id[2] = fcport->d_id.b.domain;
2010         tsk->vp_index = fcport->vha->vp_idx;
2011
2012         if (flags == TCF_LUN_RESET) {
2013                 int_to_scsilun(lun, &tsk->lun);
2014                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2015                         sizeof(tsk->lun));
2016         }
2017 }
2018
2019 static void
2020 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2021 {
2022         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2023
2024         els_iocb->entry_type = ELS_IOCB_TYPE;
2025         els_iocb->entry_count = 1;
2026         els_iocb->sys_define = 0;
2027         els_iocb->entry_status = 0;
2028         els_iocb->handle = sp->handle;
2029         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2030         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2031         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2032         els_iocb->sof_type = EST_SOFI3;
2033         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2034
2035         els_iocb->opcode =
2036             sp->type == SRB_ELS_CMD_RPT ?
2037             bsg_job->request->rqst_data.r_els.els_code :
2038             bsg_job->request->rqst_data.h_els.command_code;
2039         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2040         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2041         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2042         els_iocb->control_flags = 0;
2043         els_iocb->rx_byte_count =
2044             cpu_to_le32(bsg_job->reply_payload.payload_len);
2045         els_iocb->tx_byte_count =
2046             cpu_to_le32(bsg_job->request_payload.payload_len);
2047
2048         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2049             (bsg_job->request_payload.sg_list)));
2050         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2051             (bsg_job->request_payload.sg_list)));
2052         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2053             (bsg_job->request_payload.sg_list));
2054
2055         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2056             (bsg_job->reply_payload.sg_list)));
2057         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2058             (bsg_job->reply_payload.sg_list)));
2059         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2060             (bsg_job->reply_payload.sg_list));
2061 }
2062
2063 static void
2064 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2065 {
2066         uint16_t        avail_dsds;
2067         uint32_t        *cur_dsd;
2068         struct scatterlist *sg;
2069         int index;
2070         uint16_t tot_dsds;
2071         scsi_qla_host_t *vha = sp->fcport->vha;
2072         struct qla_hw_data *ha = vha->hw;
2073         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2074         int loop_iterartion = 0;
2075         int cont_iocb_prsnt = 0;
2076         int entry_count = 1;
2077
2078         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2079         ct_iocb->entry_type = CT_IOCB_TYPE;
2080         ct_iocb->entry_status = 0;
2081         ct_iocb->handle1 = sp->handle;
2082         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2083         ct_iocb->status = __constant_cpu_to_le16(0);
2084         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2085         ct_iocb->timeout = 0;
2086         ct_iocb->cmd_dsd_count =
2087             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2088         ct_iocb->total_dsd_count =
2089             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2090         ct_iocb->req_bytecount =
2091             cpu_to_le32(bsg_job->request_payload.payload_len);
2092         ct_iocb->rsp_bytecount =
2093             cpu_to_le32(bsg_job->reply_payload.payload_len);
2094
2095         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2096             (bsg_job->request_payload.sg_list)));
2097         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2098             (bsg_job->request_payload.sg_list)));
2099         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2100
2101         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2102             (bsg_job->reply_payload.sg_list)));
2103         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2104             (bsg_job->reply_payload.sg_list)));
2105         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2106
2107         avail_dsds = 1;
2108         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2109         index = 0;
2110         tot_dsds = bsg_job->reply_payload.sg_cnt;
2111
2112         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2113                 dma_addr_t       sle_dma;
2114                 cont_a64_entry_t *cont_pkt;
2115
2116                 /* Allocate additional continuation packets? */
2117                 if (avail_dsds == 0) {
2118                         /*
2119                         * Five DSDs are available in the Cont.
2120                         * Type 1 IOCB.
2121                                */
2122                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2123                             vha->hw->req_q_map[0]);
2124                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2125                         avail_dsds = 5;
2126                         cont_iocb_prsnt = 1;
2127                         entry_count++;
2128                 }
2129
2130                 sle_dma = sg_dma_address(sg);
2131                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2132                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2133                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2134                 loop_iterartion++;
2135                 avail_dsds--;
2136         }
2137         ct_iocb->entry_count = entry_count;
2138 }
2139
2140 static void
2141 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2142 {
2143         uint16_t        avail_dsds;
2144         uint32_t        *cur_dsd;
2145         struct scatterlist *sg;
2146         int index;
2147         uint16_t tot_dsds;
2148         scsi_qla_host_t *vha = sp->fcport->vha;
2149         struct qla_hw_data *ha = vha->hw;
2150         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2151         int loop_iterartion = 0;
2152         int cont_iocb_prsnt = 0;
2153         int entry_count = 1;
2154
2155         ct_iocb->entry_type = CT_IOCB_TYPE;
2156         ct_iocb->entry_status = 0;
2157         ct_iocb->sys_define = 0;
2158         ct_iocb->handle = sp->handle;
2159
2160         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2161         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2162         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2163
2164         ct_iocb->cmd_dsd_count =
2165             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2166         ct_iocb->timeout = 0;
2167         ct_iocb->rsp_dsd_count =
2168             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2169         ct_iocb->rsp_byte_count =
2170             cpu_to_le32(bsg_job->reply_payload.payload_len);
2171         ct_iocb->cmd_byte_count =
2172             cpu_to_le32(bsg_job->request_payload.payload_len);
2173         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2174             (bsg_job->request_payload.sg_list)));
2175         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2176            (bsg_job->request_payload.sg_list)));
2177         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2178             (bsg_job->request_payload.sg_list));
2179
2180         avail_dsds = 1;
2181         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2182         index = 0;
2183         tot_dsds = bsg_job->reply_payload.sg_cnt;
2184
2185         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2186                 dma_addr_t       sle_dma;
2187                 cont_a64_entry_t *cont_pkt;
2188
2189                 /* Allocate additional continuation packets? */
2190                 if (avail_dsds == 0) {
2191                         /*
2192                         * Five DSDs are available in the Cont.
2193                         * Type 1 IOCB.
2194                                */
2195                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2196                             ha->req_q_map[0]);
2197                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2198                         avail_dsds = 5;
2199                         cont_iocb_prsnt = 1;
2200                         entry_count++;
2201                 }
2202
2203                 sle_dma = sg_dma_address(sg);
2204                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2205                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2206                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2207                 loop_iterartion++;
2208                 avail_dsds--;
2209         }
2210         ct_iocb->entry_count = entry_count;
2211 }
2212
2213 /*
2214  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2215  * @sp: command to send to the ISP
2216  *
2217  * Returns non-zero if a failure occurred, else zero.
2218  */
2219 int
2220 qla82xx_start_scsi(srb_t *sp)
2221 {
2222         int             ret, nseg;
2223         unsigned long   flags;
2224         struct scsi_cmnd *cmd;
2225         uint32_t        *clr_ptr;
2226         uint32_t        index;
2227         uint32_t        handle;
2228         uint16_t        cnt;
2229         uint16_t        req_cnt;
2230         uint16_t        tot_dsds;
2231         struct device_reg_82xx __iomem *reg;
2232         uint32_t dbval;
2233         uint32_t *fcp_dl;
2234         uint8_t additional_cdb_len;
2235         struct ct6_dsd *ctx;
2236         struct scsi_qla_host *vha = sp->fcport->vha;
2237         struct qla_hw_data *ha = vha->hw;
2238         struct req_que *req = NULL;
2239         struct rsp_que *rsp = NULL;
2240         char tag[2];
2241
2242         /* Setup device pointers. */
2243         ret = 0;
2244         reg = &ha->iobase->isp82;
2245         cmd = GET_CMD_SP(sp);
2246         req = vha->req;
2247         rsp = ha->rsp_q_map[0];
2248
2249         /* So we know we haven't pci_map'ed anything yet */
2250         tot_dsds = 0;
2251
2252         dbval = 0x04 | (ha->portnum << 5);
2253
2254         /* Send marker if required */
2255         if (vha->marker_needed != 0) {
2256                 if (qla2x00_marker(vha, req,
2257                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2258                         ql_log(ql_log_warn, vha, 0x300c,
2259                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2260                         return QLA_FUNCTION_FAILED;
2261                 }
2262                 vha->marker_needed = 0;
2263         }
2264
2265         /* Acquire ring specific lock */
2266         spin_lock_irqsave(&ha->hardware_lock, flags);
2267
2268         /* Check for room in outstanding command list. */
2269         handle = req->current_outstanding_cmd;
2270         for (index = 1; index < req->num_outstanding_cmds; index++) {
2271                 handle++;
2272                 if (handle == req->num_outstanding_cmds)
2273                         handle = 1;
2274                 if (!req->outstanding_cmds[handle])
2275                         break;
2276         }
2277         if (index == req->num_outstanding_cmds)
2278                 goto queuing_error;
2279
2280         /* Map the sg table so we have an accurate count of sg entries needed */
2281         if (scsi_sg_count(cmd)) {
2282                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2283                     scsi_sg_count(cmd), cmd->sc_data_direction);
2284                 if (unlikely(!nseg))
2285                         goto queuing_error;
2286         } else
2287                 nseg = 0;
2288
2289         tot_dsds = nseg;
2290
2291         if (tot_dsds > ql2xshiftctondsd) {
2292                 struct cmd_type_6 *cmd_pkt;
2293                 uint16_t more_dsd_lists = 0;
2294                 struct dsd_dma *dsd_ptr;
2295                 uint16_t i;
2296
2297                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2298                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2299                         ql_dbg(ql_dbg_io, vha, 0x300d,
2300                             "Num of DSD list %d is than %d for cmd=%p.\n",
2301                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2302                             cmd);
2303                         goto queuing_error;
2304                 }
2305
2306                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2307                         goto sufficient_dsds;
2308                 else
2309                         more_dsd_lists -= ha->gbl_dsd_avail;
2310
2311                 for (i = 0; i < more_dsd_lists; i++) {
2312                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2313                         if (!dsd_ptr) {
2314                                 ql_log(ql_log_fatal, vha, 0x300e,
2315                                     "Failed to allocate memory for dsd_dma "
2316                                     "for cmd=%p.\n", cmd);
2317                                 goto queuing_error;
2318                         }
2319
2320                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2321                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2322                         if (!dsd_ptr->dsd_addr) {
2323                                 kfree(dsd_ptr);
2324                                 ql_log(ql_log_fatal, vha, 0x300f,
2325                                     "Failed to allocate memory for dsd_addr "
2326                                     "for cmd=%p.\n", cmd);
2327                                 goto queuing_error;
2328                         }
2329                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2330                         ha->gbl_dsd_avail++;
2331                 }
2332
2333 sufficient_dsds:
2334                 req_cnt = 1;
2335
2336                 if (req->cnt < (req_cnt + 2)) {
2337                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2338                                 &reg->req_q_out[0]);
2339                         if (req->ring_index < cnt)
2340                                 req->cnt = cnt - req->ring_index;
2341                         else
2342                                 req->cnt = req->length -
2343                                         (req->ring_index - cnt);
2344                         if (req->cnt < (req_cnt + 2))
2345                                 goto queuing_error;
2346                 }
2347
2348                 ctx = sp->u.scmd.ctx =
2349                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2350                 if (!ctx) {
2351                         ql_log(ql_log_fatal, vha, 0x3010,
2352                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2353                         goto queuing_error;
2354                 }
2355
2356                 memset(ctx, 0, sizeof(struct ct6_dsd));
2357                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2358                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2359                 if (!ctx->fcp_cmnd) {
2360                         ql_log(ql_log_fatal, vha, 0x3011,
2361                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2362                         goto queuing_error;
2363                 }
2364
2365                 /* Initialize the DSD list and dma handle */
2366                 INIT_LIST_HEAD(&ctx->dsd_list);
2367                 ctx->dsd_use_cnt = 0;
2368
2369                 if (cmd->cmd_len > 16) {
2370                         additional_cdb_len = cmd->cmd_len - 16;
2371                         if ((cmd->cmd_len % 4) != 0) {
2372                                 /* SCSI command bigger than 16 bytes must be
2373                                  * multiple of 4
2374                                  */
2375                                 ql_log(ql_log_warn, vha, 0x3012,
2376                                     "scsi cmd len %d not multiple of 4 "
2377                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2378                                 goto queuing_error_fcp_cmnd;
2379                         }
2380                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2381                 } else {
2382                         additional_cdb_len = 0;
2383                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2384                 }
2385
2386                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2387                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2388
2389                 /* Zero out remaining portion of packet. */
2390                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2391                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2392                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2393                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2394
2395                 /* Set NPORT-ID and LUN number*/
2396                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2397                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2398                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2399                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2400                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2401
2402                 /* Build IOCB segments */
2403                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2404                         goto queuing_error_fcp_cmnd;
2405
2406                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2407                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2408
2409                 /* build FCP_CMND IU */
2410                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2411                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2412                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2413
2414                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2415                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2416                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2417                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2418
2419                 /*
2420                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2421                  */
2422                 if (scsi_populate_tag_msg(cmd, tag)) {
2423                         switch (tag[0]) {
2424                         case HEAD_OF_QUEUE_TAG:
2425                                 ctx->fcp_cmnd->task_attribute =
2426                                     TSK_HEAD_OF_QUEUE;
2427                                 break;
2428                         case ORDERED_QUEUE_TAG:
2429                                 ctx->fcp_cmnd->task_attribute =
2430                                     TSK_ORDERED;
2431                                 break;
2432                         }
2433                 }
2434
2435                 /* Populate the FCP_PRIO. */
2436                 if (ha->flags.fcp_prio_enabled)
2437                         ctx->fcp_cmnd->task_attribute |=
2438                             sp->fcport->fcp_prio << 3;
2439
2440                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2441
2442                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2443                     additional_cdb_len);
2444                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2445
2446                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2447                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2448                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2449                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2450                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2451
2452                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2453                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2454                 /* Set total data segment count. */
2455                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2456                 /* Specify response queue number where
2457                  * completion should happen
2458                  */
2459                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2460         } else {
2461                 struct cmd_type_7 *cmd_pkt;
2462                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2463                 if (req->cnt < (req_cnt + 2)) {
2464                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2465                             &reg->req_q_out[0]);
2466                         if (req->ring_index < cnt)
2467                                 req->cnt = cnt - req->ring_index;
2468                         else
2469                                 req->cnt = req->length -
2470                                         (req->ring_index - cnt);
2471                 }
2472                 if (req->cnt < (req_cnt + 2))
2473                         goto queuing_error;
2474
2475                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2476                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2477
2478                 /* Zero out remaining portion of packet. */
2479                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2480                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2481                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2482                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2483
2484                 /* Set NPORT-ID and LUN number*/
2485                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2486                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2487                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2488                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2489                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2490
2491                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2492                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2493                     sizeof(cmd_pkt->lun));
2494
2495                 /*
2496                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2497                  */
2498                 if (scsi_populate_tag_msg(cmd, tag)) {
2499                         switch (tag[0]) {
2500                         case HEAD_OF_QUEUE_TAG:
2501                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2502                                 break;
2503                         case ORDERED_QUEUE_TAG:
2504                                 cmd_pkt->task = TSK_ORDERED;
2505                                 break;
2506                         }
2507                 }
2508
2509                 /* Populate the FCP_PRIO. */
2510                 if (ha->flags.fcp_prio_enabled)
2511                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2512
2513                 /* Load SCSI command packet. */
2514                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2515                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2516
2517                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2518
2519                 /* Build IOCB segments */
2520                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2521
2522                 /* Set total data segment count. */
2523                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2524                 /* Specify response queue number where
2525                  * completion should happen.
2526                  */
2527                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2528
2529         }
2530         /* Build command packet. */
2531         req->current_outstanding_cmd = handle;
2532         req->outstanding_cmds[handle] = sp;
2533         sp->handle = handle;
2534         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2535         req->cnt -= req_cnt;
2536         wmb();
2537
2538         /* Adjust ring index. */
2539         req->ring_index++;
2540         if (req->ring_index == req->length) {
2541                 req->ring_index = 0;
2542                 req->ring_ptr = req->ring;
2543         } else
2544                 req->ring_ptr++;
2545
2546         sp->flags |= SRB_DMA_VALID;
2547
2548         /* Set chip new ring index. */
2549         /* write, read and verify logic */
2550         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2551         if (ql2xdbwr)
2552                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2553         else {
2554                 WRT_REG_DWORD(
2555                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2556                         dbval);
2557                 wmb();
2558                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2559                         WRT_REG_DWORD(
2560                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2561                                 dbval);
2562                         wmb();
2563                 }
2564         }
2565
2566         /* Manage unprocessed RIO/ZIO commands in response queue. */
2567         if (vha->flags.process_response_queue &&
2568             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2569                 qla24xx_process_response_queue(vha, rsp);
2570
2571         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2572         return QLA_SUCCESS;
2573
2574 queuing_error_fcp_cmnd:
2575         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2576 queuing_error:
2577         if (tot_dsds)
2578                 scsi_dma_unmap(cmd);
2579
2580         if (sp->u.scmd.ctx) {
2581                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2582                 sp->u.scmd.ctx = NULL;
2583         }
2584         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2585
2586         return QLA_FUNCTION_FAILED;
2587 }
2588
2589 int
2590 qla2x00_start_sp(srb_t *sp)
2591 {
2592         int rval;
2593         struct qla_hw_data *ha = sp->fcport->vha->hw;
2594         void *pkt;
2595         unsigned long flags;
2596
2597         rval = QLA_FUNCTION_FAILED;
2598         spin_lock_irqsave(&ha->hardware_lock, flags);
2599         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2600         if (!pkt) {
2601                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2602                     "qla2x00_alloc_iocbs failed.\n");
2603                 goto done;
2604         }
2605
2606         rval = QLA_SUCCESS;
2607         switch (sp->type) {
2608         case SRB_LOGIN_CMD:
2609                 IS_FWI2_CAPABLE(ha) ?
2610                     qla24xx_login_iocb(sp, pkt) :
2611                     qla2x00_login_iocb(sp, pkt);
2612                 break;
2613         case SRB_LOGOUT_CMD:
2614                 IS_FWI2_CAPABLE(ha) ?
2615                     qla24xx_logout_iocb(sp, pkt) :
2616                     qla2x00_logout_iocb(sp, pkt);
2617                 break;
2618         case SRB_ELS_CMD_RPT:
2619         case SRB_ELS_CMD_HST:
2620                 qla24xx_els_iocb(sp, pkt);
2621                 break;
2622         case SRB_CT_CMD:
2623                 IS_FWI2_CAPABLE(ha) ?
2624                     qla24xx_ct_iocb(sp, pkt) :
2625                     qla2x00_ct_iocb(sp, pkt);
2626                 break;
2627         case SRB_ADISC_CMD:
2628                 IS_FWI2_CAPABLE(ha) ?
2629                     qla24xx_adisc_iocb(sp, pkt) :
2630                     qla2x00_adisc_iocb(sp, pkt);
2631                 break;
2632         case SRB_TM_CMD:
2633                 IS_QLAFX00(ha) ?
2634                     qlafx00_tm_iocb(sp, pkt) :
2635                     qla24xx_tm_iocb(sp, pkt);
2636                 break;
2637         case SRB_FXIOCB_DCMD:
2638         case SRB_FXIOCB_BCMD:
2639                 qlafx00_fxdisc_iocb(sp, pkt);
2640                 break;
2641         case SRB_ABT_CMD:
2642                 qlafx00_abort_iocb(sp, pkt);
2643                 break;
2644         default:
2645                 break;
2646         }
2647
2648         wmb();
2649         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2650 done:
2651         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2652         return rval;
2653 }
2654
2655 static void
2656 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2657                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2658 {
2659         uint16_t avail_dsds;
2660         uint32_t *cur_dsd;
2661         uint32_t req_data_len = 0;
2662         uint32_t rsp_data_len = 0;
2663         struct scatterlist *sg;
2664         int index;
2665         int entry_count = 1;
2666         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2667
2668         /*Update entry type to indicate bidir command */
2669         *((uint32_t *)(&cmd_pkt->entry_type)) =
2670                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2671
2672         /* Set the transfer direction, in this set both flags
2673          * Also set the BD_WRAP_BACK flag, firmware will take care
2674          * assigning DID=SID for outgoing pkts.
2675          */
2676         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2677         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2678         cmd_pkt->control_flags =
2679                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2680                                                         BD_WRAP_BACK);
2681
2682         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2683         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2684         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2685         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2686
2687         vha->bidi_stats.transfer_bytes += req_data_len;
2688         vha->bidi_stats.io_count++;
2689
2690         /* Only one dsd is available for bidirectional IOCB, remaining dsds
2691          * are bundled in continuation iocb
2692          */
2693         avail_dsds = 1;
2694         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2695
2696         index = 0;
2697
2698         for_each_sg(bsg_job->request_payload.sg_list, sg,
2699                                 bsg_job->request_payload.sg_cnt, index) {
2700                 dma_addr_t sle_dma;
2701                 cont_a64_entry_t *cont_pkt;
2702
2703                 /* Allocate additional continuation packets */
2704                 if (avail_dsds == 0) {
2705                         /* Continuation type 1 IOCB can accomodate
2706                          * 5 DSDS
2707                          */
2708                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2709                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2710                         avail_dsds = 5;
2711                         entry_count++;
2712                 }
2713                 sle_dma = sg_dma_address(sg);
2714                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2715                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2716                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2717                 avail_dsds--;
2718         }
2719         /* For read request DSD will always goes to continuation IOCB
2720          * and follow the write DSD. If there is room on the current IOCB
2721          * then it is added to that IOCB else new continuation IOCB is
2722          * allocated.
2723          */
2724         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2725                                 bsg_job->reply_payload.sg_cnt, index) {
2726                 dma_addr_t sle_dma;
2727                 cont_a64_entry_t *cont_pkt;
2728
2729                 /* Allocate additional continuation packets */
2730                 if (avail_dsds == 0) {
2731                         /* Continuation type 1 IOCB can accomodate
2732                          * 5 DSDS
2733                          */
2734                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2735                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2736                         avail_dsds = 5;
2737                         entry_count++;
2738                 }
2739                 sle_dma = sg_dma_address(sg);
2740                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2741                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2742                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2743                 avail_dsds--;
2744         }
2745         /* This value should be same as number of IOCB required for this cmd */
2746         cmd_pkt->entry_count = entry_count;
2747 }
2748
2749 int
2750 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2751 {
2752
2753         struct qla_hw_data *ha = vha->hw;
2754         unsigned long flags;
2755         uint32_t handle;
2756         uint32_t index;
2757         uint16_t req_cnt;
2758         uint16_t cnt;
2759         uint32_t *clr_ptr;
2760         struct cmd_bidir *cmd_pkt = NULL;
2761         struct rsp_que *rsp;
2762         struct req_que *req;
2763         int rval = EXT_STATUS_OK;
2764
2765         rval = QLA_SUCCESS;
2766
2767         rsp = ha->rsp_q_map[0];
2768         req = vha->req;
2769
2770         /* Send marker if required */
2771         if (vha->marker_needed != 0) {
2772                 if (qla2x00_marker(vha, req,
2773                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2774                         return EXT_STATUS_MAILBOX;
2775                 vha->marker_needed = 0;
2776         }
2777
2778         /* Acquire ring specific lock */
2779         spin_lock_irqsave(&ha->hardware_lock, flags);
2780
2781         /* Check for room in outstanding command list. */
2782         handle = req->current_outstanding_cmd;
2783         for (index = 1; index < req->num_outstanding_cmds; index++) {
2784                 handle++;
2785         if (handle == req->num_outstanding_cmds)
2786                 handle = 1;
2787         if (!req->outstanding_cmds[handle])
2788                 break;
2789         }
2790
2791         if (index == req->num_outstanding_cmds) {
2792                 rval = EXT_STATUS_BUSY;
2793                 goto queuing_error;
2794         }
2795
2796         /* Calculate number of IOCB required */
2797         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2798
2799         /* Check for room on request queue. */
2800         if (req->cnt < req_cnt + 2) {
2801                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2802
2803                 if  (req->ring_index < cnt)
2804                         req->cnt = cnt - req->ring_index;
2805                 else
2806                         req->cnt = req->length -
2807                                 (req->ring_index - cnt);
2808         }
2809         if (req->cnt < req_cnt + 2) {
2810                 rval = EXT_STATUS_BUSY;
2811                 goto queuing_error;
2812         }
2813
2814         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2815         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2816
2817         /* Zero out remaining portion of packet. */
2818         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2819         clr_ptr = (uint32_t *)cmd_pkt + 2;
2820         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2821
2822         /* Set NPORT-ID  (of vha)*/
2823         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2824         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2825         cmd_pkt->port_id[1] = vha->d_id.b.area;
2826         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2827
2828         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2829         cmd_pkt->entry_status = (uint8_t) rsp->id;
2830         /* Build command packet. */
2831         req->current_outstanding_cmd = handle;
2832         req->outstanding_cmds[handle] = sp;
2833         sp->handle = handle;
2834         req->cnt -= req_cnt;
2835
2836         /* Send the command to the firmware */
2837         wmb();
2838         qla2x00_start_iocbs(vha, req);
2839 queuing_error:
2840         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2841         return rval;
2842 }