/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
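
/*
 * Worked example (added for illustration, not from the original source):
 * the command IOCB itself holds 3 DSDs and each Continuation Type 0 IOCB
 * holds 7 more.  For dsds = 12: 12 - 3 = 9 remaining, 9 / 7 = 1 full
 * continuation plus a partial one, so the function returns 1 + 1 + 1 = 3.
 */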

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
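
/*
 * Worked example (added for illustration, not from the original source):
 * here the command IOCB holds 2 DSDs and each Continuation Type 1 IOCB
 * holds 5, so for dsds = 12: 1 + (10 / 5) = 3 entries, with no extra
 * entry needed since 10 % 5 == 0.
 */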

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
                return 0;
        }

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        case SCSI_PROT_WRITE_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                                                        != QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
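        /*
         * Scan the circular outstanding-command array for a free slot,
         * starting just past the last handle used.  Slot 0 is skipped so
         * that a zero handle always means "no command" (explanatory note
         * added here; it is not a comment from the original source).
         */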
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, base_vha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA82XX(ha)) {
                uint32_t dbval = 0x04 | (ha->portnum << 5);

                /* write, read and verify logic */
                dbval = dbval | (req->id << 8) | (req->ring_index << 16);
                if (ql2xdbwr)
                        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
                else {
                        WRT_REG_DWORD(
                                (unsigned long __iomem *)ha->nxdb_wr_ptr,
                                dbval);
                        wmb();
                        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
                                WRT_REG_DWORD((unsigned long __iomem *)
                                        ha->nxdb_wr_ptr, dbval);
                                wmb();
                        }
                }
        } else if (ha->mqenable) {
                /* Set chip new ring index. */
                WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                RD_REG_DWORD(&ioreg->hccr);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
            __func__, iocbs));
        return iocbs;
}
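
/*
 * Worked example (added for illustration, not from the original source):
 * a Command Type 7 IOCB carries a single DSD and each Continuation Type 1
 * IOCB carries 5 more, so for dsds = 12: 1 + (11 / 5) = 3, plus one extra
 * entry because 11 % 5 != 0, giving 4 IOCB entries in total.
 */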

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
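
/*
 * Note (added for clarity, not from the original source): each mask byte
 * enables validation/replacement of the corresponding tag byte; writing
 * 0xff to all four ref_tag_mask bytes, as done for Type 1 below, makes
 * the firmware check every byte of the 32-bit reference tag.
 */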

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct sd_dif_tuple *spt;
        unsigned char op = scsi_get_prot_op(cmd);

        switch (scsi_get_prot_type(cmd)) {
        /* For TYPE 0 protection: no checking */
        case SCSI_PROT_DIF_TYPE0:
                pkt->ref_tag_mask[0] = 0x00;
                pkt->ref_tag_mask[1] = 0x00;
                pkt->ref_tag_mask[2] = 0x00;
                pkt->ref_tag_mask[3] = 0x00;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                if (!ql2xenablehba_err_chk)
                        break;

                if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
                    op == SCSI_PROT_WRITE_PASS)) {
                        spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                            scsi_prot_sglist(cmd)[0].offset;
                        DEBUG18(printk(KERN_DEBUG
                            "%s(): LBA from user %p, lba = 0x%x\n",
                            __func__, spt, (int)spt->ref_tag));
                        pkt->ref_tag = swab32(spt->ref_tag);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                } else {
                        pkt->ref_tag = cpu_to_le32((uint32_t)
                            (0xffffffff & scsi_get_lba(cmd)));
                        pkt->app_tag = __constant_cpu_to_le16(0);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                }
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        DEBUG18(printk(KERN_DEBUG
            "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
            " app tag = 0x%x, prot SG count %d, cmd lba 0x%x,"
            " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
            (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        uint8_t         *cp;

        scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
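                        /*
                         * Each DSD is 12 bytes: two 32-bit address words
                         * plus a 32-bit length.  The +1 below reserves
                         * room for the entry that chains to the next DSD
                         * list or null-terminates this one (explanatory
                         * note added here, not from the original source).
                         */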
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
                    " len =%d\n", __func__, cur_dsd, i, LSD(sle_dma),
                    MSD(sle_dma), sg_dma_len(sg)));
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        DEBUG18(printk("%s(): User Data buffer= %p:\n",
                            __func__, cp));
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint8_t         *cp;

        cmd = sp->cmd;
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        DEBUG18(printk(KERN_DEBUG
                            "%s(): %p, sg entry %d - addr =0x%x "
                            "0x%x, len =%d\n", __func__, cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
                            __func__, cp));
                }
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
        uint32_t                total_bytes;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = sp->cmd;

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
                    __func__, data_bytes));
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        DEBUG18(printk(KERN_DEBUG
            "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
            vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));

        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        tot_prot_dsds = scsi_prot_sg_count(cmd);
        if (!tot_prot_dsds)
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
            GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
                    __func__));
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }
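
        /*
         * Layout note (added for clarity, not from the original source):
         * an FCP_CMND payload is an 8-byte LUN plus 4 bytes of task
         * attribute/management and additional-CDB-length fields ahead of
         * the CDB (12 bytes total), followed by the 4-byte FCP_DL field,
         * hence fcp_cmnd_len = 12 + CDB length + 4 above.
         */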

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_attribute = 0;
        fcp_cmnd->task_managment = 0;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data "
            "entries %d, data bytes %d, Protection entries %d\n",
            __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
            data_bytes, tot_prot_dsds));

        /* Compute dif len and adjust data len to include protection */
        total_bytes = data_bytes;
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) {
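                /*
                 * Type 1 DIF appends an 8-byte protection tuple (guard,
                 * app tag, ref tag) per logical block, hence the factor
                 * of 8 below (explanatory note, not an original comment).
                 */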
                dif_bytes = (data_bytes / blk_size) * 8;
                total_bytes += dif_bytes;
        }

        if (!ql2xenablehba_err_chk)
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if protection data has to be fetched
                 * with interleaving PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
            " = 0x%x (%d), data block size = 0x%x (%d)\n", __func__,
            vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
            crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

        /* Walks data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        DEBUG18(qla_printk(KERN_INFO, ha,
            "CMD sent FAILED crc_q error:sp = %p\n", sp));
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;

        /* Setup device pointers. */
        ret = 0;

        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                                                        != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
        int                     nseg;
        unsigned long           flags;
        uint32_t                *clr_ptr;
        uint32_t                index;
        uint32_t                handle;
        uint16_t                cnt;
        uint16_t                req_cnt = 0;
        uint16_t                tot_dsds;
        uint16_t                tot_prot_dsds;
        uint16_t                fw_prot_opts = 0;
        struct req_que          *req = NULL;
        struct rsp_que          *rsp = NULL;
        struct scsi_cmnd        *cmd = sp->cmd;
        struct scsi_qla_host    *vha = sp->fcport->vha;
        struct qla_hw_data      *ha = vha->hw;
        struct cmd_type_crc_2   *cmd_pkt;
        uint32_t                status = 0;

#define QDSS_GOT_Q_SPACE        BIT_0

        /* Only process protection in this routine */
        if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL)
                return qla24xx_start_scsi(sp);

        /* Setup device pointers. */
        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }

        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Compute number of required data segments */
        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_DMA_VALID;
        } else
                nseg = 0;

        /* number of required data segments */
        tot_dsds = nseg;

        /* Compute number of required protection segments */
        if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
                    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
                else
                        sp->flags |= SRB_CRC_PROT_DMA_VALID;
        } else {
                nseg = 0;
        }

        req_cnt = 1;
        /* Total Data and protection sg segment(s) */
        tot_prot_dsds = nseg;
        tot_dsds += nseg;
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
        }

        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        status |= QDSS_GOT_Q_SPACE;

        /* Build header part of command packet (excluding the OPCODE). */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        /* Fill-in common area */
        cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Total Data and protection segment(s) */
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Build IOCB segments and adjust for data protection segments */
        if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
            req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
                QLA_SUCCESS)
                goto queuing_error;

        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        cmd_pkt->timeout = __constant_cpu_to_le16(0);
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_SUCCESS;

queuing_error:
        if (status & QDSS_GOT_Q_SPACE) {
                req->outstanding_cmds[handle] = NULL;
                req->cnt += req_cnt;
        }
        /* Cleanup will be performed by the caller (queuecommand) */

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        DEBUG18(qla_printk(KERN_INFO, ha,
            "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
        return QLA_FUNCTION_FAILED;
}
1466
1467
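/**
 * qla25xx_set_que() - Select the response queue for a command.
 * @sp: SCSI Request Block
 * @rsp: set to the response queue to use for completion
 *
 * With CPU affinity enabled, completions are steered to the response
 * queue mapped to the submitting CPU; otherwise the default response
 * queue is used.
 */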
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
        struct scsi_cmnd *cmd = sp->cmd;
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        int affinity = cmd->request->cpu;

        if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
                affinity < ha->max_rsp_queues - 1)
                *rsp = ha->rsp_q_map[affinity + 1];
        else
                *rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */
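/**
 * qla2x00_alloc_iocbs() - Allocate IOCB space on the base request queue.
 * @vha: HA context
 * @sp: SRB to bind to the allocated packet, or NULL for queue space only
 *
 * Claims a free outstanding-command handle for @sp (when supplied),
 * verifies ring space, and returns a zeroed request packet, or NULL
 * if no handle or ring space is available.
 */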
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        uint32_t index, handle;
        request_t *pkt;
        uint16_t cnt, req_cnt;

        pkt = NULL;
        req_cnt = 1;
        handle = 0;

        if (!sp)
                goto skip_cmd_array;

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Prep command array. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;

skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt) {
                if (ha->mqenable)
                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
                else if (IS_QLA82XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
                else if (IS_FWI2_CAPABLE(ha))
                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
                else
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < req_cnt)
                goto queuing_error;

        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
        memset(pkt, 0, REQUEST_ENTRY_SIZE);
        pkt->entry_count = req_cnt;
        pkt->handle = handle;

queuing_error:
        return pkt;
}

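/**
 * qla2x00_start_iocbs() - Notify the ISP of a newly queued IOCB.
 * @sp: SCSI Request Block
 *
 * Advances the request-queue ring index and writes it to the
 * chip-appropriate request-queue in register; ISP82xx hardware is
 * handed off to qla82xx_start_iocbs().
 */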
static void
qla2x00_start_iocbs(srb_t *sp)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(sp);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable) {
                        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                        RD_REG_DWORD(&ioreg->hccr);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                            req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

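/**
 * qla24xx_login_iocb() - Prepare a Login IOCB for ISP24xx and later.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */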
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *lio = ctx->u.iocb_cmd;

        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vp_idx;
}

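/**
 * qla2x00_login_iocb() - Prepare a fabric-login mailbox IOCB (pre-ISP24xx).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */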
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *lio = ctx->u.iocb_cmd;
        uint16_t opts;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
        opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
        opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(opts);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
        }
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

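/**
 * qla24xx_logout_iocb() - Prepare an implicit Logout IOCB for ISP24xx and later.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */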
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO | LCF_IMPL_LOGO);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vp_idx;
}

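/**
 * qla2x00_logout_iocb() - Prepare a fabric-logout mailbox IOCB (pre-ISP24xx).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */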
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
        mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
            cpu_to_le16(sp->fcport->loop_id) :
            cpu_to_le16(sp->fcport->loop_id << 8);
        mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
        mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
            sp->fcport->d_id.b.al_pa);
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
        /* Implicit: mbx->mb10 = 0. */
}

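/**
 * qla24xx_adisc_iocb() - Prepare an ADISC IOCB for ISP24xx and later.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */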
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->vp_index = sp->fcport->vp_idx;
}

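/**
 * qla2x00_adisc_iocb() - Prepare a Get Port Database mailbox IOCB
 * (pre-ISP24xx ADISC equivalent).
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */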
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
        if (HAS_EXTENDED_IDS(ha)) {
                mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
                mbx->mb10 = cpu_to_le16(BIT_0);
        } else {
                mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
        }
        mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
        mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
        mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
        mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
        mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

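/**
 * qla24xx_tm_iocb() - Prepare a Task Management IOCB.
 * @sp: SCSI Request Block
 * @tsk: task management IOCB to populate
 *
 * The LUN field is filled in only for TCF_LUN_RESET requests.
 */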
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
        uint32_t flags;
        unsigned int lun;
        struct fc_port *fcport = sp->fcport;
        scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct srb_ctx *ctx = sp->ctx;
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
        struct req_que *req = vha->req;

        flags = iocb->u.tmf.flags;
        lun = iocb->u.tmf.lun;

        tsk->entry_type = TSK_MGMT_IOCB_TYPE;
        tsk->entry_count = 1;
        tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
        tsk->nport_handle = cpu_to_le16(fcport->loop_id);
        tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
        tsk->control_flags = cpu_to_le32(flags);
        tsk->port_id[0] = fcport->d_id.b.al_pa;
        tsk->port_id[1] = fcport->d_id.b.area;
        tsk->port_id[2] = fcport->d_id.b.domain;
        tsk->vp_index = fcport->vp_idx;

        if (flags == TCF_LUN_RESET) {
                int_to_scsilun(lun, &tsk->lun);
                host_to_fcp_swap((uint8_t *)&tsk->lun,
                    sizeof(tsk->lun));
        }
}

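/**
 * qla24xx_els_iocb() - Prepare an ELS Pass-Through IOCB from a BSG request.
 * @sp: SCSI Request Block
 * @els_iocb: ELS IOCB to populate
 */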
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

        els_iocb->entry_type = ELS_IOCB_TYPE;
        els_iocb->entry_count = 1;
        els_iocb->sys_define = 0;
        els_iocb->entry_status = 0;
        els_iocb->handle = sp->handle;
        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
        els_iocb->vp_index = sp->fcport->vp_idx;
        els_iocb->sof_type = EST_SOFI3;
        els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

        els_iocb->opcode =
            (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
            bsg_job->request->rqst_data.r_els.els_code :
            bsg_job->request->rqst_data.h_els.command_code;
        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
        els_iocb->control_flags = 0;
        els_iocb->rx_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        els_iocb->tx_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);

        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        els_iocb->tx_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        els_iocb->rx_len = cpu_to_le32(sg_dma_len
            (bsg_job->reply_payload.sg_list));
}

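/**
 * qla2x00_ct_iocb() - Prepare a CT Pass-Through (MS) IOCB (pre-ISP24xx)
 * from a BSG request.
 * @sp: SCSI Request Block
 * @ct_iocb: MS IOCB to populate
 */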
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->handle1 = sp->handle;
        SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
        ct_iocb->status = __constant_cpu_to_le16(0);
        ct_iocb->control_flags = __constant_cpu_to_le16(0);
        ct_iocb->timeout = 0;
        ct_iocb->cmd_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->total_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
        ct_iocb->req_bytecount =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->rsp_bytecount =
            cpu_to_le32(bsg_job->reply_payload.payload_len);

        ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

        ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->reply_payload.sg_list)));
        ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
        index = 0;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}

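/**
 * qla24xx_ct_iocb() - Prepare a CT Pass-Through IOCB (ISP24xx and later)
 * from a BSG request.
 * @sp: SCSI Request Block
 * @ct_iocb: CT IOCB to populate
 */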
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        struct scatterlist *sg;
        int index;
        uint16_t tot_dsds;
        scsi_qla_host_t *vha = sp->fcport->vha;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iteration = 0;
        int cont_iocb_prsnt = 0;
        int entry_count = 1;

        ct_iocb->entry_type = CT_IOCB_TYPE;
        ct_iocb->entry_status = 0;
        ct_iocb->sys_define = 0;
        ct_iocb->handle = sp->handle;

        ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        ct_iocb->vp_index = sp->fcport->vp_idx;
        ct_iocb->comp_status = __constant_cpu_to_le16(0);

        ct_iocb->cmd_dsd_count =
            cpu_to_le16(bsg_job->request_payload.sg_cnt);
        ct_iocb->timeout = 0;
        ct_iocb->rsp_dsd_count =
            cpu_to_le16(bsg_job->reply_payload.sg_cnt);
        ct_iocb->rsp_byte_count =
            cpu_to_le32(bsg_job->reply_payload.payload_len);
        ct_iocb->cmd_byte_count =
            cpu_to_le32(bsg_job->request_payload.payload_len);
        ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
            (bsg_job->request_payload.sg_list)));
        ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
            (bsg_job->request_payload.sg_list));

        avail_dsds = 1;
        cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
        index = 0;
        tot_dsds = bsg_job->reply_payload.sg_cnt;

        for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
                        entry_count++;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                loop_iteration++;
                avail_dsds--;
        }
        ct_iocb->entry_count = entry_count;
}

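/**
 * qla2x00_start_sp() - Build and issue the IOCB for a control SRB.
 * @sp: SCSI Request Block
 *
 * Allocates IOCB space under the hardware lock, dispatches to the
 * SRB-type- and ISP-specific builder, and rings the request-queue
 * doorbell.
 *
 * Returns QLA_SUCCESS if the IOCB was issued, else QLA_FUNCTION_FAILED.
 */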
int
qla2x00_start_sp(srb_t *sp)
{
        int rval;
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        void *pkt;
        struct srb_ctx *ctx = sp->ctx;
        unsigned long flags;

        rval = QLA_FUNCTION_FAILED;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
        if (!pkt)
                goto done;

        rval = QLA_SUCCESS;
        switch (ctx->type) {
        case SRB_LOGIN_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_login_iocb(sp, pkt) :
                    qla2x00_login_iocb(sp, pkt);
                break;
        case SRB_LOGOUT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_logout_iocb(sp, pkt) :
                    qla2x00_logout_iocb(sp, pkt);
                break;
        case SRB_ELS_CMD_RPT:
        case SRB_ELS_CMD_HST:
                qla24xx_els_iocb(sp, pkt);
                break;
        case SRB_CT_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_ct_iocb(sp, pkt) :
                    qla2x00_ct_iocb(sp, pkt);
                break;
        case SRB_ADISC_CMD:
                IS_FWI2_CAPABLE(ha) ?
                    qla24xx_adisc_iocb(sp, pkt) :
                    qla2x00_adisc_iocb(sp, pkt);
                break;
        case SRB_TM_CMD:
                qla24xx_tm_iocb(sp, pkt);
                break;
        default:
                break;
        }

        wmb();
        qla2x00_start_iocbs(sp);
done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rval;
}