[SCSI] qla2xxx: Limit mailbox command contention for ADISC requests.
[firefly-linux-kernel-4.4.55.git] / drivers / scsi / qla2xxx / qla_iocb.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15                                                         struct rsp_que *rsp);
16 static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
17
18 static void qla25xx_set_que(srb_t *, struct rsp_que **);
19 /**
20  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
21  * @cmd: SCSI command
22  *
23  * Returns the proper CF_* direction based on CDB.
24  */
25 static inline uint16_t
26 qla2x00_get_cmd_direction(srb_t *sp)
27 {
28         uint16_t cflags;
29
30         cflags = 0;
31
32         /* Set transfer direction */
33         if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
34                 cflags = CF_WRITE;
35                 sp->fcport->vha->hw->qla_stats.output_bytes +=
36                     scsi_bufflen(sp->cmd);
37         } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
38                 cflags = CF_READ;
39                 sp->fcport->vha->hw->qla_stats.input_bytes +=
40                     scsi_bufflen(sp->cmd);
41         }
42         return (cflags);
43 }
44
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	/*
	 * The Command Type 2 IOCB itself carries three DSDs; every
	 * additional group of up to seven DSDs costs one Continuation
	 * Type 0 IOCB.
	 */
	if (dsds <= 3)
		return 1;

	return (uint16_t)(1 + (dsds - 3 + 6) / 7);
}
66
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	/*
	 * The Command Type 3 IOCB itself carries two DSDs; every
	 * additional group of up to five DSDs costs one Continuation
	 * Type 1 IOCB.
	 */
	if (dsds <= 2)
		return 1;

	return (uint16_t)(1 + (dsds - 2 + 4) / 5);
}
88
89 /**
90  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
91  * @ha: HA context
92  *
93  * Returns a pointer to the Continuation Type 0 IOCB packet.
94  */
95 static inline cont_entry_t *
96 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
97 {
98         cont_entry_t *cont_pkt;
99         struct req_que *req = vha->req;
100         /* Adjust ring index. */
101         req->ring_index++;
102         if (req->ring_index == req->length) {
103                 req->ring_index = 0;
104                 req->ring_ptr = req->ring;
105         } else {
106                 req->ring_ptr++;
107         }
108
109         cont_pkt = (cont_entry_t *)req->ring_ptr;
110
111         /* Load packet defaults. */
112         *((uint32_t *)(&cont_pkt->entry_type)) =
113             __constant_cpu_to_le32(CONTINUE_TYPE);
114
115         return (cont_pkt);
116 }
117
118 /**
119  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
120  * @ha: HA context
121  *
122  * Returns a pointer to the continuation type 1 IOCB packet.
123  */
124 static inline cont_a64_entry_t *
125 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
126 {
127         cont_a64_entry_t *cont_pkt;
128
129         struct req_que *req = vha->req;
130         /* Adjust ring index. */
131         req->ring_index++;
132         if (req->ring_index == req->length) {
133                 req->ring_index = 0;
134                 req->ring_ptr = req->ring;
135         } else {
136                 req->ring_ptr++;
137         }
138
139         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
140
141         /* Load packet defaults. */
142         *((uint32_t *)(&cont_pkt->entry_type)) =
143             __constant_cpu_to_le32(CONTINUE_A64_TYPE);
144
145         return (cont_pkt);
146 }
147
148 /**
149  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
150  * capable IOCB types.
151  *
152  * @sp: SRB command to process
153  * @cmd_pkt: Command type 2 IOCB
154  * @tot_dsds: Total number of segments to transfer
155  */
156 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
157     uint16_t tot_dsds)
158 {
159         uint16_t        avail_dsds;
160         uint32_t        *cur_dsd;
161         scsi_qla_host_t *vha;
162         struct scsi_cmnd *cmd;
163         struct scatterlist *sg;
164         int i;
165
166         cmd = sp->cmd;
167
168         /* Update entry type to indicate Command Type 2 IOCB */
169         *((uint32_t *)(&cmd_pkt->entry_type)) =
170             __constant_cpu_to_le32(COMMAND_TYPE);
171
172         /* No data transfer */
173         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
174                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
175                 return;
176         }
177
178         vha = sp->fcport->vha;
179         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
180
181         /* Three DSDs are available in the Command Type 2 IOCB */
182         avail_dsds = 3;
183         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
184
185         /* Load data segments */
186         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
187                 cont_entry_t *cont_pkt;
188
189                 /* Allocate additional continuation packets? */
190                 if (avail_dsds == 0) {
191                         /*
192                          * Seven DSDs are available in the Continuation
193                          * Type 0 IOCB.
194                          */
195                         cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
196                         cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197                         avail_dsds = 7;
198                 }
199
200                 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
201                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
202                 avail_dsds--;
203         }
204 }
205
206 /**
207  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
208  * capable IOCB types.
209  *
210  * @sp: SRB command to process
211  * @cmd_pkt: Command type 3 IOCB
212  * @tot_dsds: Total number of segments to transfer
213  */
214 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
215     uint16_t tot_dsds)
216 {
217         uint16_t        avail_dsds;
218         uint32_t        *cur_dsd;
219         scsi_qla_host_t *vha;
220         struct scsi_cmnd *cmd;
221         struct scatterlist *sg;
222         int i;
223
224         cmd = sp->cmd;
225
226         /* Update entry type to indicate Command Type 3 IOCB */
227         *((uint32_t *)(&cmd_pkt->entry_type)) =
228             __constant_cpu_to_le32(COMMAND_A64_TYPE);
229
230         /* No data transfer */
231         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
232                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
233                 return;
234         }
235
236         vha = sp->fcport->vha;
237         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
238
239         /* Two DSDs are available in the Command Type 3 IOCB */
240         avail_dsds = 2;
241         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
242
243         /* Load data segments */
244         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
245                 dma_addr_t      sle_dma;
246                 cont_a64_entry_t *cont_pkt;
247
248                 /* Allocate additional continuation packets? */
249                 if (avail_dsds == 0) {
250                         /*
251                          * Five DSDs are available in the Continuation
252                          * Type 1 IOCB.
253                          */
254                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
255                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
256                         avail_dsds = 5;
257                 }
258
259                 sle_dma = sg_dma_address(sg);
260                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
261                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
262                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
263                 avail_dsds--;
264         }
265 }
266
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 2/3 IOCB (plus continuation IOCBs as needed) on
 * request queue 0, then advances the request-queue in-pointer register.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* This legacy path always uses the single queue pair 0. */
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	/* Handle 0 is reserved; scan circularly from the last used slot. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		/* Re-read the out-pointer to refresh the free-slot count. */
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	/* First two dwords (type/count/status and handle) are kept. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Ensure the IOCB is in memory before the in-pointer moves. */
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	/* Undo the DMA mapping done above, if any. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
412
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue to place the marker on
 * @rsp: response queue polled while waiting for ring space
 * @loop_id: loop ID (ignored for MK_SYNC_ALL)
 * @lun: LUN (ignored for MK_SYNC_ALL)
 * @type: marker modifier (MK_SYNC_* value)
 *
 * Can be called from both normal and interrupt context.
 *
 * Note: the caller must hold the hardware lock; qla2x00_req_pkt() may
 * temporarily drop and re-acquire it while waiting for a free slot.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	/* Grab (and zero) the next free request-ring entry. */
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	/* MK_SYNC_ALL needs no target/LUN qualification. */
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			/* ISP24xx-and-later marker layout. */
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			/* Legacy ISP marker layout. */
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	/* Ensure the IOCB is in memory before ringing the doorbell. */
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
465
466 int
467 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
468                 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
469                 uint8_t type)
470 {
471         int ret;
472         unsigned long flags = 0;
473
474         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
475         ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
476         spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
477
478         return (ret);
479 }
480
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue to take the packet from
 * @rsp: response queue polled while waiting for a free entry
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 * The lock is temporarily dropped and re-acquired inside the wait loop.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			/* The out-pointer register location depends on the
			 * ISP generation. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_QLA82XX(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp82.req_q_out);
				else if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if  (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
558
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue whose in-pointer is advanced
 *
 * Advances the software ring index/pointer and then notifies the chip of
 * the new in-pointer via the register appropriate for the ISP generation.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		/* Wrap to the start of the ring. */
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		/* ISP82xx signals via a doorbell value encoding port,
		 * queue id and ring index. */
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
			/* Re-issue the doorbell until the read-back value
			 * confirms the write took effect. */
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
					ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}

}
619
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	/* The command IOCB itself holds one DSD; each Continuation
	 * Type 1 IOCB adds up to five more. */
	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
641
642 /**
643  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
644  * IOCB types.
645  *
646  * @sp: SRB command to process
647  * @cmd_pkt: Command type 3 IOCB
648  * @tot_dsds: Total number of segments to transfer
649  */
650 inline void
651 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
652     uint16_t tot_dsds)
653 {
654         uint16_t        avail_dsds;
655         uint32_t        *cur_dsd;
656         scsi_qla_host_t *vha;
657         struct scsi_cmnd *cmd;
658         struct scatterlist *sg;
659         int i;
660         struct req_que *req;
661
662         cmd = sp->cmd;
663
664         /* Update entry type to indicate Command Type 3 IOCB */
665         *((uint32_t *)(&cmd_pkt->entry_type)) =
666             __constant_cpu_to_le32(COMMAND_TYPE_7);
667
668         /* No data transfer */
669         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
670                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
671                 return;
672         }
673
674         vha = sp->fcport->vha;
675         req = vha->req;
676
677         /* Set transfer direction */
678         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
679                 cmd_pkt->task_mgmt_flags =
680                     __constant_cpu_to_le16(TMF_WRITE_DATA);
681                 sp->fcport->vha->hw->qla_stats.output_bytes +=
682                     scsi_bufflen(sp->cmd);
683         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
684                 cmd_pkt->task_mgmt_flags =
685                     __constant_cpu_to_le16(TMF_READ_DATA);
686                 sp->fcport->vha->hw->qla_stats.input_bytes +=
687                     scsi_bufflen(sp->cmd);
688         }
689
690         /* One DSD is available in the Command Type 3 IOCB */
691         avail_dsds = 1;
692         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
693
694         /* Load data segments */
695
696         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
697                 dma_addr_t      sle_dma;
698                 cont_a64_entry_t *cont_pkt;
699
700                 /* Allocate additional continuation packets? */
701                 if (avail_dsds == 0) {
702                         /*
703                          * Five DSDs are available in the Continuation
704                          * Type 1 IOCB.
705                          */
706                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
707                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
708                         avail_dsds = 5;
709                 }
710
711                 sle_dma = sg_dma_address(sg);
712                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
713                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
714                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
715                 avail_dsds--;
716         }
717 }
718
719
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Builds a Command Type 7 IOCB (plus Continuation Type 1 IOCBs as needed)
 * and advances the request-queue in-pointer register.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	/* Select the response queue based on the submitting CPU. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	/* Handle 0 is reserved; scan circularly from the last used slot. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		/* Re-read the out-pointer to refresh the free-slot count. */
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* LUN and CDB go out on the wire in FCP byte order. */
	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Ensure the IOCB is in memory before the in-pointer moves. */
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* Undo the DMA mapping done above, if any. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
871
872 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
873 {
874         struct scsi_cmnd *cmd = sp->cmd;
875         struct qla_hw_data *ha = sp->fcport->vha->hw;
876         int affinity = cmd->request->cpu;
877
878         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
879                 affinity < ha->max_rsp_queues - 1)
880                 *rsp = ha->rsp_q_map[affinity + 1];
881          else
882                 *rsp = ha->rsp_q_map[0];
883 }
884
885 /* Generic Control-SRB manipulation functions. */
886
/*
 * qla2x00_alloc_iocbs() - Allocate a request-ring entry on the default
 * request queue for a control (non-SCSI) SRB.
 * @sp: SRB to associate with the new packet
 *
 * Must be called with the hardware lock held.  On success the SRB is
 * recorded in the outstanding-command array, one ring slot is consumed,
 * and a zeroed request entry (handle/entry_count pre-filled) is
 * returned.  Returns NULL when no free handle or no ring space exists.
 */
static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;	/* control SRBs always occupy a single IOCB */

	/*
	 * Check for room in outstanding command list.  The search wraps
	 * from the last used handle; slot 0 is never assigned (handles
	 * wrap back to 1), so 0 appears reserved as the invalid handle.
	 */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/*
	 * Check for room on request queue.  The cached free count is
	 * refreshed from the chip's queue-out pointer only when it looks
	 * exhausted; the register location depends on the chip family.
	 */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		/* Free slots = distance from producer to consumer index. */
		if  (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet: claim the handle and the ring slot. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	/* pkt is still NULL on any error path above. */
	return pkt;
}
946
947 static void
948 qla2x00_start_iocbs(srb_t *sp)
949 {
950         struct qla_hw_data *ha = sp->fcport->vha->hw;
951         struct req_que *req = ha->req_q_map[0];
952         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
953         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
954
955         if (IS_QLA82XX(ha)) {
956                 qla82xx_start_iocbs(sp);
957         } else {
958                 /* Adjust ring index. */
959                 req->ring_index++;
960                 if (req->ring_index == req->length) {
961                         req->ring_index = 0;
962                         req->ring_ptr = req->ring;
963                 } else
964                         req->ring_ptr++;
965
966                 /* Set chip new ring index. */
967                 if (ha->mqenable) {
968                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
969                         RD_REG_DWORD(&ioreg->hccr);
970                 } else if (IS_QLA82XX(ha)) {
971                         qla82xx_start_iocbs(sp);
972                 } else if (IS_FWI2_CAPABLE(ha)) {
973                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
974                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
975                 } else {
976                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
977                                 req->ring_index);
978                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
979                 }
980         }
981 }
982
983 static void
984 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
985 {
986         struct srb_logio *lio = sp->ctx;
987
988         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
989         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
990         if (lio->flags & SRB_LOGIN_COND_PLOGI)
991                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
992         if (lio->flags & SRB_LOGIN_SKIP_PRLI)
993                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
994         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
995         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
996         logio->port_id[1] = sp->fcport->d_id.b.area;
997         logio->port_id[2] = sp->fcport->d_id.b.domain;
998         logio->vp_index = sp->fcport->vp_idx;
999 }
1000
1001 static void
1002 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1003 {
1004         struct qla_hw_data *ha = sp->fcport->vha->hw;
1005         struct srb_logio *lio = sp->ctx;
1006         uint16_t opts;
1007
1008         mbx->entry_type = MBX_IOCB_TYPE;;
1009         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1010         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1011         opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
1012         opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
1013         if (HAS_EXTENDED_IDS(ha)) {
1014                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1015                 mbx->mb10 = cpu_to_le16(opts);
1016         } else {
1017                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1018         }
1019         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1020         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1021             sp->fcport->d_id.b.al_pa);
1022         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1023 }
1024
1025 static void
1026 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1027 {
1028         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1029         logio->control_flags =
1030             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1031         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1032         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1033         logio->port_id[1] = sp->fcport->d_id.b.area;
1034         logio->port_id[2] = sp->fcport->d_id.b.domain;
1035         logio->vp_index = sp->fcport->vp_idx;
1036 }
1037
1038 static void
1039 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1040 {
1041         struct qla_hw_data *ha = sp->fcport->vha->hw;
1042
1043         mbx->entry_type = MBX_IOCB_TYPE;;
1044         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1045         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1046         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1047             cpu_to_le16(sp->fcport->loop_id):
1048             cpu_to_le16(sp->fcport->loop_id << 8);
1049         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1050         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1051             sp->fcport->d_id.b.al_pa);
1052         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1053         /* Implicit: mbx->mbx10 = 0. */
1054 }
1055
1056 static void
1057 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1058 {
1059         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1060         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1061         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1062         logio->vp_index = sp->fcport->vp_idx;
1063 }
1064
1065 static void
1066 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1067 {
1068         struct qla_hw_data *ha = sp->fcport->vha->hw;
1069
1070         mbx->entry_type = MBX_IOCB_TYPE;
1071         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1072         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1073         if (HAS_EXTENDED_IDS(ha)) {
1074                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1075                 mbx->mb10 = cpu_to_le16(BIT_0);
1076         } else {
1077                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1078         }
1079         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1080         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1081         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1082         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1083         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1084 }
1085
1086 static void
1087 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1088 {
1089         struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
1090
1091         els_iocb->entry_type = ELS_IOCB_TYPE;
1092         els_iocb->entry_count = 1;
1093         els_iocb->sys_define = 0;
1094         els_iocb->entry_status = 0;
1095         els_iocb->handle = sp->handle;
1096         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1097         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1098         els_iocb->vp_index = sp->fcport->vp_idx;
1099         els_iocb->sof_type = EST_SOFI3;
1100         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1101
1102         els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
1103             bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
1104         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1105         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1106         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1107         els_iocb->control_flags = 0;
1108         els_iocb->rx_byte_count =
1109             cpu_to_le32(bsg_job->reply_payload.payload_len);
1110         els_iocb->tx_byte_count =
1111             cpu_to_le32(bsg_job->request_payload.payload_len);
1112
1113         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1114             (bsg_job->request_payload.sg_list)));
1115         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1116             (bsg_job->request_payload.sg_list)));
1117         els_iocb->tx_len = cpu_to_le32(sg_dma_len
1118             (bsg_job->request_payload.sg_list));
1119
1120         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1121             (bsg_job->reply_payload.sg_list)));
1122         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1123             (bsg_job->reply_payload.sg_list)));
1124         els_iocb->rx_len = cpu_to_le32(sg_dma_len
1125             (bsg_job->reply_payload.sg_list));
1126 }
1127
1128 static void
1129 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1130 {
1131         uint16_t        avail_dsds;
1132         uint32_t        *cur_dsd;
1133         struct scatterlist *sg;
1134         int index;
1135         uint16_t tot_dsds;
1136         scsi_qla_host_t *vha = sp->fcport->vha;
1137         struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
1138         int loop_iterartion = 0;
1139         int cont_iocb_prsnt = 0;
1140         int entry_count = 1;
1141
1142         ct_iocb->entry_type = CT_IOCB_TYPE;
1143         ct_iocb->entry_status = 0;
1144         ct_iocb->sys_define = 0;
1145         ct_iocb->handle = sp->handle;
1146
1147         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1148         ct_iocb->vp_index = sp->fcport->vp_idx;
1149         ct_iocb->comp_status = __constant_cpu_to_le16(0);
1150
1151         ct_iocb->cmd_dsd_count =
1152             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1153         ct_iocb->timeout = 0;
1154         ct_iocb->rsp_dsd_count =
1155             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1156         ct_iocb->rsp_byte_count =
1157             cpu_to_le32(bsg_job->reply_payload.payload_len);
1158         ct_iocb->cmd_byte_count =
1159             cpu_to_le32(bsg_job->request_payload.payload_len);
1160         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1161             (bsg_job->request_payload.sg_list)));
1162         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1163            (bsg_job->request_payload.sg_list)));
1164         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1165             (bsg_job->request_payload.sg_list));
1166
1167         avail_dsds = 1;
1168         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1169         index = 0;
1170         tot_dsds = bsg_job->reply_payload.sg_cnt;
1171
1172         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1173                 dma_addr_t       sle_dma;
1174                 cont_a64_entry_t *cont_pkt;
1175
1176                 /* Allocate additional continuation packets? */
1177                 if (avail_dsds == 0) {
1178                         /*
1179                         * Five DSDs are available in the Cont.
1180                         * Type 1 IOCB.
1181                                */
1182                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1183                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1184                         avail_dsds = 5;
1185                         cont_iocb_prsnt = 1;
1186                         entry_count++;
1187                 }
1188
1189                 sle_dma = sg_dma_address(sg);
1190                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1191                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1192                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1193                 loop_iterartion++;
1194                 avail_dsds--;
1195         }
1196         ct_iocb->entry_count = entry_count;
1197 }
1198
1199 int
1200 qla2x00_start_sp(srb_t *sp)
1201 {
1202         int rval;
1203         struct qla_hw_data *ha = sp->fcport->vha->hw;
1204         void *pkt;
1205         struct srb_ctx *ctx = sp->ctx;
1206         unsigned long flags;
1207
1208         rval = QLA_FUNCTION_FAILED;
1209         spin_lock_irqsave(&ha->hardware_lock, flags);
1210         pkt = qla2x00_alloc_iocbs(sp);
1211         if (!pkt)
1212                 goto done;
1213
1214         rval = QLA_SUCCESS;
1215         switch (ctx->type) {
1216         case SRB_LOGIN_CMD:
1217                 IS_FWI2_CAPABLE(ha) ?
1218                     qla24xx_login_iocb(sp, pkt) :
1219                     qla2x00_login_iocb(sp, pkt);
1220                 break;
1221         case SRB_LOGOUT_CMD:
1222                 IS_FWI2_CAPABLE(ha) ?
1223                     qla24xx_logout_iocb(sp, pkt) :
1224                     qla2x00_logout_iocb(sp, pkt);
1225                 break;
1226         case SRB_ELS_CMD_RPT:
1227         case SRB_ELS_CMD_HST:
1228                 qla24xx_els_iocb(sp, pkt);
1229                 break;
1230         case SRB_CT_CMD:
1231                 qla24xx_ct_iocb(sp, pkt);
1232                 break;
1233         case SRB_ADISC_CMD:
1234                 IS_FWI2_CAPABLE(ha) ?
1235                     qla24xx_adisc_iocb(sp, pkt) :
1236                     qla2x00_adisc_iocb(sp, pkt);
1237                 break;
1238         default:
1239                 break;
1240         }
1241
1242         wmb();
1243         qla2x00_start_iocbs(sp);
1244 done:
1245         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1246         return rval;
1247 }