/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

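/*
 * qla24xx_allocate_vp_id() - grab the first free vp_id for a new vport.
 *
 * Scans ha->vp_idx_map for a free index under ha->vport_lock, marks it
 * used, links the vport onto ha->vp_list and registers it in the
 * target-mode vp map.  Returns the allocated vp_id, or a value greater
 * than ha->max_npiv_vports when no index is free.
 */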
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);

        qlt_update_vp_map(vha, SET_VP_IDX);

        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}

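/*
 * qla24xx_deallocate_vp_id() - release a vport's vp_id on teardown.
 *
 * Sleeps in a poll loop until the vport's vref_count drops to zero,
 * then unlinks it from ha->vp_list, clears its target-mode vp map entry
 * and frees its index in ha->vp_idx_map.
 */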
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport from
         * the list.
         * Lock needs to be held for safe removal from the list (it
         * ensures no active vp_list traversal while the vport is removed
         * from the queue).
         */
        spin_lock_irqsave(&ha->vport_slock, flags);
        while (atomic_read(&vha->vref_count)) {
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                msleep(500);

                spin_lock_irqsave(&ha->vport_slock, flags);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}

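/*
 * qla24xx_find_vhost_by_name() - look up a vport by WWPN.
 *
 * Walks ha->vp_list under ha->vport_slock and returns the vport whose
 * port_name matches, or NULL if none does.
 */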
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when device goes offline.
 *
 * Input:
 *      vha = virtual host adapter block pointer.
 *
 * Return:
 *      None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in contexts other than vp create,
         * disable or delete, make sure it is synchronized with the
         * vport delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}

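/*
 * qla24xx_disable_vp() - take a vport offline.
 *
 * Logs out the vport via the VP control mailbox command, marks its loop
 * down, drops its port id from the target-mode vp map and marks all of
 * its fcports lost.  Returns 0 on success, -1 if the control command
 * failed.
 */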
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        /* Remove port id from vp target map */
        qlt_update_vp_map(vha, RESET_AL_PA);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}

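/*
 * qla24xx_enable_vp() - bring a vport online.
 *
 * Requires the physical port to be up and the topology to be fabric
 * (ISP_CFG_F); otherwise the vport is left in the link-down state.
 * Returns 0 on success, 1 on failure.
 */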
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
                atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
                !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}

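/*
 * qla24xx_configure_vp() - complete vport configuration once a vp_id
 * has been acquired: enable RSCN reception for the vport (change
 * request #3), finish host setup via qla24xx_configure_vhba() and mark
 * the vport active.
 */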
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        }
        /* Corresponds to SCR enabled */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

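/*
 * qla2x00_alert_all_vps() - fan an async event out to every vport.
 *
 * For each vport on ha->vp_list, relevant link and RSCN mailbox events
 * are forwarded to qla2x00_async_event().  The vref_count is bumped so
 * the vport cannot be deleted while vport_slock is dropped around the
 * call.
 */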
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

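/*
 * qla2x00_vp_abort_isp() - vport-side handling of an ISP abort.
 *
 * The physical port drives the actual recovery; the vport only marks
 * its loop down, logs itself out if no ISP reset is in flight, and then
 * re-enables itself via qla24xx_enable_vp().
 */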
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset vport, we need to log it out first.  Note: this
         * control_vp can fail if ISP reset is already issued, this is
         * expected, as the vp would be already logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
        return qla24xx_enable_vp(vha);
}

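/*
 * qla2x00_do_dpc_vp() - per-vport DPC work: drain queued work, finish
 * port configuration once a vp_id has been acquired, and handle fcport
 * updates, relogins and loop resync as flagged in vha->dpc_flags.
 */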
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        qla2x00_do_work(vha);

        if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                /* VP acquired. complete port configuration */
                ql_dbg(ql_dbg_dpc, vha, 0x4014,
                    "Configure VP scheduled.\n");
                qla24xx_configure_vp(vha);
                ql_dbg(ql_dbg_dpc, vha, 0x4015,
                    "Configure VP end.\n");
                return 0;
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
                !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
                atomic_read(&vha->loop_state) != LOOP_DOWN) {

                ql_dbg(ql_dbg_dpc, vha, 0x4018,
                    "Relogin needed scheduled.\n");
                qla2x00_relogin(vha);
                ql_dbg(ql_dbg_dpc, vha, 0x4019,
                    "Relogin needed end.\n");
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}

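/*
 * qla2x00_do_dpc_all_vps() - run the per-vport DPC for every vport on
 * the physical port.  Only runs on the base port (vp_idx == 0) in a
 * fabric (ISP_CFG_F) topology; vref_count pins each vport while
 * vport_slock is dropped around the qla2x00_do_dpc_vp() call.
 */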
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        ret = qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

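/*
 * qla24xx_vport_create_req_sanity_check() - validate a vport create
 * request from the FC transport: initiator role only, NPIV supported by
 * F/W, H/W and the attached switch, WWPN unique, and the vport count
 * still under ha->max_npiv_vports.  Returns 0 or a VPCERR_* code.
 */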
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check for a unique WWPN */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check against the max NPIV vport limit */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger "
                    "than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}

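/*
 * qla24xx_create_vhost() - allocate and initialize a new virtual host
 * for an FC transport vport: create the scsi_qla_host, copy WWNN/WWPN,
 * allocate a vp_id, start the vport timer and set up the SCSI host
 * limits.  Returns the new vha, or NULL on failure.
 */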
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}

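/*
 * qla25xx_free_req_que() / qla25xx_free_rsp_que() - release the DMA
 * ring (and, for response queues, the MSI-X vector) of a dynamically
 * created queue and return its id to the qid bitmap.
 */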
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
                sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req);
        req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}

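/*
 * qla25xx_delete_req_que() / qla25xx_delete_rsp_que() - mark a queue
 * for deletion (BIT_0 of the queue options) and re-issue the queue-init
 * command; on success the queue memory is freed.
 */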
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00ea,
                                    "Couldn't delete req que %d.\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00eb,
                                    "Couldn't delete rsp que %d.\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}

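/*
 * qla25xx_create_req_que() - allocate and initialize an additional
 * request queue: DMA ring, free qid from ha->req_qid_map, linkage to a
 * response queue, and firmware initialization via
 * qla25xx_init_req_que().  Returns the new queue id, or 0 on failure.
 */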
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (req->length + 1) * sizeof(request_t),
                        &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}

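/*
 * qla_do_work() - response-queue work item; processes completions for a
 * multiqueue response queue under the hardware lock from workqueue
 * context instead of the interrupt handler.
 */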
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* Create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (rsp->length + 1) * sizeof(response_t),
                        &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                ql_log(ql_log_warn, base_vha, 0x00e3,
                    "MSIX not enabled.\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode for non-capable adapters */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        if (req >= 0)
                rsp->req = ha->req_q_map[req];
        else
                rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}