diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 207a43d952fa47e290894c2ade6dc19df3b1f026..56f73682d4bd571f792faba2859e8266df30c834 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -918,12 +918,16 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
                lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
-                       !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+                       !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
-       else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
-               ndlp = piocbq->context_un.ndlp;
-       else
+       } else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
+               if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+                       ndlp = NULL;
+               else
+                       ndlp = piocbq->context_un.ndlp;
+       } else {
                ndlp = piocbq->context1;
+       }
 
        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
@@ -2213,6 +2217,46 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        else
                mempool_free(pmb, phba->mbox_mem_pool);
 }
+/**
+ * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the unreg rpi mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. An additional reference is put on the ndlp to prevent
+ * lpfc_nlp_release from freeing the rpi bit in the bitmask before
+ * the unreg mailbox command completes; this routine puts that
+ * reference back.
+ *
+ **/
+void
+lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_vport  *vport = pmb->vport;
+       struct lpfc_nodelist *ndlp;
+
+       ndlp = pmb->context1;
+       if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   (bf_get(lpfc_sli_intf_if_type,
+                    &phba->sli4_hba.sli_intf) ==
+                    LPFC_SLI_INTF_IF_TYPE_2)) {
+                       if (ndlp) {
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                                "0010 UNREG_LOGIN vpi:%x "
+                                                "rpi:%x DID:%x map:%x %p\n",
+                                                vport->vpi, ndlp->nlp_rpi,
+                                                ndlp->nlp_DID,
+                                                ndlp->nlp_usg_map, ndlp);
+
+                               lpfc_nlp_put(ndlp);
+                       }
+               }
+       }
+
+       mempool_free(pmb, phba->mbox_mem_pool);
+}
 
 /**
  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
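
The new lpfc_sli4_unreg_rpi_cmpl_clr handler above is only half of the pattern
described in its kernel-doc: a caller issuing UNREG_LOGIN is expected to take
the extra ndlp reference and arm this routine as the mailbox completion
callback. A rough caller-side sketch follows; example_unreg_rpi is a
hypothetical name and the rpi indexing is simplified (SLI-4 translates the
logical rpi before building the mailbox), so treat it as an illustration of
the wiring rather than the driver's actual code.

static int
example_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Build the UNREG_LOGIN mailbox command for this rpi. */
	lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
	mbox->vport = vport;

	/* Extra reference keeps the rpi bit alive until completion. */
	mbox->context1 = lpfc_nlp_get(ndlp);
	mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* Undo the reference taken above on failure to post. */
		lpfc_nlp_put(ndlp);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
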
@@ -12842,7 +12886,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  * fails this function will return -ENXIO.
  **/
 int
-lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
+lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
 {
        struct lpfc_mbx_modify_eq_delay *eq_delay;
        LPFC_MBOXQ_t *mbox;
@@ -12959,11 +13003,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
        bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
               LPFC_EQE_SIZE);
        bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
-       /* Calculate delay multiper from maximum interrupt per second */
-       if (imax > LPFC_DMULT_CONST)
-               dmult = 0;
-       else
-               dmult = LPFC_DMULT_CONST/imax - 1;
+       /* don't set up the delay multiplier using EQ_CREATE */
+       dmult = 0;
        bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
               dmult);
        switch (eq->entry_count) {
@@ -15662,14 +15703,14 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
        struct lpfc_rpi_hdr *rpi_hdr;
        unsigned long iflag;
 
-       max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
-       rpi_limit = phba->sli4_hba.next_rpi;
-
        /*
         * Fetch the next logical rpi.  Because this index is logical,
         * the  driver starts at 0 each time.
         */
        spin_lock_irqsave(&phba->hbalock, iflag);
+       max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+       rpi_limit = phba->sli4_hba.next_rpi;
+
        rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
        if (rpi >= rpi_limit)
                rpi = LPFC_RPI_ALLOC_ERROR;
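
The hunk above moves the reads of max_rpi and rpi_limit inside the hbalock
critical section, so the limit passed to find_next_zero_bit and the bitmap it
scans are sampled under the same lock. A minimal userspace analogue of that
pattern is sketched below (a pthread mutex standing in for the spinlock and a
hand-rolled bitmap scan standing in for find_next_zero_bit); names such as
alloc_id and ALLOC_ERROR are illustrative only.

#include <pthread.h>

#define ALLOC_ERROR	0xFFFFu
#define MAX_IDS		256
#define BITS_PER_WORD	(8 * sizeof(unsigned long))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bitmap[MAX_IDS / (8 * sizeof(unsigned long))];
static unsigned int id_limit = 64;	/* may be raised while allocators run */

static unsigned int alloc_id(void)
{
	unsigned int id = ALLOC_ERROR;

	/* Limit and bitmap are read under the same lock, as in the hunk. */
	pthread_mutex_lock(&lock);
	for (unsigned int i = 0; i < id_limit; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_WORD);

		if (!(bitmap[i / BITS_PER_WORD] & mask)) {
			bitmap[i / BITS_PER_WORD] |= mask;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return id;
}
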
@@ -15678,6 +15719,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
                phba->sli4_hba.max_cfg_param.rpi_used++;
                phba->sli4_hba.rpi_count++;
        }
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "0001 rpi:%x max:%x lim:%x\n",
+                       (int) rpi, max_rpi, rpi_limit);
 
        /*
         * Don't try to allocate more rpi header regions if the device limit