From 0794d601d1748b007dc338127232e74c19fd1337 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:19 -0800 Subject: scsi: lpfc: Implement common IO buffers between NVME and SCSI Currently, both NVME and SCSI get their IO buffers from separate pools. XRI's are associated 1:1 with IO buffers, so XRI's are also split between protocols. Eliminate the independent pools and use a single pool. Each buffer structure now has a common section and a protocol section. Per protocol routines for SGL initialization are removed and replaced by common routines. Initialization of the buffers is only done on the common area. All other fields, which are protocol specific, are initialized when the buffer is allocated for use in the per-protocol allocation routine. In the past, the SCSI side allocated IO buffers as part of slave_alloc calls until the maximum XRIs for SCSI was reached. As all XRIs are now common and may be used for either protocol, allocation for everything is done as part of adapter initialization and the scsi side has no action in slave alloc. As XRI's are no longer split, the lpfc_xri_split module parameter is removed. Adapters based on SLI3 will continue to use the older scsi_buf_list_get/put routines. All SLI4 adapters utilize the new IO buffer scheme Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_init.c | 515 +++++++++++++++++++++++------------------- 1 file changed, 277 insertions(+), 238 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_init.c') diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c1c36812c3d2..149f3182f41e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1092,13 +1092,15 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); - list_for_each_entry_safe(psb, psb_next, &aborts, list) { - psb->pCmd = NULL; - psb->status = IOSTAT_SUCCESS; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + list_for_each_entry_safe(psb, psb_next, &aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_common_buf_list_put); + spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); } - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { cnt = 0; @@ -1107,10 +1109,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->status = IOSTAT_SUCCESS; cnt++; } - spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); - phba->put_nvme_bufs += cnt; - list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); - spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag); + phba->put_common_bufs += cnt; + list_splice(&nvme_aborts, &phba->lpfc_common_buf_list_put); + spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag); list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); @@ -3123,6 +3125,18 @@ lpfc_online(struct lpfc_hba *phba) "6132 NVME restore reg failed " "on nvmei error x%x\n", error); } + /* Don't post more new bufs if repost already recovered + * the nvme sgls. 
+ */ + if (phba->sli4_hba.common_xri_cnt == 0) { + i = lpfc_new_common_buf(phba, + phba->sli4_hba.common_xri_max); + if (i == 0) { + lpfc_unblock_mgmt_io(phba); + return 1; + } + phba->total_common_bufs += i; + } } else { lpfc_sli_queue_init(phba); if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ @@ -3355,50 +3369,49 @@ lpfc_scsi_free(struct lpfc_hba *phba) spin_unlock(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } + /** - * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists + * lpfc_common_free - Free all the IO buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * - * This routine is to free all the NVME buffers and IOCBs from the driver + * This routine is to free all the IO buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. **/ static void -lpfc_nvme_free(struct lpfc_hba *phba) +lpfc_common_free(struct lpfc_hba *phba) { struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return; - spin_lock_irq(&phba->hbalock); /* Release all the lpfc_nvme_bufs maintained by this host. */ - spin_lock(&phba->nvme_buf_list_put_lock); + spin_lock(&phba->common_buf_list_put_lock); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_nvme_buf_list_put, list) { + &phba->lpfc_common_buf_list_put, list) { list_del(&lpfc_ncmd->list); - phba->put_nvme_bufs--; + phba->put_common_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); - phba->total_nvme_bufs--; + phba->total_common_bufs--; } - spin_unlock(&phba->nvme_buf_list_put_lock); + spin_unlock(&phba->common_buf_list_put_lock); - spin_lock(&phba->nvme_buf_list_get_lock); + spin_lock(&phba->common_buf_list_get_lock); list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_nvme_buf_list_get, list) { + &phba->lpfc_common_buf_list_get, list) { list_del(&lpfc_ncmd->list); - phba->get_nvme_bufs--; + phba->get_common_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); kfree(lpfc_ncmd); - phba->total_nvme_bufs--; + phba->total_common_bufs--; } - spin_unlock(&phba->nvme_buf_list_get_lock); + spin_unlock(&phba->common_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } + /** * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. @@ -3641,7 +3654,7 @@ out_free_mem: } /** - * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping + * lpfc_sli4_common_sgl_update - update xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. 
* * This routine first calculates the sizes of the current els and allocated @@ -3653,94 +3666,214 @@ out_free_mem: * 0 - successful (for now, it always returns 0) **/ int -lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) +lpfc_sli4_common_sgl_update(struct lpfc_hba *phba) { - struct lpfc_scsi_buf *psb, *psb_next; - uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; - LIST_HEAD(scsi_sgl_list); - int rc; + struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; + uint16_t i, lxri, els_xri_cnt; + uint16_t common_xri_cnt, common_xri_max; + LIST_HEAD(common_sgl_list); + int rc, cnt; - /* - * update on pci function's els xri-sgl list - */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - phba->total_scsi_bufs = 0; + phba->total_common_bufs = 0; + phba->get_common_bufs = 0; + phba->put_common_bufs = 0; /* - * update on pci function's allocated scsi xri-sgl list + * update on pci function's allocated nvme xri-sgl list */ - /* maximum number of xris available for scsi buffers */ - phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - - els_xri_cnt; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) - return 0; - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - phba->sli4_hba.scsi_xri_max = /* Split them up */ - (phba->sli4_hba.scsi_xri_max * - phba->cfg_xri_split) / 100; - - spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); - list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); - spin_unlock(&phba->scsi_buf_list_put_lock); - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + /* maximum number of xris available for nvme buffers */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + common_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.common_xri_max = common_xri_max; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "6060 Current allocated SCSI xri-sgl count:%d, " - "maximum SCSI xri count:%d (split:%d)\n", - phba->sli4_hba.scsi_xri_cnt, - phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); - - if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { - /* max scsi xri shrinked below the allocated scsi buffers */ - scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - - phba->sli4_hba.scsi_xri_max; - /* release the extra allocated scsi buffers */ - for (i = 0; i < scsi_xri_cnt; i++) { - list_remove_head(&scsi_sgl_list, psb, - struct lpfc_scsi_buf, list); - if (psb) { + "6074 Current allocated XRI sgl count:%d, " + "maximum XRI count:%d\n", + phba->sli4_hba.common_xri_cnt, + phba->sli4_hba.common_xri_max); + + spin_lock_irq(&phba->common_buf_list_get_lock); + spin_lock(&phba->common_buf_list_put_lock); + list_splice_init(&phba->lpfc_common_buf_list_get, &common_sgl_list); + list_splice(&phba->lpfc_common_buf_list_put, &common_sgl_list); + cnt = phba->get_common_bufs + phba->put_common_bufs; + phba->get_common_bufs = 0; + phba->put_common_bufs = 0; + spin_unlock(&phba->common_buf_list_put_lock); + spin_unlock_irq(&phba->common_buf_list_get_lock); + + if (phba->sli4_hba.common_xri_cnt > phba->sli4_hba.common_xri_max) { + /* max nvme xri shrunk below the allocated nvme buffers */ + spin_lock_irq(&phba->common_buf_list_get_lock); + common_xri_cnt = phba->sli4_hba.common_xri_cnt - + phba->sli4_hba.common_xri_max; + spin_unlock_irq(&phba->common_buf_list_get_lock); + /* release the extra allocated nvme buffers */ + for (i = 0; i < common_xri_cnt; i++) { + list_remove_head(&common_sgl_list, lpfc_ncmd, + struct lpfc_nvme_buf, list); + if 
(lpfc_ncmd) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); } } - spin_lock_irq(&phba->scsi_buf_list_get_lock); - phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->common_buf_list_get_lock); + phba->sli4_hba.common_xri_cnt -= common_xri_cnt; + spin_unlock_irq(&phba->common_buf_list_get_lock); } - /* update xris associated to remaining allocated scsi buffers */ - psb = NULL; - psb_next = NULL; - list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { + /* update xris associated to remaining allocated nvme buffers */ + lpfc_ncmd = NULL; + lpfc_ncmd_next = NULL; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &common_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2560 Failed to allocate xri for " - "scsi buffer\n"); + "6075 Failed to allocate xri for " + "nvme buffer\n"); rc = -ENOMEM; goto out_free_mem; } - psb->cur_iocbq.sli4_lxritag = lxri; - psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - spin_unlock(&phba->scsi_buf_list_put_lock); - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->common_buf_list_get_lock); + spin_lock(&phba->common_buf_list_put_lock); + list_splice_init(&common_sgl_list, &phba->lpfc_common_buf_list_get); + phba->get_common_bufs = cnt; + INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); + spin_unlock(&phba->common_buf_list_put_lock); + spin_unlock_irq(&phba->common_buf_list_get_lock); return 0; out_free_mem: - lpfc_scsi_free(phba); + lpfc_common_free(phba); return rc; } +/** + * lpfc_new_common_buf - IO buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine allocates nvme buffers for device with SLI-4 interface spec, + * the nvme buffer contains all the necessary information needed to initiate + * an I/O. After allocating up to @num_to_allocate IO buffers and put + * them on a list, it post them to the port by using SGL block post. + * + * Return codes: + * int - number of nvme buffers that were allocated and posted. + * 0 = failure, less than num_to_alloc is a partial failure. 
+ **/ +int +lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc) +{ + struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_iocbq *pwqeq; + uint16_t iotag, lxri = 0; + int bcnt, num_posted; + LIST_HEAD(prep_nblist); + LIST_HEAD(post_nblist); + LIST_HEAD(nvme_nblist); + + /* Sanity check to ensure our sizing is right for both SCSI and NVME */ + if ((sizeof(struct lpfc_scsi_buf) > LPFC_COMMON_IO_BUF_SZ) || + (sizeof(struct lpfc_nvme_buf) > LPFC_COMMON_IO_BUF_SZ)) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "6426 Common buffer size mismatch: %ld %ld\n", + sizeof(struct lpfc_scsi_buf), + sizeof(struct lpfc_nvme_buf)); + return 0; + } + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); + if (!lpfc_ncmd) + break; + /* + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes the + * number of SGE's necessary to support the sg_tablesize. + */ + lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, + &lpfc_ncmd->dma_handle); + if (!lpfc_ncmd->data) { + kfree(lpfc_ncmd); + break; + } + memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); + + /* + * 4K Page alignment is CRITICAL to BlockGuard, double check + * to be sure. + */ + if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + (((unsigned long)(lpfc_ncmd->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3369 Memory alignment err: addr=%lx\n", + (unsigned long)lpfc_ncmd->data); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + pwqeq = &lpfc_ncmd->cur_iocbq; + + /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, pwqeq); + if (iotag == 0) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6121 Failed to allocate IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } + pwqeq->sli4_lxritag = lxri; + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + pwqeq->context1 = lpfc_ncmd; + + /* Initialize local short-hand pointers. */ + lpfc_ncmd->dma_sgl = lpfc_ncmd->data; + lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; + lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; + + /* add the nvme buffer to a post list */ + list_add_tail(&lpfc_ncmd->list, &post_nblist); + spin_lock_irq(&phba->common_buf_list_get_lock); + phba->sli4_hba.common_xri_cnt++; + spin_unlock_irq(&phba->common_buf_list_get_lock); + } + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6114 Allocate %d out of %d requested new NVME " + "buffers\n", bcnt, num_to_alloc); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) + num_posted = lpfc_sli4_post_common_sgl_list( + phba, &post_nblist, bcnt); + else + num_posted = 0; + + return num_posted; +} + static uint64_t lpfc_get_wwpn(struct lpfc_hba *phba) { @@ -3776,111 +3909,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba) return rol64(wwn, 32); } -/** - * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping - * @phba: pointer to lpfc hba data structure. 
- * - * This routine first calculates the sizes of the current els and allocated - * scsi sgl lists, and then goes through all sgls to updates the physical - * XRIs assigned due to port function reset. During port initialization, the - * current els and allocated scsi sgl lists are 0s. - * - * Return codes - * 0 - successful (for now, it always returns 0) - **/ -int -lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) -{ - struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; - uint16_t i, lxri, els_xri_cnt; - uint16_t nvme_xri_cnt, nvme_xri_max; - LIST_HEAD(nvme_sgl_list); - int rc, cnt; - - phba->total_nvme_bufs = 0; - phba->get_nvme_bufs = 0; - phba->put_nvme_bufs = 0; - - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return 0; - /* - * update on pci function's allocated nvme xri-sgl list - */ - - /* maximum number of xris available for nvme buffers */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - phba->sli4_hba.nvme_xri_max = nvme_xri_max; - phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; - - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "6074 Current allocated NVME xri-sgl count:%d, " - "maximum NVME xri count:%d\n", - phba->sli4_hba.nvme_xri_cnt, - phba->sli4_hba.nvme_xri_max); - - spin_lock_irq(&phba->nvme_buf_list_get_lock); - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); - list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); - cnt = phba->get_nvme_bufs + phba->put_nvme_bufs; - phba->get_nvme_bufs = 0; - phba->put_nvme_bufs = 0; - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - - if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { - /* max nvme xri shrunk below the allocated nvme buffers */ - spin_lock_irq(&phba->nvme_buf_list_get_lock); - nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - - phba->sli4_hba.nvme_xri_max; - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - /* release the extra allocated nvme buffers */ - for (i = 0; i < nvme_xri_cnt; i++) { - list_remove_head(&nvme_sgl_list, lpfc_ncmd, - struct lpfc_nvme_buf, list); - if (lpfc_ncmd) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - } - } - spin_lock_irq(&phba->nvme_buf_list_get_lock); - phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - } - - /* update xris associated to remaining allocated nvme buffers */ - lpfc_ncmd = NULL; - lpfc_ncmd_next = NULL; - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &nvme_sgl_list, list) { - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "6075 Failed to allocate xri for " - "nvme buffer\n"); - rc = -ENOMEM; - goto out_free_mem; - } - lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; - lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - } - spin_lock_irq(&phba->nvme_buf_list_get_lock); - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); - phba->get_nvme_bufs = cnt; - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - return 0; - -out_free_mem: - lpfc_nvme_free(phba); - return rc; -} - /** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. 
@@ -5819,24 +5847,19 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) "NVME" : " "), (phba->nvmet_support ? "NVMET" : " ")); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); - spin_lock_init(&phba->scsi_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - } + /* Initialize the IO buffer list used by driver for SLI3 SCSI */ + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && - (phba->nvmet_support == 0)) { - /* Initialize the NVME buffer list used by driver for NVME IO */ - spin_lock_init(&phba->nvme_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); - phba->get_nvme_bufs = 0; - spin_lock_init(&phba->nvme_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); - phba->put_nvme_bufs = 0; - } + /* Initialize the IO buffer list used by driver for SLI4 SCSI/NVME */ + spin_lock_init(&phba->common_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_common_buf_list_get); + phba->get_common_bufs = 0; + spin_lock_init(&phba->common_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put); + phba->put_common_bufs = 0; /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -5877,7 +5900,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) static int lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) { - int rc; + int rc, entry_sz; /* * Initialize timers used by driver @@ -5922,6 +5945,11 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + if (phba->sli_rev == LPFC_SLI_REV4) + entry_sz = sizeof(struct sli4_sge); + else + entry_sz = sizeof(struct ulp_bde64); + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { /* @@ -5935,7 +5963,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + (LPFC_MAX_SG_SEG_CNT * entry_sz); if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; @@ -5950,7 +5978,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + ((phba->cfg_sg_seg_cnt + 2) * entry_sz); /* Total BDEs in BPL for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; @@ -6875,11 +6903,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) /* els xri-sgl book keeping */ phba->sli4_hba.els_xri_cnt = 0; - /* scsi xri-buffer book keeping */ - phba->sli4_hba.scsi_xri_cnt = 0; - /* nvme xri-buffer book keeping */ - phba->sli4_hba.nvme_xri_cnt = 0; + phba->sli4_hba.common_xri_cnt = 0; } /** @@ -10556,7 +10581,7 @@ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { int wait_time = 0; - int nvme_xri_cmpl = 1; + int common_xri_cmpl = 1; int nvmet_xri_cmpl = 1; int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); @@ -10575,13 +10600,13 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) fcp_xri_cmpl = 
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - nvme_xri_cmpl = + common_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || + while (!fcp_xri_cmpl || !els_xri_cmpl || !common_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) @@ -10589,7 +10614,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) "6424 NVMET XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); - if (!nvme_xri_cmpl) + if (!common_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6100 NVME XRI exchange busy " "wait time: %d seconds.\n", @@ -10611,7 +10636,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - nvme_xri_cmpl = list_empty( + common_xri_cmpl = list_empty( &phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty( &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); @@ -11190,6 +11215,8 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) * corresponding pools here. */ lpfc_scsi_free(phba); + lpfc_free_iocb_list(phba); + lpfc_mem_free_all(phba); dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), @@ -11767,7 +11794,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) struct lpfc_hba *phba; struct lpfc_vport *vport = NULL; struct Scsi_Host *shost = NULL; - int error; + int error, len; uint32_t cfg_mode, intr_mode; /* Allocate memory for HBA structure */ @@ -11877,19 +11904,32 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* NVME support in FW earlier in the driver load corrects the * FC4 type making a check for nvme_support unnecessary. */ - if ((phba->nvmet_support == 0) && - (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { - /* Create NVME binding with nvme_fc_transport. This - * ensures the vport is initialized. If the localport - * create fails, it should not unload the driver to - * support field issues. + if (phba->nvmet_support == 0) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. If the localport + * create fails, it should not unload the driver to + * support field issues. + */ + error = lpfc_nvme_create_localport(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6004 NVME registration " + "failed, error x%x\n", + error); + } + } + /* Don't post more new bufs if repost already recovered + * the nvme sgls. */ - error = lpfc_nvme_create_localport(vport); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6004 NVME registration failed, " - "error x%x\n", - error); + if (phba->sli4_hba.common_xri_cnt == 0) { + len = lpfc_new_common_buf( + phba, phba->sli4_hba.common_xri_max); + if (len == 0) { + error = -ENOMEM; + goto out_disable_intr; + } + phba->total_common_bufs += len; } } @@ -11989,8 +12029,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Perform scsi free before driver resource_unset since scsi * buffers are released to their corresponding pools here. 
 	 */
-	lpfc_scsi_free(phba);
-	lpfc_nvme_free(phba);
+	lpfc_common_free(phba);
 	lpfc_free_iocb_list(phba);
 
 	lpfc_unset_driver_resource_phase2(phba);
-- 
cgit v1.2.3


From 7370d10ac99e8ebc5501c0fcdec482cb939ecbd4 Mon Sep 17 00:00:00 2001
From: James Smart
Date: Mon, 28 Jan 2019 11:14:20 -0800
Subject: scsi: lpfc: Remove extra vector and SLI4 queue for Expresslane

There is an extra queue and MSI-X vector for Expresslane. Now that the
driver will be doing queues per cpu, this oddball queue is no longer
needed. Expresslane will utilize the normal per-cpu queues.

Updated the debugfs SLI4 queue output to go along with the change.

Signed-off-by: Dick Kennedy
Signed-off-by: James Smart
Reviewed-by: Hannes Reinecke
Signed-off-by: Martin K. Petersen
---
 drivers/scsi/lpfc/lpfc_init.c | 225 ++----------------------------------------
 1 file changed, 8 insertions(+), 217 deletions(-)

(limited to 'drivers/scsi/lpfc/lpfc_init.c')

diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 149f3182f41e..9d9b965f796d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6059,7 +6059,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
 	int longs;
-	int fof_vectors = 0;
 	int extra;
 	uint64_t wwn;
 	u32 if_type;
@@ -6433,8 +6432,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* Verify OAS is supported */
 	lpfc_sli4_oas_verify(phba);
-	if (phba->cfg_fof)
-		fof_vectors = 1;
 
 	/* Verify RAS support on adapter */
 	lpfc_sli4_ras_init(phba);
@@ -6478,7 +6475,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_remove_rpi_hdrs;
 	}
 
-	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
+	phba->sli4_hba.hba_eq_hdl = kcalloc(phba->io_channel_irqs,
 					sizeof(struct lpfc_hba_eq_hdl),
 					GFP_KERNEL);
 	if (!phba->sli4_hba.hba_eq_hdl) {
@@ -8048,7 +8045,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 			/*
 			 * Whats left after this can go toward NVME.
 			 * The minus 6 accounts for ELS, NVME LS, MBOX
-			 * fof plus a couple extra. When configured for
+			 * plus a couple extra. When configured for
 			 * NVMET, FCP io channel WQs are not created.
 			 */
 			length -= 6;
@@ -8280,7 +8277,6 @@ static int
 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 {
 	int io_channel;
-	int fof_vectors = phba->cfg_fof ?
1 : 0; /* * Sanity check for configured queue parameters against the run-time @@ -8299,13 +8295,13 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) io_channel = phba->sli4_hba.num_online_cpu; } - if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { + if (io_channel > phba->sli4_hba.max_cfg_param.max_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2575 Reducing IO channels to match number of " "available EQs: from %d to %d\n", io_channel, phba->sli4_hba.max_cfg_param.max_eq); - io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; + io_channel = phba->sli4_hba.max_cfg_param.max_eq; } /* The actual number of FCP / NVME event queues adopted */ @@ -8769,10 +8765,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; } } - - /* Create the Queues needed for Flash Optimized Fabric operations */ - if (phba->cfg_fof) - lpfc_fof_queue_create(phba); return 0; out_error: @@ -8828,9 +8820,6 @@ lpfc_sli4_release_queue_map(uint16_t **qmap) void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { - if (phba->cfg_fof) - lpfc_fof_queue_destroy(phba); - /* Release HBA eqs */ lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); @@ -9331,16 +9320,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); - if (phba->cfg_fof) { - rc = lpfc_fof_queue_setup(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0549 Failed setup of FOF Queues: " - "rc = 0x%x\n", rc); - goto out_destroy; - } - } - for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, phba->cfg_fcp_imax); @@ -9370,10 +9349,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) { int qidx; - /* Unset the queues created for Flash Optimized Fabric operations */ - if (phba->cfg_fof) - lpfc_fof_queue_destroy(phba); - /* Unset mailbox command work queue */ if (phba->sli4_hba.mbx_wq) lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); @@ -10297,8 +10272,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) /* Set up MSI-X multi-message vectors */ vectors = phba->io_channel_irqs; - if (phba->cfg_fof) - vectors++; rc = pci_alloc_irq_vectors(phba->pcidev, (phba->nvmet_support) ? 
1 : 2, @@ -10320,16 +10293,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->sli4_hba.hba_eq_hdl[index].idx = index; phba->sli4_hba.hba_eq_hdl[index].phba = phba; atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); - if (phba->cfg_fof && (index == (vectors - 1))) - rc = request_irq(pci_irq_vector(phba->pcidev, index), - &lpfc_sli4_fof_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); - else - rc = request_irq(pci_irq_vector(phba->pcidev, index), - &lpfc_sli4_hba_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); + rc = request_irq(pci_irq_vector(phba->pcidev, index), + &lpfc_sli4_hba_intr_handler, 0, + name, + &phba->sli4_hba.hba_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -10338,9 +10305,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } } - if (phba->cfg_fof) - vectors--; - if (vectors != phba->io_channel_irqs) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3238 Reducing IO channels to match number of " @@ -10415,10 +10379,6 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) phba->sli4_hba.hba_eq_hdl[index].phba = phba; } - if (phba->cfg_fof) { - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; - } return 0; } @@ -10485,12 +10445,6 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) eqhdl->phba = phba; atomic_set(&eqhdl->hba_eq_in_use, 1); } - if (phba->cfg_fof) { - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; - eqhdl->idx = idx; - eqhdl->phba = phba; - atomic_set(&eqhdl->hba_eq_in_use, 1); - } } } return intr_mode; @@ -10516,10 +10470,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba) for (index = 0; index < phba->io_channel_irqs; index++) free_irq(pci_irq_vector(phba->pcidev, index), &phba->sli4_hba.hba_eq_hdl[index]); - - if (phba->cfg_fof) - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); } else { free_irq(phba->pcidev->irq, phba); } @@ -12692,165 +12642,6 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba) } } -/** - * lpfc_fof_queue_setup - Set up all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up all the fof queues for the FC HBA - * operation. - * - * Return codes - * 0 - successful - * -ENOMEM - No available memory - **/ -int -lpfc_fof_queue_setup(struct lpfc_hba *phba) -{ - struct lpfc_sli_ring *pring; - int rc; - - rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); - if (rc) - return -ENOMEM; - - if (phba->cfg_fof) { - - rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, - phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); - if (rc) - goto out_oas_cq; - - rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, - phba->sli4_hba.oas_cq, LPFC_FCP); - if (rc) - goto out_oas_wq; - - /* Bind this CQ/WQ to the NVME ring */ - pring = phba->sli4_hba.oas_wq->pring; - pring->sli.sli4.wqp = - (void *)phba->sli4_hba.oas_wq; - phba->sli4_hba.oas_cq->pring = pring; - } - - return 0; - -out_oas_wq: - lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); -out_oas_cq: - lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); - return rc; - -} - -/** - * lpfc_fof_queue_create - Create all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate all the fof queues for the FC HBA - * operation. For each SLI4 queue type, the parameters such as queue entry - * count (queue depth) shall be taken from the module parameter. For now, - * we just use some constant number as place holder. 
- * - * Return codes - * 0 - successful - * -ENOMEM - No availble memory - * -EIO - The mailbox failed to complete successfully. - **/ -int -lpfc_fof_queue_create(struct lpfc_hba *phba) -{ - struct lpfc_queue *qdesc; - uint32_t wqesize; - - /* Create FOF EQ */ - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.eq_esize, - phba->sli4_hba.eq_ecount); - if (!qdesc) - goto out_error; - - qdesc->qe_valid = 1; - phba->sli4_hba.fof_eq = qdesc; - - if (phba->cfg_fof) { - - /* Create OAS CQ */ - if (phba->enab_exp_wqcq_pages) - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_EXPANDED_PAGE_SIZE, - phba->sli4_hba.cq_esize, - LPFC_CQE_EXP_COUNT); - else - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) - goto out_error; - - qdesc->qe_valid = 1; - phba->sli4_hba.oas_cq = qdesc; - - /* Create OAS WQ */ - if (phba->enab_exp_wqcq_pages) { - wqesize = (phba->fcp_embed_io) ? - LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_EXPANDED_PAGE_SIZE, - wqesize, - LPFC_WQE_EXP_COUNT); - } else - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - - if (!qdesc) - goto out_error; - - phba->sli4_hba.oas_wq = qdesc; - list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); - - } - return 0; - -out_error: - lpfc_fof_queue_destroy(phba); - return -ENOMEM; -} - -/** - * lpfc_fof_queue_destroy - Destroy all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release all the SLI4 queues with the FC HBA - * operation. - * - * Return codes - * 0 - successful - **/ -int -lpfc_fof_queue_destroy(struct lpfc_hba *phba) -{ - /* Release FOF Event queue */ - if (phba->sli4_hba.fof_eq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); - phba->sli4_hba.fof_eq = NULL; - } - - /* Release OAS Completion queue */ - if (phba->sli4_hba.oas_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); - phba->sli4_hba.oas_cq = NULL; - } - - /* Release OAS Work queue */ - if (phba->sli4_hba.oas_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); - phba->sli4_hba.oas_wq = NULL; - } - return 0; -} MODULE_DEVICE_TABLE(pci, lpfc_id_table); -- cgit v1.2.3 From cdb42becdd40eeb320af3f21ac9a34e9d7517516 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 28 Jan 2019 11:14:21 -0800 Subject: scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu Currently, both nvme and fcp each have their own concept of an io_channel, which is a combination wq/cq and associated msix. Different cpus would share an io_channel. The driver is now moving to per-cpu wq/cq pairs and msix vectors. The driver will still use separate wq/cq pairs per protocol on each cpu, but the protocols will share the msix vector. Given the elimination of the nvme and fcp io channels, the module parameters will be removed. A new parameter, lpfc_hdw_queue is added which allows the wq/cq pair allocation per cpu to be overridden and allocated to lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will be based on the number of cpus. If non-zero, the parameter specifies the number of queues to allocate. At this time, the maximum non-zero value is 64. To manage this new paradigm, a new hardware queue structure is created to track queue activity and relationships. 
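For reference, a minimal sketch of what that per-cpu hardware queue structure looks like, reconstructed only from the hdwq[] accesses in the hunks of this patch (the authoritative definition lives in lpfc_sli4.h and may carry additional fields):

	/* Illustrative sketch only -- field set inferred from the hunks
	 * below, not the full upstream definition.  struct lpfc_queue is
	 * the driver's existing generic SLI4 queue object.
	 */
	struct lpfc_sli4_hdw_queue {
		struct lpfc_queue *hba_eq;	/* event queue shared by FCP and NVME */
		struct lpfc_queue *fcp_cq;	/* SCSI/FCP completion queue */
		struct lpfc_queue *fcp_wq;	/* SCSI/FCP work queue */
		struct lpfc_queue *nvme_cq;	/* NVME completion queue */
		struct lpfc_queue *nvme_wq;	/* NVME work queue */
		uint16_t fcp_cq_map;		/* CQ ids for fast completion lookup */
		uint16_t nvme_cq_map;
	};

The driver allocates phba->cfg_hdw_queue of these into phba->sli4_hba.hdwq (see lpfc_sli4_queue_create() in the diff below), replacing the separate hba_eq/fcp_cq/fcp_wq/nvme_cq/nvme_wq arrays and their cq_map arrays.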
As MSIX vector allocation must be known before setting up the relationships, msix allocation now occurs before queue datastructures are allocated. If the number of vectors allocated is less than the desired hardware queues, the hardware queue counts will be reduced to the number of vectors Signed-off-by: Dick Kennedy Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen --- drivers/scsi/lpfc/lpfc_init.c | 489 +++++++++++++++--------------------------- 1 file changed, 178 insertions(+), 311 deletions(-) (limited to 'drivers/scsi/lpfc/lpfc_init.c') diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 9d9b965f796d..c4acd9ee55b1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1315,7 +1315,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) localport->private; tot = 0; for (i = 0; - i < phba->cfg_nvme_io_channel; i++) { + i < phba->cfg_hdw_queue; i++) { cstat = &lport->cstat[i]; data1 = atomic_read( &cstat->fc4NvmeInputRequests); @@ -1331,15 +1331,15 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } /* Interrupts per sec per EQ */ - val = phba->cfg_fcp_imax / phba->io_channel_irqs; + val = phba->cfg_fcp_imax / phba->cfg_hdw_queue; tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */ /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */ max_cqe = time_elapsed * tick_cqe; - for (i = 0; i < phba->io_channel_irqs; i++) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { /* Fast-path EQ */ - qp = phba->sli4_hba.hba_eq[i]; + qp = phba->sli4_hba.hdwq[i].hba_eq; if (!qp) continue; @@ -1361,7 +1361,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (val) { /* First, interrupts per sec per EQ */ val = phba->cfg_fcp_imax / - phba->io_channel_irqs; + phba->cfg_hdw_queue; /* us delay between each interrupt */ val = LPFC_SEC_TO_USEC / val; @@ -2945,7 +2945,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) void lpfc_stop_hba_timers(struct lpfc_hba *phba) { - lpfc_stop_vport_timers(phba->pport); + if (phba->pport) + lpfc_stop_vport_timers(phba->pport); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); @@ -3989,7 +3990,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; - shost->nr_hw_queues = phba->cfg_fcp_io_channel; + shost->nr_hw_queues = phba->cfg_hdw_queue; if (phba->sli_rev == LPFC_SLI_REV4) { shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; @@ -4248,7 +4249,8 @@ lpfc_stop_port_s4(struct lpfc_hba *phba) { /* Reset some HBA SLI4 setup states */ lpfc_stop_hba_timers(phba); - phba->pport->work_port_events = 0; + if (phba->pport) + phba->pport->work_port_events = 0; phba->sli4_hba.intr_enable = 0; } @@ -6475,9 +6477,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - phba->sli4_hba.hba_eq_hdl = kcalloc(phba->io_channel_irqs, - sizeof(struct lpfc_hba_eq_hdl), - GFP_KERNEL); + phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue, + sizeof(struct lpfc_hba_eq_hdl), + GFP_KERNEL); if (!phba->sli4_hba.hba_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for " @@ -8049,21 +8051,23 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) * NVMET, FCP io channel WQs are not created. 
*/ length -= 6; + + /* Take off FCP queues */ if (!phba->nvmet_support) - length -= phba->cfg_fcp_io_channel; + length -= phba->cfg_hdw_queue; - if (phba->cfg_nvme_io_channel > length) { + /* Check to see if there is enough for NVME */ + if (phba->cfg_hdw_queue > length) { lpfc_printf_log( phba, KERN_ERR, LOG_SLI, "2005 Reducing NVME IO channel to %d: " - "WQ %d CQ %d NVMEIO %d FCPIO %d\n", + "WQ %d CQ %d CommonIO %d\n", length, phba->sli4_hba.max_cfg_param.max_wq, phba->sli4_hba.max_cfg_param.max_cq, - phba->cfg_nvme_io_channel, - phba->cfg_fcp_io_channel); + phba->cfg_hdw_queue); - phba->cfg_nvme_io_channel = length; + phba->cfg_hdw_queue = length; } } } @@ -8276,52 +8280,30 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - int io_channel; - /* * Sanity check for configured queue parameters against the run-time * device parameters */ - /* Sanity check on HBA EQ parameters */ - io_channel = phba->io_channel_irqs; - - if (phba->sli4_hba.num_online_cpu < io_channel) { - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "3188 Reducing IO channels to match number of " - "online CPUs: from %d to %d\n", - io_channel, phba->sli4_hba.num_online_cpu); - io_channel = phba->sli4_hba.num_online_cpu; - } - - if (io_channel > phba->sli4_hba.max_cfg_param.max_eq) { + if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2575 Reducing IO channels to match number of " "available EQs: from %d to %d\n", - io_channel, + phba->cfg_hdw_queue, phba->sli4_hba.max_cfg_param.max_eq); - io_channel = phba->sli4_hba.max_cfg_param.max_eq; + phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq; } - /* The actual number of FCP / NVME event queues adopted */ - if (io_channel != phba->io_channel_irqs) - phba->io_channel_irqs = io_channel; - if (phba->cfg_fcp_io_channel > io_channel) - phba->cfg_fcp_io_channel = io_channel; - if (phba->cfg_nvme_io_channel > io_channel) - phba->cfg_nvme_io_channel = io_channel; if (phba->nvmet_support) { - if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; } if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", - phba->io_channel_irqs, phba->cfg_fcp_io_channel, - phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); + "2574 IO channels: hdwQ %d MRQ: %d\n", + phba->cfg_hdw_queue, phba->cfg_nvmet_mrq); /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; @@ -8348,7 +8330,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; - phba->sli4_hba.nvme_cq[wqidx] = qdesc; + phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); @@ -8358,7 +8340,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } - phba->sli4_hba.nvme_wq[wqidx] = qdesc; + phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } @@ -8386,7 +8368,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; - phba->sli4_hba.fcp_cq[wqidx] = qdesc; + phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; /* Create Fast Path FCP WQs */ if (phba->enab_exp_wqcq_pages) 
{ @@ -8407,7 +8389,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } - phba->sli4_hba.fcp_wq[wqidx] = qdesc; + phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } @@ -8430,16 +8412,12 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; - int idx, io_channel; + int idx; /* * Create HBA Record arrays. * Both NVME and FCP will share that same vectors / EQs */ - io_channel = phba->io_channel_irqs; - if (!io_channel) - return -ERANGE; - phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; @@ -8451,87 +8429,17 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - phba->sli4_hba.hba_eq = kcalloc(io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.hba_eq) { + phba->sli4_hba.hdwq = kcalloc(phba->cfg_hdw_queue, + sizeof(struct lpfc_sli4_hdw_queue), + GFP_KERNEL); + if (!phba->sli4_hba.hdwq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2576 Failed allocate memory for " - "fast-path EQ record array\n"); + "6427 Failed allocate memory for " + "fast-path Hardware Queue array\n"); goto out_error; } - if (phba->cfg_fcp_io_channel) { - phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for " - "fast-path CQ record array\n"); - goto out_error; - } - phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for " - "fast-path FCP WQ record array\n"); - goto out_error; - } - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a FCP fast-path - * CQ match. - */ - phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, - sizeof(uint16_t), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2545 Failed allocate memory for " - "fast-path CQ map\n"); - goto out_error; - } - } - - if (phba->cfg_nvme_io_channel) { - phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6077 Failed allocate memory for " - "fast-path CQ record array\n"); - goto out_error; - } - - phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2581 Failed allocate memory for " - "fast-path NVME WQ record array\n"); - goto out_error; - } - - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a NVME fast-path - * CQ match. 
- */ - phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, - sizeof(uint16_t), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6078 Failed allocate memory for " - "fast-path CQ map\n"); - goto out_error; - } - + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) { phba->sli4_hba.nvmet_cqset = kcalloc( phba->cfg_nvmet_mrq, @@ -8569,7 +8477,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); /* Create HBA Event Queues (EQs) */ - for (idx = 0; idx < io_channel; idx++) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { /* Create EQs */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.eq_esize, @@ -8580,33 +8488,38 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; - phba->sli4_hba.hba_eq[idx] = qdesc; + phba->sli4_hba.hdwq[idx].hba_eq = qdesc; } - /* FCP and NVME io channels are not required to be balanced */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + /* Allocate SCSI SLI4 CQ/WQs */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) if (lpfc_alloc_fcp_wq_cq(phba, idx)) goto out_error; - for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) - if (lpfc_alloc_nvme_wq_cq(phba, idx)) - goto out_error; + /* Allocate NVME SLI4 CQ/WQs */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + if (lpfc_alloc_nvme_wq_cq(phba, idx)) + goto out_error; - if (phba->nvmet_support) { - for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { - qdesc = lpfc_sli4_queue_alloc(phba, + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + qdesc = lpfc_sli4_queue_alloc( + phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3142 Failed allocate NVME " - "CQ Set (%d)\n", idx); - goto out_error; + if (!qdesc) { + lpfc_printf_log( + phba, KERN_ERR, LOG_INIT, + "3142 Failed allocate NVME " + "CQ Set (%d)\n", idx); + goto out_error; + } + qdesc->qe_valid = 1; + phba->sli4_hba.nvmet_cqset[idx] = qdesc; } - qdesc->qe_valid = 1; - phba->sli4_hba.nvmet_cqset[idx] = qdesc; } } @@ -8723,7 +8636,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.dat_rq = qdesc; - if (phba->nvmet_support) { + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + phba->nvmet_support) { for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { /* Create NVMET Receive Queue for header */ qdesc = lpfc_sli4_queue_alloc(phba, @@ -8797,12 +8711,23 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) } static inline void -lpfc_sli4_release_queue_map(uint16_t **qmap) +lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max) { - if (*qmap != NULL) { - kfree(*qmap); - *qmap = NULL; + uint32_t idx; + + for (idx = 0; idx < max; idx++) { + lpfc_sli4_queue_free(hdwq[idx].hba_eq); + lpfc_sli4_queue_free(hdwq[idx].fcp_cq); + lpfc_sli4_queue_free(hdwq[idx].nvme_cq); + lpfc_sli4_queue_free(hdwq[idx].fcp_wq); + lpfc_sli4_queue_free(hdwq[idx].nvme_wq); + hdwq[idx].hba_eq = NULL; + hdwq[idx].fcp_cq = NULL; + hdwq[idx].nvme_cq = NULL; + hdwq[idx].fcp_wq = NULL; + hdwq[idx].nvme_wq = NULL; } + kfree(hdwq); } /** @@ -8821,29 +8746,10 @@ void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { /* Release HBA eqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); - - /* Release FCP cqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, - 
phba->cfg_fcp_io_channel); - - /* Release FCP wqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, - phba->cfg_fcp_io_channel); - - /* Release FCP CQ mapping array */ - lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); - - /* Release NVME cqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, - phba->cfg_nvme_io_channel); - - /* Release NVME wqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, - phba->cfg_nvme_io_channel); - - /* Release NVME CQ mapping array */ - lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); + if (phba->sli4_hba.hdwq) + lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq, + phba->cfg_hdw_queue); + phba->sli4_hba.hdwq = NULL; if (phba->nvmet_support) { lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, @@ -8927,7 +8833,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, cq->chann = qidx; if (qtype != LPFC_MBOX) { - /* Setup nvme_cq_map for fast lookup */ + /* Setup cq_map for fast lookup */ if (cq_map) *cq_map = cq->queue_id; @@ -8990,9 +8896,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) { uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_sli4_hdw_queue *qp; LPFC_MBOXQ_t *mboxq; int qidx; - uint32_t length, io_channel; + uint32_t length; int rc = -ENOMEM; /* Check for dual-ULP support */ @@ -9043,25 +8950,25 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* * Set up HBA Event Queues (EQs) */ - io_channel = phba->io_channel_irqs; + qp = phba->sli4_hba.hdwq; /* Set up HBA event queue */ - if (io_channel && !phba->sli4_hba.hba_eq) { + if (!qp) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; } - for (qidx = 0; qidx < io_channel; qidx++) { - if (!phba->sli4_hba.hba_eq[qidx]) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + if (!qp[qidx].hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0522 Fast-path EQ (%d) not " "allocated\n", qidx); rc = -ENOMEM; goto out_destroy; } - rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], - phba->cfg_fcp_imax); + rc = lpfc_eq_create(phba, qp[qidx].hba_eq, + phba->cfg_fcp_imax); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " @@ -9070,26 +8977,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 HBA EQ setup: queue[%d]-id=%d\n", - qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); + "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx, + qp[qidx].hba_eq->queue_id); } - if (phba->cfg_nvme_io_channel) { - if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6084 Fast-path NVME %s array not allocated\n", - (phba->sli4_hba.nvme_cq) ? 
"CQ" : "WQ"); - rc = -ENOMEM; - goto out_destroy; - } - - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { rc = lpfc_create_wq_cq(phba, - phba->sli4_hba.hba_eq[ - qidx % io_channel], - phba->sli4_hba.nvme_cq[qidx], - phba->sli4_hba.nvme_wq[qidx], - &phba->sli4_hba.nvme_cq_map[qidx], + qp[qidx].hba_eq, + qp[qidx].nvme_cq, + qp[qidx].nvme_wq, + &phba->sli4_hba.hdwq[qidx].nvme_cq_map, qidx, LPFC_NVME); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9101,31 +8999,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } } - if (phba->cfg_fcp_io_channel) { - /* Set up fast-path FCP Response Complete Queue */ - if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + rc = lpfc_create_wq_cq(phba, + qp[qidx].hba_eq, + qp[qidx].fcp_cq, + qp[qidx].fcp_wq, + &phba->sli4_hba.hdwq[qidx].fcp_cq_map, + qidx, LPFC_FCP); + if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3148 Fast-path FCP %s array not allocated\n", - phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); - rc = -ENOMEM; - goto out_destroy; - } - - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { - rc = lpfc_create_wq_cq(phba, - phba->sli4_hba.hba_eq[ - qidx % io_channel], - phba->sli4_hba.fcp_cq[qidx], - phba->sli4_hba.fcp_wq[qidx], - &phba->sli4_hba.fcp_cq_map[qidx], - qidx, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0535 Failed to setup fastpath " "FCP WQ/CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); - goto out_destroy; - } + goto out_destroy; } } @@ -9144,7 +9030,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, phba->sli4_hba.mbx_cq, phba->sli4_hba.mbx_wq, NULL, 0, LPFC_MBOX); @@ -9165,7 +9051,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (phba->cfg_nvmet_mrq > 1) { rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset, - phba->sli4_hba.hba_eq, + qp, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9177,7 +9063,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } else { /* Set up NVMET Receive Complete Queue */ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], - phba->sli4_hba.hba_eq[0], + qp[0].hba_eq, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9191,7 +9077,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "6090 NVMET CQ setup: cq-id=%d, " "parent eq-id=%d\n", phba->sli4_hba.nvmet_cqset[0]->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); + qp[0].hba_eq->queue_id); } } @@ -9203,14 +9089,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], - phba->sli4_hba.els_cq, - phba->sli4_hba.els_wq, - NULL, 0, LPFC_ELS); + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.els_cq, + phba->sli4_hba.els_wq, + NULL, 0, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", - (uint32_t)rc); + "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -9218,7 +9104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); - if (phba->cfg_nvme_io_channel) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Set up NVME LS Complete Queue */ if (!phba->sli4_hba.nvmels_cq 
|| !phba->sli4_hba.nvmels_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9227,14 +9113,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], - phba->sli4_hba.nvmels_cq, - phba->sli4_hba.nvmels_wq, - NULL, 0, LPFC_NVME_LS); + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.nvmels_cq, + phba->sli4_hba.nvmels_wq, + NULL, 0, LPFC_NVME_LS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of NVVME LS WQ/CQ: " - "rc = 0x%x\n", (uint32_t)rc); + "0526 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } @@ -9320,7 +9206,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); - for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) + for (qidx = 0; qidx < phba->cfg_hdw_queue; + qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, phba->cfg_fcp_imax); @@ -9347,6 +9234,7 @@ out_error: void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { + struct lpfc_sli4_hdw_queue *qp; int qidx; /* Unset mailbox command work queue */ @@ -9366,17 +9254,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); - /* Unset FCP work queue */ - if (phba->sli4_hba.fcp_wq) - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); - - /* Unset NVME work queue */ - if (phba->sli4_hba.nvme_wq) { - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); - } - /* Unset mailbox command complete queue */ if (phba->sli4_hba.mbx_cq) lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); @@ -9389,11 +9266,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) if (phba->sli4_hba.nvmels_cq) lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); - /* Unset NVME response complete queue */ - if (phba->sli4_hba.nvme_cq) - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); - if (phba->nvmet_support) { /* Unset NVMET MRQ queue */ if (phba->sli4_hba.nvmet_mrq_hdr) { @@ -9412,15 +9284,17 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) } } - /* Unset FCP response complete queue */ - if (phba->sli4_hba.fcp_cq) - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); - - /* Unset fast-path event queue */ - if (phba->sli4_hba.hba_eq) - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); + /* Unset fast-path SLI4 queues */ + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = &phba->sli4_hba.hdwq[qidx]; + lpfc_wq_destroy(phba, qp->fcp_wq); + lpfc_wq_destroy(phba, qp->nvme_wq); + lpfc_cq_destroy(phba, qp->fcp_cq); + lpfc_cq_destroy(phba, qp->nvme_cq); + lpfc_eq_destroy(phba, qp->hba_eq); + } + } } /** @@ -10246,7 +10120,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) if (vec >= vectors) vec = 0; index++; - if (index >= phba->cfg_fcp_io_channel) + if (index >= phba->cfg_hdw_queue) index = 0; cpup++; } @@ -10271,7 +10145,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) char *name; /* Set up MSI-X multi-message vectors */ - vectors = phba->io_channel_irqs; + vectors = phba->cfg_hdw_queue; rc = pci_alloc_irq_vectors(phba->pcidev, (phba->nvmet_support) ? 
1 : 2, @@ -10305,19 +10179,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } } - if (vectors != phba->io_channel_irqs) { + if (vectors != phba->cfg_hdw_queue) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT,