author	Anil Ravindranath <anil_ravindranath@pmc-sierra.com>	2009-08-25 17:35:18 -0700
committer	James Bottomley <James.Bottomley@suse.de>	2009-09-12 09:35:24 -0500
commit	89a3681041507773dfee1b88c1c90c8a811a79d3 (patch)
tree	df3ed10fee1725722524c66ba2e24cfef5f40ff5 /drivers
parent	073ed91e245d56d71a85e2a49bf0b3962fe74dc4 (diff)
[SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
Signed-off-by: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/Kconfig	6
-rw-r--r--	drivers/scsi/Makefile	1
-rw-r--r--	drivers/scsi/pmcraid.c	5604
-rw-r--r--	drivers/scsi/pmcraid.h	1029
4 files changed, 6640 insertions, 0 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122f755f..82bb3b2d207a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1811,6 +1811,12 @@ config ZFCP
called zfcp. If you want to compile it as a module, say M here
and read <file:Documentation/kbuild/modules.txt>.
+config SCSI_PMCRAID
+ tristate "PMC SIERRA Linux MaxRAID adapter support"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports the PMC SIERRA MaxRAID adapters.
+
config SCSI_SRP
tristate "SCSI RDMA Protocol helper library"
depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea63d0a..61a94af3cee7 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_MVSAS) += mvsas/
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
+obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 000000000000..4302f06e4ec9
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,5604 @@
+/*
+ * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
+ *
+ * Written By: PMC Sierra Corporation
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
+ * USA
+ *
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/hdreg.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <linux/libata.h>
+#include <linux/mutex.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsicam.h>
+
+#include "pmcraid.h"
+
+/*
+ * Module configuration parameters
+ */
+static unsigned int pmcraid_debug_log;
+static unsigned int pmcraid_disable_aen;
+static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
+
+/*
+ * Data structures to support multiple adapters by the LLD.
+ * pmcraid_adapter_count - count of configured adapters
+ */
+static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
+
+/*
+ * Supporting user-level control interface through IOCTL commands.
+ * pmcraid_major - major number to use
+ * pmcraid_minor - minor number(s) to use
+ */
+static unsigned int pmcraid_major;
+static struct class *pmcraid_class;
+DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
+
+/*
+ * Module parameters
+ */
+MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PMCRAID_DRIVER_VERSION);
+
+module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(log_level,
+ "Enables firmware error code logging, default :1 high-severity"
+ " errors, 2: all errors including high-severity errors,"
+ " 0: disables logging");
+
+module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(debug,
+ "Enable driver verbose message logging. Set 1 to enable."
+ "(default: 0)");
+
+module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(disable_aen,
+ "Disable driver aen notifications to apps. Set 1 to disable."
+ "(default: 0)");
+
+/* chip-specific constants for PMC MaxRAID controllers (same for
+ * 0x5220 and 0x8010)
+ */
+static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
+ {
+ .ioastatus = 0x0,
+ .ioarrin = 0x00040,
+ .mailbox = 0x7FC30,
+ .global_intr_mask = 0x00034,
+ .ioa_host_intr = 0x0009C,
+ .ioa_host_intr_clr = 0x000A0,
+ .ioa_host_mask = 0x7FC28,
+ .ioa_host_mask_clr = 0x7FC28,
+ .host_ioa_intr = 0x00020,
+ .host_ioa_intr_clr = 0x00020,
+ .transop_timeout = 300
+ }
+};
+
+/*
+ * PCI device ids supported by pmcraid driver
+ */
+static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
+ 0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
+
+
+
+/**
+ * pmcraid_slave_alloc - Prepare for commands to a device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is called by mid-layer prior to sending any command to the new
+ * device. Stores resource entry details of the device in scsi_device struct.
+ * Queuecommand uses the resource handle and other details to fill up IOARCB
+ * while sending commands to the device.
+ *
+ * Return value:
+ * 0 on success / -ENXIO if device does not exist
+ */
+static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *temp, *res = NULL;
+ struct pmcraid_instance *pinstance;
+ u8 target, bus, lun;
+ unsigned long lock_flags;
+ int rc = -ENXIO;
+ pinstance = shost_priv(scsi_dev->host);
+
+ /* Driver exposes VSET and GSCSI resources only; all other device types
+ * are not exposed. Resource list is synchronized using resource lock
+ * so any traversal or modifications to the list should be done inside
+ * this lock
+ */
+ spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+ list_for_each_entry(temp, &pinstance->used_res_q, queue) {
+
+ /* do not expose VSETs with order-ids >= 240 */
+ if (RES_IS_VSET(temp->cfg_entry)) {
+ target = temp->cfg_entry.unique_flags1;
+ if (target >= PMCRAID_MAX_VSET_TARGETS)
+ continue;
+ bus = PMCRAID_VSET_BUS_ID;
+ lun = 0;
+ } else if (RES_IS_GSCSI(temp->cfg_entry)) {
+ target = RES_TARGET(temp->cfg_entry.resource_address);
+ bus = PMCRAID_PHYS_BUS_ID;
+ lun = RES_LUN(temp->cfg_entry.resource_address);
+ } else {
+ continue;
+ }
+
+ if (bus == scsi_dev->channel &&
+ target == scsi_dev->id &&
+ lun == scsi_dev->lun) {
+ res = temp;
+ break;
+ }
+ }
+
+ if (res) {
+ res->scsi_dev = scsi_dev;
+ scsi_dev->hostdata = res;
+ res->change_detected = 0;
+ atomic_set(&res->read_failures, 0);
+ atomic_set(&res->write_failures, 0);
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * pmcraid_slave_configure - Configures a SCSI device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is executed by SCSI mid layer just after a device is first
+ * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
+ * timeout value (default 30s) will be over-written to a higher value (60s)
+ * and max_sectors value will be over-written to 512. It also sets queue depth
+ * to host->cmd_per_lun value
+ *
+ * Return value:
+ * 0 on success / -ENXIO for GSCSI resources that are not enclosures
+ */
+static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *res = scsi_dev->hostdata;
+
+ if (!res)
+ return 0;
+
+ /* LLD exposes VSETs and Enclosure devices only */
+ if (RES_IS_GSCSI(res->cfg_entry) &&
+ scsi_dev->type != TYPE_ENCLOSURE)
+ return -ENXIO;
+
+ pmcraid_info("configuring %x:%x:%x:%x\n",
+ scsi_dev->host->unique_id,
+ scsi_dev->channel,
+ scsi_dev->id,
+ scsi_dev->lun);
+
+ if (RES_IS_GSCSI(res->cfg_entry)) {
+ scsi_dev->allow_restart = 1;
+ } else if (RES_IS_VSET(res->cfg_entry)) {
+ scsi_dev->allow_restart = 1;
+ blk_queue_rq_timeout(scsi_dev->request_queue,
+ PMCRAID_VSET_IO_TIMEOUT);
+ blk_queue_max_sectors(scsi_dev->request_queue,
+ PMCRAID_VSET_MAX_SECTORS);
+ }
+
+ if (scsi_dev->tagged_supported &&
+ (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
+ scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
+ scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
+ scsi_dev->host->cmd_per_lun);
+ } else {
+ scsi_adjust_queue_depth(scsi_dev, 0,
+ scsi_dev->host->cmd_per_lun);
+ }
+
+ return 0;
+}
+
+/**
+ * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ *
+ * @scsi_dev: scsi device struct
+ *
+ * This is called by mid-layer before removing a device. Pointer assignments
+ * done in pmcraid_slave_alloc will be reset to NULL here.
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+{
+ struct pmcraid_resource_entry *res;
+
+ res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
+
+ if (res)
+ res->scsi_dev = NULL;
+
+ scsi_dev->hostdata = NULL;
+}
+
+/**
+ * pmcraid_change_queue_depth - Change the device's queue depth
+ * @scsi_dev: scsi device struct
+ * @depth: depth to set
+ *
+ * Return value
+ * actual depth set
+ */
+static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
+{
+ if (depth > PMCRAID_MAX_CMD_PER_LUN)
+ depth = PMCRAID_MAX_CMD_PER_LUN;
+
+ scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
+
+ return scsi_dev->queue_depth;
+}
+
+/**
+ * pmcraid_change_queue_type - Change the device's queue type
+ * @scsi_dev: scsi device struct
+ * @tag: type of tags to use
+ *
+ * Return value:
+ * actual queue type set
+ */
+static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
+{
+ struct pmcraid_resource_entry *res;
+
+ res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
+
+ if ((res) && scsi_dev->tagged_supported &&
+ (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
+ scsi_set_tag_type(scsi_dev, tag);
+
+ if (tag)
+ scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
+ else
+ scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
+ } else
+ tag = 0;
+
+ return tag;
+}
+
+
+/**
+ * pmcraid_init_cmdblk - initializes a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be initialized
+ * @index: if >=0 first time initialization; otherwise reinitialization
+ *
+ * Return Value
+ * None
+ */
+void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+{
+ struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
+ dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
+
+ if (index >= 0) {
+ /* first time initialization (called from probe) */
+ u32 ioasa_offset =
+ offsetof(struct pmcraid_control_block, ioasa);
+
+ cmd->index = index;
+ ioarcb->response_handle = cpu_to_le32(index << 2);
+ ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
+ ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
+ ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
+ } else {
+ /* re-initialization of various lengths, called once command is
+ * processed by IOA
+ */
+ memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
+ ioarcb->request_flags0 = 0;
+ ioarcb->request_flags1 = 0;
+ ioarcb->cmd_timeout = 0;
+ ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+ ioarcb->ioadl_bus_addr = 0;
+ ioarcb->ioadl_length = 0;
+ ioarcb->data_transfer_length = 0;
+ ioarcb->add_cmd_param_length = 0;
+ ioarcb->add_cmd_param_offset = 0;
+ cmd->ioa_cb->ioasa.ioasc = 0;
+ cmd->ioa_cb->ioasa.residual_data_length = 0;
+ cmd->u.time_left = 0;
+ }
+
+ cmd->cmd_done = NULL;
+ cmd->scsi_cmd = NULL;
+ cmd->release = 0;
+ cmd->completion_req = 0;
+ cmd->dma_handle = 0;
+ init_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reinit_cmdblk - reinitialize a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be reinitialized
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
+{
+ pmcraid_init_cmdblk(cmd, -1);
+}
+
+/**
+ * pmcraid_get_free_cmd - get a free cmd block from command block pool
+ * @pinstance: adapter instance structure
+ *
+ * Return Value:
+ * returns pointer to cmd block or NULL if no blocks are available
+ */
+static struct pmcraid_cmd *pmcraid_get_free_cmd(
+ struct pmcraid_instance *pinstance
+)
+{
+ struct pmcraid_cmd *cmd = NULL;
+ unsigned long lock_flags;
+
+ /* free cmd block list is protected by free_pool_lock */
+ spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+
+ if (!list_empty(&pinstance->free_cmd_pool)) {
+ cmd = list_entry(pinstance->free_cmd_pool.next,
+ struct pmcraid_cmd, free_list);
+ list_del(&cmd->free_list);
+ }
+ spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+
+ /* Initialize the command block before giving it to the caller */
+ if (cmd != NULL)
+ pmcraid_reinit_cmdblk(cmd);
+ return cmd;
+}
+
+/**
+ * pmcraid_return_cmd - return a completed command block back into free pool
+ * @cmd: pointer to the command block
+ *
+ * Return Value:
+ * nothing
+ */
+void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+ list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
+ spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+}
+
+/**
+ * pmcraid_read_interrupts - reads IOA interrupts
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ * interrupts read from IOA
+ */
+static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
+{
+ return ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+}
+
+/**
+ * pmcraid_disable_interrupts - Masks and clears all specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intrs: interrupts to disable
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_disable_interrupts(
+ struct pmcraid_instance *pinstance,
+ u32 intrs
+)
+{
+ u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+ u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
+
+ iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+ iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
+ iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+}
+
+/**
+ * pmcraid_enable_interrupts - Enables specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intrs: interrupts to enable
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_enable_interrupts(
+ struct pmcraid_instance *pinstance,
+ u32 intrs
+)
+{
+ u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+ u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
+
+ iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+ iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+
+ pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
+ ioread32(pinstance->int_regs.global_interrupt_mask_reg),
+ ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
+}
+
+/**
+ * pmcraid_reset_type - Determine the required reset type
+ * @pinstance: pointer to adapter instance structure
+ *
+ * IOA requires hard reset if any of the following conditions is true.
+ * 1. If HRRQ valid interrupt is not masked
+ * 2. IOA reset alert doorbell is set
+ * 3. If there are any error interrupts
+ */
+static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
+{
+ u32 mask;
+ u32 intrs;
+ u32 alerts;
+
+ mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+ intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+ alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+
+ if ((mask & INTRS_HRRQ_VALID) == 0 ||
+ (alerts & DOORBELL_IOA_RESET_ALERT) ||
+ (intrs & PMCRAID_ERROR_INTERRUPTS)) {
+ pmcraid_info("IOA requires hard reset\n");
+ pinstance->ioa_hard_reset = 1;
+ }
+
+ /* If unit check is active, trigger the dump */
+ if (intrs & INTRS_IOA_UNIT_CHECK)
+ pinstance->ioa_unit_check = 1;
+}
+
+/**
+ * pmcraid_bist_done - completion function for PCI BIST
+ * @cmd: pointer to reset command
+ * Return Value
+ * none
+ */
+
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
+
+static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+ int rc;
+ u16 pci_reg;
+
+ rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+
+ /* If PCI config space can't be accessed wait for another two secs */
+ if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
+ cmd->u.time_left > 0) {
+ pmcraid_info("BIST not complete, waiting another 2 secs\n");
+ cmd->timer.expires = jiffies + cmd->u.time_left;
+ cmd->u.time_left = 0;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_bist_done;
+ add_timer(&cmd->timer);
+ } else {
+ cmd->u.time_left = 0;
+ pmcraid_info("BIST is complete, proceeding with reset\n");
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ }
+}
+
+/**
+ * pmcraid_start_bist - starts BIST
+ * @cmd: pointer to reset cmd
+ * Return Value
+ * none
+ */
+static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 doorbells, intrs;
+
+ /* proceed with bist and wait for 2 seconds */
+ iowrite32(DOORBELL_IOA_START_BIST,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+ pmcraid_info("doorbells after start bist: %x intrs: %x \n",
+ doorbells, intrs);
+
+ cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+ cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
+ add_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reset_alert_done - completion routine for reset_alert
+ * @cmd: pointer to command block used in reset sequence
+ * Return value
+ * None
+ */
+static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 status = ioread32(pinstance->ioa_status);
+ unsigned long lock_flags;
+
+ /* if the critical operation in progress bit has cleared or the wait
+ * times out, invoke reset engine to proceed with hard reset. If there
+ * is some more time to wait, restart the timer
+ */
+ if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
+ cmd->u.time_left <= 0) {
+ pmcraid_info("critical op is reset proceeding with reset\n");
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ pmcraid_ioa_reset(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ } else {
+ pmcraid_info("critical op is not yet reset waiting again\n");
+ /* restart timer if some more time is available to wait */
+ cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_reset_alert_done;
+ add_timer(&cmd->timer);
+ }
+}
+
+/**
+ * pmcraid_reset_alert - alerts IOA for a possible reset
+ * @cmd : command block to be used for reset sequence.
+ *
+ * Return Value
+ * None. If PCI config space is accessible, the reset-alert doorbell is
+ * written to the IOA and a timer is started; otherwise the driver
+ * proceeds directly with BIST
+ */
+static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 doorbells;
+ int rc;
+ u16 pci_reg;
+
+ /* If we are able to access IOA PCI config space, alert IOA that we are
+ * going to reset it soon. This enables IOA to preserve persistent error
+ * data if any. In case memory space is not accessible, proceed with
+ * BIST or slot_reset
+ */
+ rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+ if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
+
+ /* wait for IOA permission, i.e. until the CRITICAL_OPERATION bit is
+ * reset. The IOA doesn't generate any interrupts when the CRITICAL
+ * OPERATION bit is reset, so a timer is started to poll for this
+ * bit to be reset.
+ */
+ cmd->u.time_left = PMCRAID_RESET_TIMEOUT;
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+ cmd->timer.function =
+ (void (*)(unsigned long))pmcraid_reset_alert_done;
+ add_timer(&cmd->timer);
+
+ iowrite32(DOORBELL_IOA_RESET_ALERT,
+ pinstance->int_regs.host_ioa_interrupt_reg);
+ doorbells =
+ ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+ pmcraid_info("doorbells after reset alert: %x\n", doorbells);
+ } else {
+ pmcraid_info("PCI config is not accessible starting BIST\n");
+ pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
+ pmcraid_start_bist(cmd);
+ }
+}
+
+/**
+ * pmcraid_timeout_handler - Timeout handler for internally generated ops
+ *
+ * @cmd : pointer to the command structure that timed out
+ *
+ * This function blocks host requests and initiates an adapter reset.
+ *
+ * Return value:
+ * None
+ */
+static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ dev_err(&pinstance->pdev->dev,
+ "Adapter being reset due to command timeout.\n");
+
+ /* Command timeouts result in hard reset sequence. The command that got
+ * timed out may be the one used as part of reset sequence. In this
+ * case restart reset sequence using the same command block even if
+ * reset is in progress. Otherwise fail this command and get a free
+ * command block to restart the reset sequence.
+ */
+ spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+ if (!pinstance->ioa_reset_in_progress) {
+ pinstance->ioa_reset_attempts = 0;
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ /* If we are out of command blocks, just return; some other
+ * command's timeout handler can do the reset job
+ */
+ if (cmd == NULL) {
+ spin_unlock_irqrestore(pinstance->host->host_lock,
+ lock_flags);
+ pmcraid_err("no free cmnd block for timeout handler\n");
+ return;
+ }
+
+ pinstance->reset_cmd = cmd;
+ pinstance->ioa_reset_in_progress = 1;
+ } else {
+ pmcraid_info("reset is already in progress\n");
+
+ if (pinstance->reset_cmd != cmd) {
+ /* This command should have been given to IOA, this
+ * command will be completed by fail_outstanding_cmds
+ * anyway
+ */
+ pmcraid_err("cmd is pending but reset in progress\n");
+ }
+
+ /* If this command was being used as part of the reset
+ * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
+ * causes fail_outstanding_commands not to return the command
+ * block back to free pool
+ */
+ if (cmd == pinstance->reset_cmd)
+ cmd->cmd_done = pmcraid_ioa_reset;
+
+ }
+
+ pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+ scsi_block_requests(pinstance->host);
+ pmcraid_reset_alert(cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+}
+
+/**
+ * pmcraid_internal_done - completion routine for internally generated cmds
+ *
+ * @cmd: command that got response from IOA
+ *
+ * Return Value:
+ * none
+ */
+static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ /* Some of the internal commands are sent with callers blocking for the
+ * response. This is indicated by the cmd->completion_req field. The
+ * response path needs to wake up any waiters waiting for cmd
+ * completion if this flag is set.
+ */
+ if (cmd->completion_req) {
+ cmd->completion_req = 0;
+ complete(&cmd->wait_for_completion);
+ }
+
+ /* most of the internal commands are completed by caller itself, so
+ * no need to return the command block back to free pool until we are
+ * required to do so (e.g once done with initialization).
+ */
+ if (cmd->release) {
+ cmd->release = 0;
+ pmcraid_return_cmd(cmd);
+ }
+}
+
+/**
+ * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
+ *
+ * @cmd: command that got response from IOA
+ *
+ * This routine is called after driver re-reads configuration table due to a
+ * lost CCN. It returns the command block back to free pool and schedules
+ * worker thread to add/delete devices into the system.
+ *
+ * Return Value:
+ * none
+ */
+static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ if (cmd->release) {
+ cmd->release = 0;
+ pmcraid_return_cmd(cmd);
+ }
+ pmcraid_info("scheduling worker for config table reinitialization\n");
+ schedule_work(&cmd->drv_inst->worker_q);
+}
+
+/**
+ * pmcraid_erp_done - Process completion of SCSI error response from device
+ * @cmd: pmcraid_command
+ *
+ * This function copies the sense buffer into the scsi_cmd struct and completes
+ * scsi_cmd by calling scsi_done function.
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
+{
+ struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+
+ if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
+ scsi_cmd->result |= (DID_ERROR << 16);
+ pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n",
+ cmd->ioa_cb->ioarcb.cdb[0], ioasc);
+ }
+
+ /* if we had allocated sense buffers for request sense, copy the sense
+ * and release the buffers
+ */
+ if (cmd->sense_buffer != NULL) {
+ memcpy(scsi_cmd->sense_buffer,
+ cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+ pci_free_consistent(pinstance->pdev,
+ SCSI_SENSE_BUFFERSIZE,
+ cmd->sense_buffer, cmd->sense_buffer_dma);
+ cmd->sense_buffer = NULL;
+ cmd->sense_buffer_dma = 0;
+ }
+
+ scsi_dma_unmap(scsi_cmd);
+ pmcraid_return_cmd(cmd);
+ scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * _pmcraid_fire_command - sends an IOA command to adapter
+ *
+ * This function adds the given block into pending command list
+ * and returns without waiting
+ *
+ * @cmd : command to be sent to the device
+ *
+ * Return Value
+ * None
+ */
+static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ unsigned long lock_flags;
+
+ /* Add this command block to the pending cmd pool. We do this prior to
+ * writing the IOARCB to ioarrin because the IOA might complete the
+ * command by the time we are about to add it to the list. The response
+ * handler (isr/tasklet) looks for the cmd block in the pending list.
+ */
+ spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+ list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
+ spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
+ atomic_inc(&pinstance->outstanding_cmds);
+
+ /* driver writes lower 32-bit value of IOARCB address only */
+ mb();
+ iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
+ pinstance->ioarrin);
+}
+
+/**
+ * pmcraid_send_cmd - fires a command to IOA
+ *
+ * This function also sets up timeout function, and command completion
+ * function
+ *
+ * @cmd: pointer to the command block to be fired to IOA
+ * @cmd_done: command completion function, called once IOA responds
+ * @timeout: timeout to wait for this command completion
+ * @timeout_func: timeout handler
+ *
+ * Return value
+ * none
+ */
+static void pmcraid_send_cmd(
+ struct pmcraid_cmd *cmd,
+ void (*cmd_done) (struct pmcraid_cmd *),
+ unsigned long timeout,
+ void (*timeout_func) (struct pmcraid_cmd *)
+)
+{
+ /* initialize done function */
+ cmd->cmd_done = cmd_done;
+
+ if (timeout_func) {
+ /* setup timeout handler */
+ cmd->timer.data = (unsigned long)cmd;
+ cmd->timer.expires = jiffies + timeout;
+ cmd->timer.function = (void (*)(unsigned long))timeout_func;
+ add_timer(&cmd->timer);
+ }
+
+ /* fire the command to IOA */
+ _pmcraid_fire_command(cmd);
+}
+
+/**
+ * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
+ *
+ * @cmd: pointer to the command block used as part of reset sequence
+ *
+ * Return Value
+ * None
+ */
+static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
+{
+ pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
+ cmd->ioa_cb->ioarcb.cdb[0],
+ le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+ /* Commands sent during the reset sequence reuse the same command block
+ * for the next command, hence reinit the done and timeout functions
+ */
+ pmcraid_reinit_cmdblk(cmd);
+ cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
+ cmd->ioa_cb->ioarcb.resource_handle =
+ cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+ cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
+ cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
+
+ /* fire shutdown command to hardware. */
+ pmcraid_info("firing normal shutdown command (%d) to IOA\n",
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
+
+ pmcraid_send_cmd(cmd, pmcraid_ioa_reset,
+ PMCRAID_SHUTDOWN_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_identify_hrrq - registers host rrq buffers with IOA
+ * @cmd: pointer to command block to be used for identify hrrq
+ *
+ * Return Value
+ * none
+ */
+
+static void pmcraid_querycfg(struct pmcraid_cmd *);
+
+static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
+{
+ struct pmcraid_instance *pinstance = cmd->drv_inst;
+ struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+ int index = 0;
+ __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
+ u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
+
+ pmcraid_reinit_cmdblk(cmd);
+
+ /* Initialize ioarcb */
+ ioarcb->request_type = REQ_TYPE_IOACMD;
+ ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+
+ /* initialize the hrrq number where IOA will respond to this command */
+ ioarcb->hrrq_id = index;
+ ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
+ ioarcb->cdb[1] = index;
+
+ /* IOA expects the 64-bit PCI address to be written in big-endian format
+ * (i.e. cdb[2]=MSByte ... cdb[9]=LSByte).
+ */
+ pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n",
+ hrrq_addr, ioarcb->ioarcb_bus_addr);
+
+ memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
+ memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
+
+ /* Subsequent commands require HRRQ identification to be successful.
+ * Note that this gets called even during reset from SCSI mid-layer
+ * or tasklet
+ */
+ pmcraid_send_cmd(cmd, pmcraid_querycfg,
+ PMCRAID_INTERNAL_TIMEOUT,
+ pmcraid_timeout_handler);
+}
+
+static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
+static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
+
+/**
+ * pmcraid_send_hcam_cmd - send an initialized HCAM command block to IOA
+ *
+ * @cmd: initialized command block pointer
+ *
+ * Return Value
+ * none
+ */
+static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
+{
+ if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
+ atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
+ else
+ atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
+
+ pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
+}
+
+/**
+ * pmcraid_init_hcam - allocate and initialize an HCAM command block
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @type: HCAM type
+ *
+ * Return Value
+ * pointer to initialized pmcraid_cmd structure or NULL
+ */
+static struct pmcraid_cmd *pmcraid_init_hcam
+(
+ struct pmcraid_instance *pinstance,
+ u8 type
+)
+{
+ struct pmcraid_cmd *cmd;
+ struct pmcraid_ioarcb *ioarcb;
+ struct pmcraid_ioadl_desc *ioadl;
+ struct pmcraid_hostrcb *hcam;
+ void (*cmd_done) (struct pmcraid_cmd *);
+ dma_addr_t dma;
+ int rcb_size;
+
+ cmd = pmcraid_get_free_cmd(pinstance);
+
+ if (!cmd) {
+ pmcraid_err("no free command blocks for hcam\n");
+ return cmd;
+ }
+
+ if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
+ rcb_size = sizeof(struct pmcraid_hcam_ccn);