author    Jianyun Li <jyli@marvell.com>    2011-05-11 23:22:44 +0800
committer James Bottomley <JBottomley@Parallels.com>    2011-08-27 08:36:58 -0600
commit    f0c568a478f03536602b1730b9473ee86d61d836 (patch)
tree      497abf5e0c688227271bbb13301b8bf419767f7a
parent    de37920b87a78e5e4a5e40e65548f1bf1cfb2f5c (diff)
[SCSI] mvumi: Add Marvell UMI driver
The Marvell Universal Message Interface (UMI) defines a messaging interface between the host and Marvell products (Plato, for example). It is designed with limited system resources and optimized system performance in mind. The UMI driver translates a host request into a message and sends the message to the FW via UMI; the FW receives the message, processes it, and then sends a response back to the UMI driver. The FW generates an interrupt whenever it needs to send information or a response to the UMI driver.

Signed-off-by: Jianyun Li <jyli@marvell.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
-rw-r--r--  drivers/scsi/Kconfig  |    9
-rw-r--r--  drivers/scsi/Makefile |    1
-rw-r--r--  drivers/scsi/mvumi.c  | 2018
-rw-r--r--  drivers/scsi/mvumi.h  |  505
4 files changed, 2533 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a79b9d86156..74c967e0fbb1 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -560,6 +560,15 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
source "drivers/scsi/mvsas/Kconfig"
+config SCSI_MVUMI
+ tristate "Marvell UMI driver"
+ depends on SCSI && PCI
+ help
+ Module for the Marvell Universal Message Interface (UMI) driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mvumi.
+
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
depends on SCSI && PCI && VIRT_TO_BUS
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3c08f5352b2d..2b469978b952 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas/
+obj-$(CONFIG_SCSI_MVUMI) += mvumi.o
obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
new file mode 100644
index 000000000000..88cf1db21a79
--- /dev/null
+++ b/drivers/scsi/mvumi.c
@@ -0,0 +1,2018 @@
+/*
+ * Marvell UMI driver
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/io.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_eh.h>
+#include <linux/uaccess.h>
+
+#include "mvumi.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("jyli@marvell.com");
+MODULE_DESCRIPTION("Marvell UMI Driver");
+
+static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
+
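+/*
+ * The tag pool is a simple LIFO stack of free command tags:
+ * tag_get_one() pops a tag for an outgoing frame and
+ * tag_release_one() pushes it back once the firmware response
+ * carrying that tag has been consumed.
+ */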
+static void tag_init(struct mvumi_tag *st, unsigned short size)
+{
+ unsigned short i;
+ BUG_ON(size != st->size);
+ st->top = size;
+ for (i = 0; i < size; i++)
+ st->stack[i] = size - 1 - i;
+}
+
+static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
+{
+ BUG_ON(st->top <= 0);
+ return st->stack[--st->top];
+}
+
+static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
+ unsigned short tag)
+{
+ BUG_ON(st->top >= st->size);
+ st->stack[st->top++] = tag;
+}
+
+static bool tag_is_empty(struct mvumi_tag *st)
+{
+ return st->top == 0;
+}
+
+static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++)
+ if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
+ addr_array[i])
+ pci_iounmap(dev, addr_array[i]);
+}
+
+static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ addr_array[i] = pci_iomap(dev, i, 0);
+ if (!addr_array[i]) {
+ dev_err(&dev->dev, "failed to map Bar[%d]\n",
+ i);
+ mvumi_unmap_pci_addr(dev, addr_array);
+ return -ENOMEM;
+ }
+ } else
+ addr_array[i] = NULL;
+
+ dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
+ }
+
+ return 0;
+}
+
+static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
+ enum resource_type type, unsigned int size)
+{
+ struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res) {
+ dev_err(&mhba->pdev->dev,
+ "Failed to allocate memory for resouce manager.\n");
+ return NULL;
+ }
+
+ switch (type) {
+ case RESOURCE_CACHED_MEMORY:
+ res->virt_addr = kzalloc(size, GFP_KERNEL);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate memory,size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ break;
+
+ case RESOURCE_UNCACHED_MEMORY:
+ size = round_up(size, 8);
+ res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
+ &res->bus_addr);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate consistent mem,"
+ "size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ memset(res->virt_addr, 0, size);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
+ kfree(res);
+ return NULL;
+ }
+
+ res->type = type;
+ res->size = size;
+ INIT_LIST_HEAD(&res->entry);
+ list_add_tail(&res->entry, &mhba->res_list);
+
+ return res;
+}
+
+static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
+{
+ struct mvumi_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
+ switch (res->type) {
+ case RESOURCE_UNCACHED_MEMORY:
+ pci_free_consistent(mhba->pdev, res->size,
+ res->virt_addr, res->bus_addr);
+ break;
+ case RESOURCE_CACHED_MEMORY:
+ kfree(res->virt_addr);
+ break;
+ default:
+ dev_err(&mhba->pdev->dev,
+ "unknown resource type %d\n", res->type);
+ break;
+ }
+ list_del(&res->entry);
+ kfree(res);
+ }
+ mhba->fw_flag &= ~MVUMI_FW_ALLOC;
+}
+
+/**
+ * mvumi_make_sgl - Prepares SGL
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sgl_p: SGL to be filled in
+ * @sg_count: returns the number of SG elements
+ *
+ * If successful, this function returns 0. Otherwise, it returns -1.
+ */
+static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
+ void *sgl_p, unsigned char *sg_count)
+{
+ struct scatterlist *sg;
+ struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
+ unsigned int i;
+ unsigned int sgnum = scsi_sg_count(scmd);
+ dma_addr_t busaddr;
+
+ if (sgnum) {
+ sg = scsi_sglist(scmd);
+ *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ if (*sg_count > mhba->max_sge) {
+ dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
+ "than max sg[0x%x].\n",
+ *sg_count, mhba->max_sge);
+ return -1;
+ }
+ for (i = 0; i < *sg_count; i++) {
+ busaddr = sg_dma_address(&sg[i]);
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = 0;
+ m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
+ if ((i + 1) == *sg_count)
+ m_sg->flags |= SGD_EOT;
+
+ m_sg++;
+ }
+ } else {
+ scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
+ pci_map_single(mhba->pdev, scsi_sglist(scmd),
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction)
+ : 0;
+ busaddr = scmd->SCp.dma_handle;
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
+ *sg_count = 1;
+ }
+
+ return 0;
+}
+
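+/*
+ * Allocate a coherent DMA buffer for an internal (driver-generated)
+ * command and describe it to the firmware as a single SGL entry with
+ * the end-of-table flag set.
+ */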
+static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ unsigned int size)
+{
+ struct mvumi_sgl *m_sg;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+
+ if (size == 0)
+ return 0;
+
+ virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
+ if (!virt_addr)
+ return -1;
+
+ memset(virt_addr, 0, size);
+
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ cmd->frame->sg_counts = 1;
+ cmd->data_buf = virt_addr;
+
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(size);
+
+ return 0;
+}
+
+static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
+ unsigned int buf_size)
+{
+ struct mvumi_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
+ " frame,size = %d.\n", mhba->ib_max_size);
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (buf_size) {
+ if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory"
+ " for internal frame\n");
+ kfree(cmd->frame);
+ kfree(cmd);
+ return NULL;
+ }
+ } else
+ cmd->frame->sg_counts = 0;
+
+ return cmd;
+}
+
+static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ struct mvumi_sgl *m_sg;
+ unsigned int size;
+ dma_addr_t phy_addr;
+
+ if (cmd && cmd->frame) {
+ if (cmd->frame->sg_counts) {
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ size = m_sg->size;
+
+ phy_addr = (dma_addr_t) m_sg->baseaddr_l |
+ (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
+
+ pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ phy_addr);
+ }
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_get_cmd - Get a command from the free pool
+ * @mhba: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd = NULL;
+
+ if (likely(!list_empty(&mhba->cmd_pool))) {
+ cmd = list_entry((&mhba->cmd_pool)->next,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ } else
+ dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
+
+ return cmd;
+}
+
+/**
+ * mvumi_return_cmd - Return a cmd to free command pool
+ * @mhba: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ cmd->scmd = NULL;
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+}
+
+/**
+ * mvumi_free_cmds - Free all the cmds in the free cmd pool
+ * @mhba: Adapter soft state
+ */
+static void mvumi_free_cmds(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_alloc_cmds - Allocates the command packets
+ * @mhba: Adapter soft state
+ *
+ */
+static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
+{
+ int i;
+ struct mvumi_cmd *cmd;
+
+ for (i = 0; i < mhba->max_io; i++) {
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ goto err_exit;
+
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame)
+ goto err_exit;
+ }
+ return 0;
+
+err_exit:
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for cmd[0x%x].\n", i);
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+ return -ENOMEM;
+}
+
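+/*
+ * The inbound list is a circular queue shared with the firmware. The
+ * slot number lives in the low bits of the pointer registers and
+ * CL_POINTER_TOGGLE flips on every wrap-around, so equal slot numbers
+ * with different toggle bits mean "list full" rather than "list empty".
+ */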
+static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+{
+ unsigned int ib_rp_reg, cur_ib_entry;
+
+ if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
+ dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
+ return -1;
+ }
+ ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
+
+ if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
+ (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
+ ((ib_rp_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
+ dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
+ return -1;
+ }
+
+ cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
+ cur_ib_entry++;
+ if (cur_ib_entry >= mhba->list_num_io) {
+ cur_ib_entry -= mhba->list_num_io;
+ mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+ mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
+ *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
+ atomic_inc(&mhba->fw_outstanding);
+
+ return 0;
+}
+
+static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
+{
+ iowrite32(0xfff, mhba->ib_shadow);
+ iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
+}
+
+static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
+ unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
+{
+ unsigned short tag, request_id;
+
+ udelay(1);
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+ request_id = p_outb_frame->request_id;
+ tag = p_outb_frame->tag;
+ if (tag >= mhba->tag_pool.size) {
+ dev_err(&mhba->pdev->dev, "ob frame data error\n");
+ return -1;
+ }
+ if (mhba->tag_cmd[tag] == NULL) {
+ dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
+ return -1;
+ } else if (mhba->tag_cmd[tag]->request_id != request_id &&
+ mhba->request_id_enabled) {
+ dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
+ "cmd request ID:0x%x\n", request_id,
+ mhba->tag_cmd[tag]->request_id);
+ return -1;
+ }
+
+ return 0;
+}
+
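+/*
+ * Drain every new entry from the outbound (response) list: validate the
+ * tag and request ID of each frame, copy the frame into a cached
+ * ob_data buffer, and queue that buffer on free_ob_list for completion
+ * handling before handing the slot back to the firmware.
+ */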
+static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+{
+ unsigned int ob_write_reg, ob_write_shadow_reg;
+ unsigned int cur_obf, assign_obf_end, i;
+ struct mvumi_ob_data *ob_data;
+ struct mvumi_rsp_frame *p_outb_frame;
+
+ do {
+ ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
+ ob_write_shadow_reg = ioread32(mhba->ob_shadow);
+ } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
+
+ cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
+ assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
+
+ if ((ob_write_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
+ assign_obf_end += mhba->list_num_io;
+ }
+
+ for (i = (assign_obf_end - cur_obf); i != 0; i--) {
+ cur_obf++;
+ if (cur_obf >= mhba->list_num_io) {
+ cur_obf -= mhba->list_num_io;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+
+ /* Copy pointer may point to entry in outbound list
+ * before entry has valid data
+ */
+ if (unlikely(p_outb_frame->tag >= mhba->tag_pool.size ||
+ mhba->tag_cmd[p_outb_frame->tag] == NULL ||
+ p_outb_frame->request_id !=
+ mhba->tag_cmd[p_outb_frame->tag]->request_id))
+ if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
+ continue;
+
+ if (!list_empty(&mhba->ob_data_list)) {
+ ob_data = (struct mvumi_ob_data *)
+ list_first_entry(&mhba->ob_data_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&ob_data->list);
+ } else {
+ ob_data = NULL;
+ if (cur_obf == 0) {
+ cur_obf = mhba->list_num_io - 1;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ } else
+ cur_obf -= 1;
+ break;
+ }
+
+ memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
+ p_outb_frame->tag = 0xff;
+
+ list_add_tail(&ob_data->list, &mhba->free_ob_list);
+ }
+ mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
+ iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
+}
+
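+/*
+ * Mask all endpoint-A interrupts and, if the firmware reports a
+ * completed handshake, request a soft reset through the
+ * host-to-ARM doorbell register.
+ */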
+static void mvumi_reset(void *regs)
+{
+ iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
+ if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
+ return;
+
+ iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba);
+
+static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
+{
+ mhba->fw_state = FW_STATE_ABORT;
+ mvumi_reset(mhba->mmio);
+
+ if (mvumi_start(mhba))
+ return FAILED;
+ else
+ return SUCCESS;
+}
+
+static int mvumi_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mvumi_hba *mhba;
+
+ mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->retries);
+
+ return mvumi_wait_for_outstanding(mhba);
+}
+
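+/*
+ * Issue an internal command synchronously: fire it under the host lock,
+ * then sleep on int_cmd_wait_q until the ISR completes it or the
+ * MVUMI_INTERNAL_CMD_WAIT_TIME timeout expires, in which case the tag
+ * and any still-queued frame are reclaimed.
+ */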
+static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ unsigned long flags;
+
+ cmd->cmd_status = REQ_STATUS_PENDING;
+
+ if (atomic_read(&cmd->sync_cmd)) {
+ dev_err(&mhba->pdev->dev,
+ "last blocked cmd not finished, sync_cmd = %d\n",
+ atomic_read(&cmd->sync_cmd));
+ BUG_ON(1);
+ return -1;
+ }
+ atomic_inc(&cmd->sync_cmd);
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ wait_event_timeout(mhba->int_cmd_wait_q,
+ (cmd->cmd_status != REQ_STATUS_PENDING),
+ MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
+
+ /* command timeout */
+ if (atomic_read(&cmd->sync_cmd)) {
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ atomic_dec(&cmd->sync_cmd);
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = NULL;
+ dev_warn(&mhba->pdev->dev, "TIMEOUT: release tag [%d]\n",
+ cmd->frame->tag);
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer)) {
+ dev_warn(&mhba->pdev->dev,
+ "TIMEOUT:A internal command doesn't send!\n");
+ list_del_init(&cmd->queue_pointer);
+ } else
+ atomic_dec(&mhba->fw_outstanding);
+
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ }
+ return 0;
+}
+
+static void mvumi_release_fw(struct mvumi_hba *mhba)
+{
+ mvumi_free_cmds(mhba);
+ mvumi_release_mem_resource(mhba);
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+ kfree(mhba->handshake_page);
+ pci_release_regions(mhba->pdev);
+}
+
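+/*
+ * Ask the firmware to flush the cache of every mapped target by sending
+ * a Marvell vendor-specific shutdown CDB as an internal, non-data
+ * command to each device set in the target map.
+ */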
+static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+ unsigned char device_id, retry = 0;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
+ if (!(mhba->target_map[device_id / bitcount] &
+ (1 << (device_id % bitcount))))
+ continue;
+get_cmd:
+ cmd = mvumi_create_internal_cmd(mhba, 0);
+ if (!cmd) {
+ if (retry++ >= 5) {
+ dev_err(&mhba->pdev->dev, "failed to get memory"
+ " for internal flush cache cmd for "
+ "device %d", device_id);
+ retry = 0;
+ continue;
+ } else
+ goto get_cmd;
+ }
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->device_id = device_id;
+ frame->cmd_flag = CMD_FLAG_NON_DATA;
+ frame->data_transfer_length = 0;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+ frame->cdb[2] = CDB_CORE_SHUTDOWN;
+
+ mvumi_issue_blocked_cmd(mhba, cmd);
+ if (cmd->cmd_status != SAM_STAT_GOOD) {
+ dev_err(&mhba->pdev->dev,
+ "device %d flush cache failed, status=0x%x.\n",
+ device_id, cmd->cmd_status);
+ }
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ }
+ return 0;
+}
+
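+/*
+ * Handshake pages are protected by a simple XOR checksum computed
+ * over every byte of the page's frame content.
+ */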
+static unsigned char
+mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
+ unsigned short len)
+{
+ unsigned char *ptr;
+ unsigned char ret = 0, i;
+
+ ptr = (unsigned char *) p_header->frame_content;
+ for (i = 0; i < len; i++) {
+ ret ^= *ptr;
+ ptr++;
+ }
+
+ return ret;
+}
+
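+/*
+ * Fill in one handshake page for the firmware: host information
+ * (HS_PAGE_HOST_INFO), firmware control (HS_PAGE_FIRM_CTL) or the
+ * communication-list layout (HS_PAGE_CL_INFO), then seal the page
+ * with its XOR checksum.
+ */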
+void mvumi_hs_build_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page2 *hs_page2;
+ struct mvumi_hs_page4 *hs_page4;
+ struct mvumi_hs_page3 *hs_page3;
+ struct timeval time;
+ unsigned int local_time;
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_HOST_INFO:
+ hs_page2 = (struct mvumi_hs_page2 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page2) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page2->host_type = 3; /* 3 means Linux */
+ hs_page2->host_ver.ver_major = VER_MAJOR;
+ hs_page2->host_ver.ver_minor = VER_MINOR;
+ hs_page2->host_ver.ver_oem = VER_OEM;
+ hs_page2->host_ver.ver_build = VER_BUILD;
+ hs_page2->system_io_bus = 0;
+ hs_page2->slot_number = 0;
+ hs_page2->intr_level = 0;
+ hs_page2->intr_vector = 0;
+ do_gettimeofday(&time);
+ local_time = (unsigned int) (time.tv_sec -
+ (sys_tz.tz_minuteswest * 60));
+ hs_page2->seconds_since1970 = local_time;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_FIRM_CTL:
+ hs_page3 = (struct mvumi_hs_page3 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page3) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_CL_INFO:
+ hs_page4 = (struct mvumi_hs_page4 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page4) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
+ hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
+
+ hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
+ hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
+ hs_page4->ib_entry_size = mhba->ib_max_size_setting;
+ hs_page4->ob_entry_size = mhba->ob_max_size_setting;
+ hs_page4->ob_depth = mhba->list_num_io;
+ hs_page4->ib_depth = mhba->list_num_io;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
+ hs_header->page_code);
+ break;
+ }
+}
+
+/**
+ * mvumi_init_data - Initialize requested data for FW
+ * @mhba: Adapter soft state
+ */
+static int mvumi_init_data(struct mvumi_hba *mhba)
+{
+ struct mvumi_ob_data *ob_pool;
+ struct mvumi_res *res_mgnt;
+ unsigned int tmp_size, offset, i;
+ void *virmem, *v;
+ dma_addr_t p;
+
+ if (mhba->fw_flag & MVUMI_FW_ALLOC)
+ return 0;
+
+ tmp_size = mhba->ib_max_size * mhba->max_io;
+ tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
+ tmp_size += 8 + sizeof(u32) + 16;
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_UNCACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for inbound list\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ p = res_mgnt->bus_addr;
+ v = res_mgnt->virt_addr;
+ /* ib_list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_list = v;
+ mhba->ib_list_phys = p;
+ v += mhba->ib_max_size * mhba->max_io;
+ p += mhba->ib_max_size * mhba->max_io;
+ /* ib shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_shadow = v;
+ mhba->ib_shadow_phys = p;
+ p += sizeof(u32);
+ v += sizeof(u32);
+ /* ob shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ob_shadow = v;
+ mhba->ob_shadow_phys = p;
+ p += 8;
+ v += 8;
+
+ /* ob list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+
+ mhba->ob_list = v;
+ mhba->ob_list_phys = p;
+
+ /* ob data pool */
+ tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
+ tmp_size = round_up(tmp_size, 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for outbound data buffer\n");
+ goto fail_alloc_dma_buf;
+ }
+ virmem = res_mgnt->virt_addr;
+
+ for (i = mhba->max_io; i != 0; i--) {
+ ob_pool = (struct mvumi_ob_data *) virmem;
+ list_add_tail(&ob_pool->list, &mhba->ob_data_list);
+ virmem += mhba->ob_max_size + sizeof(*ob_pool);
+ }
+
+ tmp_size = sizeof(unsigned short) * mhba->max_io +
+ sizeof(struct mvumi_cmd *) * mhba->max_io;
+ tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
+ (sizeof(unsigned char) * 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for tag and target map\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ virmem = res_mgnt->virt_addr;
+ mhba->tag_pool.stack = virmem;
+ mhba->tag_pool.size = mhba->max_io;
+ tag_init(&mhba->tag_pool, mhba->max_io);
+ virmem += sizeof(unsigned short) * mhba->max_io;
+
+ mhba->tag_cmd = virmem;
+ virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
+
+ mhba->target_map = virmem;
+
+ mhba->fw_flag |= MVUMI_FW_ALLOC;
+ return 0;
+
+fail_alloc_dma_buf:
+ mvumi_release_mem_resource(mhba);
+ return -1;
+}
+
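+/*
+ * Validate a handshake page received from the firmware and, for the
+ * capability page (HS_PAGE_FIRM_CAP), cache the firmware limits:
+ * maximum outstanding I/O, list depth, transfer size, target count
+ * and the inbound/outbound entry sizes.
+ */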
+static int mvumi_hs_process_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page1 *hs_page1;
+ unsigned char page_checksum;
+
+ page_checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ if (page_checksum != hs_header->checksum) {
+ dev_err(&mhba->pdev->dev, "checksum error\n");
+ return -1;
+ }
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_FIRM_CAP:
+ hs_page1 = (struct mvumi_hs_page1 *) hs_header;
+
+ mhba->max_io = hs_page1->max_io_support;
+ mhba->list_num_io = hs_page1->cl_inout_list_depth;
+ mhba->max_transfer_size = hs_page1->max_transfer_size;
+ mhba->max_target_id = hs_page1->max_devices_support;
+ mhba->hba_capability = hs_page1->capability;
+ mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
+ mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
+
+ mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
+ mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
+
+ dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
+ hs_page1->fw_ver.ver_build);
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "handshake: page code error\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * mvumi_handshake - Move the FW to READY state
+ * @mhba: Adapter soft state
+ *
+ * During initialization, the FW can potentially be in any one of several
+ * possible states. If the FW is in the operational or waiting-for-handshake
+ * state, the driver must take steps to bring it to the ready state.
+ * Otherwise, it has to wait for the ready state.
+ */
+static int mvumi_handshake(struct mvumi_hba *mhba)
+{
+ unsigned int hs_state, tmp, hs_fun;
+ struct mvumi_hs_header *hs_header;
+ void *regs = mhba->mmio;
+
+ if (mhba->fw_state == FW_STATE_STARTING)
+ hs_state = HS_S_START;
+ else {
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
+ hs_state = HS_GET_STATE(tmp);
+ dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
+ if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
+ mhba->fw_state = FW_STATE_STARTING;
+ return -1;
+ }
+ }
+
+ hs_fun = 0;
+ switch (hs_state) {
+ case HS_S_START:
+ mhba->fw_state = FW_STATE_HANDSHAKING;
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_RESET);
+ iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_RESET:
+ iowrite32(lower_32_bits(mhba->handshake_page_phys),
+ regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(upper_32_bits(mhba->handshake_page_phys),
+ regs + CPU_ARM_TO_PCIEA_MSG1);
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+
+ break;
+
+ case HS_S_PAGE_ADDR:
+ case HS_S_QUERY_PAGE:
+ case HS_S_SEND_PAGE:
+ hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
+ if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
+ mhba->hba_total_pages =
+ ((struct mvumi_hs_page1 *) hs_header)->total_pages;
+
+ if (mhba->hba_total_pages == 0)
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if (hs_state == HS_S_QUERY_PAGE) {
+ if (mvumi_hs_process_page(mhba, hs_header)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ if (mvumi_init_data(mhba)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ } else if (hs_state == HS_S_PAGE_ADDR) {
+ hs_header->page_code = 0;
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
+ hs_header->page_code++;
+ if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
+ mvumi_hs_build_page(mhba, hs_header);
+ HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_END);
+
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_END:
+ /* Set communication list ISR */
+ tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
+ iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+ iowrite32(mhba->list_num_io, mhba->ib_shadow);
+ /* Set InBound List Available count shadow */
+ iowrite32(lower_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEL);
+ iowrite32(upper_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEH);
+
+ /* Set OutBound List Available count shadow */
+ iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
+ mhba->ob_shadow);
+ iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
+ iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
+
+ mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->fw_state = FW_STATE_STARTED;
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
+ hs_state);
+ return -1;
+ }
+ return 0;
+}
+
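+/*
+ * Drive the handshake state machine: kick mvumi_handshake() once, then
+ * poll the doorbell status (sleeping between reads) until the firmware
+ * reaches FW_STATE_STARTED, the handshake doorbell fires, or
+ * FW_MAX_DELAY seconds elapse.
+ */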
+static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
+{
+ unsigned int isr_status;
+ unsigned long before;
+
+ before = jiffies;
+ mvumi_handshake(mhba);
+ do {
+ isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
+
+ if (mhba->fw_state == FW_STATE_STARTED)
+ return 0;
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "no handshake response at state 0x%x.\n",
+ mhba->fw_state);
+ dev_err(&mhba->pdev->dev,
+ "isr : global=0x%x,status=0x%x.\n",
+ mhba->global_isr, isr_status);
+ return -1;
+ }
+ rmb();
+ usleep_range(1000, 2000);
+ } while (!(isr_status & DRBL_HANDSHAKE_ISR));
+
+ return 0;
+}
+
+static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
+{
+ void *regs = mhba->mmio;
+ unsigned int tmp;
+ unsigned long before;
+
+ before = jiffies;
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+ while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
+ if (tmp != HANDSHAKE_READYSTATE)
+ iowrite32(DRBL_MU_RESET,
+ regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {