Diffstat (limited to 'drivers/net')
23 files changed, 2538 insertions, 107 deletions
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e7f68ac0c7e3..eafe6bedc692 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1347,9 +1347,16 @@ static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip)
 	if (err)
 		return err;
 
-	err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
-	if (err)
-		return err;
+	/* The chips that have a "learn2all" bit in Global1, ATU
+	 * Control are precisely those whose port registers have a
+	 * Message Port bit in Port Control 1 and hence implement
+	 * ->port_setup_message_port.
+	 */
+	if (chip->info->ops->port_setup_message_port) {
+		err = mv88e6xxx_g1_atu_set_learn2all(chip, true);
+		if (err)
+			return err;
+	}
 
 	return mv88e6xxx_g1_atu_set_age_time(chip, 300000);
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7b444fcb6289..9ff79d5d14c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2100,19 +2100,16 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
 				u16 ext, u16 *index, u32 *item_length,
 				u32 *data_length);
 
-static int bnxt_flash_nvram(struct net_device *dev,
-			    u16 dir_type,
-			    u16 dir_ordinal,
-			    u16 dir_ext,
-			    u16 dir_attr,
-			    const u8 *data,
-			    size_t data_len)
+static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+			      u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+			      u32 dir_item_len, const u8 *data,
+			      size_t data_len)
 {
 	struct bnxt *bp = netdev_priv(dev);
 	int rc;
 	struct hwrm_nvm_write_input req = {0};
 	dma_addr_t dma_handle;
-	u8 *kmem;
+	u8 *kmem = NULL;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
 
@@ -2120,26 +2117,42 @@ static int bnxt_flash_nvram(struct net_device *dev,
 	req.dir_ordinal = cpu_to_le16(dir_ordinal);
 	req.dir_ext = cpu_to_le16(dir_ext);
 	req.dir_attr = cpu_to_le16(dir_attr);
-	req.dir_data_length = cpu_to_le32(data_len);
+	req.dir_item_length = cpu_to_le32(dir_item_len);
+	if (data_len && data) {
+		req.dir_data_length = cpu_to_le32(data_len);
 
-	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
-				  GFP_KERNEL);
-	if (!kmem) {
-		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
-			   (unsigned)data_len);
-		return -ENOMEM;
+		kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+					  GFP_KERNEL);
+		if (!kmem)
+			return -ENOMEM;
+
+		memcpy(kmem, data, data_len);
+		req.host_src_addr = cpu_to_le64(dma_handle);
 	}
-	memcpy(kmem, data, data_len);
-	req.host_src_addr = cpu_to_le64(dma_handle);
 
-	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
-	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+	if (kmem)
+		dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
 
 	if (rc == -EACCES)
 		bnxt_print_admin_err(bp);
 	return rc;
 }
 
+static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+			    u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+			    const u8 *data, size_t data_len)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr,
+				0, data, data_len);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
 				    u8 self_reset, u8 flags)
 {
@@ -2419,90 +2432,141 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
 	return rc;
 }
 
+#define BNXT_PKG_DMA_SIZE	0x40000
+#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
+#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+
 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
 				   u32 install_type)
 {
-	struct bnxt *bp = netdev_priv(dev);
-	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_nvm_install_update_input install = {0};
+	struct hwrm_nvm_install_update_output resp = {0};
+	struct hwrm_nvm_modify_input modify = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	bool defrag_attempted = false;
+	dma_addr_t dma_handle;
+	u8 *kmem = NULL;
+	u32 modify_len;
 	u32 item_len;
 	int rc = 0;
 	u16 index;
 
 	bnxt_hwrm_fw_set_time(bp);
 
-	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
-				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
-				  &index, &item_len, NULL);
-	if (rc) {
-		netdev_err(dev, "PKG update area not created in nvram\n");
-		return rc;
+	bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+	/* Try allocating a large DMA buffer first.  Older fw will
+	 * cause excessive NVRAM erases when using small blocks.
+	 */
+	modify_len = roundup_pow_of_two(fw->size);
+	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
+	while (1) {
+		kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len,
+					  &dma_handle, GFP_KERNEL);
+		if (!kmem && modify_len > PAGE_SIZE)
+			modify_len /= 2;
+		else
+			break;
 	}
+	if (!kmem)
+		return -ENOMEM;
 
-	if (fw->size > item_len) {
-		netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
-			   (unsigned long)fw->size);
-		rc = -EFBIG;
-	} else {
-		dma_addr_t dma_handle;
-		u8 *kmem;
-		struct hwrm_nvm_modify_input modify = {0};
+	modify.host_src_addr = cpu_to_le64(dma_handle);
 
-		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+	if ((install_type & 0xffff) == 0)
+		install_type >>= 16;
+	install.install_type = cpu_to_le32(install_type);
+
+	do {
+		u32 copied = 0, len = modify_len;
+
+		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+					  BNX_DIR_ORDINAL_FIRST,
+					  BNX_DIR_EXT_NONE,
+					  &index, &item_len, NULL);
+		if (rc) {
+			netdev_err(dev, "PKG update area not created in nvram\n");
+			break;
+		}
+		if (fw->size > item_len) {
+			netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
				   (unsigned long)fw->size);
+			rc = -EFBIG;
+			break;
+		}
 
 		modify.dir_idx = cpu_to_le16(index);
-		modify.len = cpu_to_le32(fw->size);
 
-		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
-					  &dma_handle, GFP_KERNEL);
-		if (!kmem) {
-			netdev_err(dev,
-				   "dma_alloc_coherent failure, length = %u\n",
-				   (unsigned int)fw->size);
-			rc = -ENOMEM;
-		} else {
-			memcpy(kmem, fw->data, fw->size);
-			modify.host_src_addr = cpu_to_le64(dma_handle);
+		if (fw->size > modify_len)
+			modify.flags = BNXT_NVM_MORE_FLAG;
+		while (copied < fw->size) {
+			u32 balance = fw->size - copied;
+
+			if (balance <= modify_len) {
+				len = balance;
+				if (copied)
+					modify.flags |= BNXT_NVM_LAST_FLAG;
+			}
+			memcpy(kmem, fw->data + copied, len);
+			modify.len = cpu_to_le32(len);
+			modify.offset = cpu_to_le32(copied);
 			rc = hwrm_send_message(bp, &modify, sizeof(modify),
 					       FLASH_PACKAGE_TIMEOUT);
-			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
-					  dma_handle);
+			if (rc)
+				goto pkg_abort;
+			copied += len;
 		}
-	}
-	if (rc)
-		goto err_exit;
-
-	if ((install_type & 0xffff) == 0)
-		install_type >>= 16;
-	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
-	install.install_type = cpu_to_le32(install_type);
+
+		mutex_lock(&bp->hwrm_cmd_lock);
+		rc = _hwrm_send_message_silent(bp, &install, sizeof(install),
+					       INSTALL_PACKAGE_TIMEOUT);
+		memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &install, sizeof(install),
-				INSTALL_PACKAGE_TIMEOUT);
-	if (rc) {
-		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+		if (defrag_attempted) {
+			/* We have tried to defragment already in the previous
+			 * iteration. Return with the result for INSTALL_UPDATE
+			 */
+			mutex_unlock(&bp->hwrm_cmd_lock);
+			break;
+		}
 
-		if (resp->error_code && error_code ==
+		if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
 		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
-			install.flags |= cpu_to_le16(
-			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
-			rc = _hwrm_send_message(bp, &install, sizeof(install),
-						INSTALL_PACKAGE_TIMEOUT);
+			install.flags |=
+				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+
+			rc = _hwrm_send_message_silent(bp, &install,
+						       sizeof(install),
+						       INSTALL_PACKAGE_TIMEOUT);
+			memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
+
+			if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
+			    NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+				/* FW has cleared NVM area, driver will create
+				 * UPDATE directory and try the flash again
+				 */
+				defrag_attempted = true;
+				rc = __bnxt_flash_nvram(bp->dev,
+							BNX_DIR_TYPE_UPDATE,
+							BNX_DIR_ORDINAL_FIRST,
+							0, 0, item_len, NULL,
+							0);
+			} else if (rc) {
+				netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+			}
+		} else if (rc) {
+			netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
 		}
-		if (rc)
-			goto flash_pkg_exit;
-	}
+		mutex_unlock(&bp->hwrm_cmd_lock);
+	} while (defrag_attempted && !rc);
 
-	if (resp->result) {
+pkg_abort:
+	dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle);
+	if (resp.result) {
 		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
-			   (s8)resp->result, (int)resp->problem_item);
+			   (s8)resp.result, (int)resp.problem_item);
 		rc = -ENOPKG;
 	}
-flash_pkg_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
-err_exit:
 	if (rc == -EACCES)
 		bnxt_print_admin_err(bp);
 	return rc;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 797886524054..39757b4cf8f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -570,12 +570,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		u16 vlan_tag = 0;
 		u8 rx_ptype;
 
-		if (cleaned_count >= ICE_RX_BUF_WRITE) {
-			failure |= ice_alloc_rx_bufs_zc(rx_ring,
-							cleaned_count);
-			cleaned_count = 0;
-		}
-
 		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
@@ -642,6 +636,9 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		ice_receive_skb(rx_ring, skb, vlan_tag);
 	}
 
+	if (cleaned_count >= ICE_RX_BUF_WRITE)
+		failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+
 	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
 	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 543a1d047567..16caa02095fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -9,6 +9,7 @@ config OCTEONTX2_MBOX
 config OCTEONTX2_AF
 	tristate "Marvell OcteonTX2 RVU Admin Function driver"
 	select OCTEONTX2_MBOX
+	select NET_DEVLINK
 	depends on (64BIT && COMPILE_TEST) || ARM64
 	depends on PCI
 	help
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 7100d1dd856e..eb535c98ca38 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
 octeontx2_mbox-y := mbox.o rvu_trace.o
 octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
 		  rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
-		  rvu_cpt.o
+		  rvu_cpt.o rvu_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 9f901c0edcbb..e8fd712860a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2826,17 +2826,23 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_flr;
 
+	err = rvu_register_dl(rvu);
+	if (err)
+		goto err_irq;
+
 	rvu_setup_rvum_blk_revid(rvu);
 
 	/* Enable AF's VFs (if any) */
 	err = rvu_enable_sriov(rvu);
 	if (err)
-		goto err_irq;
+		goto err_dl;
 
 	/* Initialize debugfs */
 	rvu_dbg_init(rvu);
 
 	return 0;
+err_dl:
+	rvu_unregister_dl(rvu);
 err_irq:
 	rvu_unregister_interrupts(rvu);
 err_flr:
@@ -2868,6 +2874,7 @@ static void rvu_remove(struct pci_dev *pdev)
 	rvu_dbg_exit(rvu);
 	rvu_unregister_interrupts(rvu);
+	rvu_unregister_dl(rvu);
 	rvu_flr_wq_destroy(rvu);
 	rvu_cgx_exit(rvu);
 	rvu_fwdata_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b6c0977499ab..b1a6ecfd563e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -12,7 +12,10 @@
 #define RVU_H
 
 #include <linux/pci.h>
+#include <net/devlink.h>
+
 #include "rvu_struct.h"
+#include "rvu_devlink.h"
 #include "common.h"
 #include "mbox.h"
 #include "npc.h"
@@ -422,6 +425,7 @@ struct rvu {
 #ifdef CONFIG_DEBUG_FS
 	struct rvu_debugfs rvu_dbg;
 #endif
+	struct rvu_devlink *rvu_dl;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000000..3f9d0ab6d5ae
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Devlink
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include<linux/bitfield.h>
+
+#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
+
+#define DRV_NAME "octeontx2-af"
+
+static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+	int err;
+
+	err = devlink_fmsg_pair_nest_start(fmsg, name);
+	if (err)
+		return err;
+
+	return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+	int err;
+
+	err = devlink_fmsg_obj_nest_end(fmsg);
+	if (err)
+		return err;
+
+	return devlink_fmsg_pair_nest_end(fmsg);
+}
+
+static bool rvu_common_request_irq(struct rvu *rvu, int offset,
+				   const char *name, irq_handler_t fn)
+{
+	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+	int rc;
+
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+	rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+			 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+	if (rc)
+		dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+	else
+		rvu->irq_allocated[offset] = true;
+
+	return rvu->irq_allocated[offset];
+}
+
+static void rvu_npa_intr_work(struct work_struct *work)
+{
+	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
+	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
+			      "NPA_AF_RVU Error",
+			      rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu_npa_event_ctx *npa_event_context;
+	struct rvu_devlink *rvu_dl = rvu_irq;
+	struct rvu *rvu;
+	int blkaddr;
+	u64 intr;
+
+	rvu = rvu_dl->rvu;
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+	npa_event_context->npa_af_rvu_int = intr;
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_npa_gen_work(struct work_struct *work)
+{
+	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
+	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
+			      "NPA_AF_GEN Error",
+			      rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu_npa_event_ctx *npa_event_context;
+	struct rvu_devlink *rvu_dl = rvu_irq;
+	struct rvu *rvu;
+	int blkaddr;
+	u64 intr;
+
+	rvu = rvu_dl->rvu;
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+	npa_event_context->npa_af_rvu_gen = intr;
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_npa_err_work(struct work_struct *work)
+{
+	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
+	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
+			      "NPA_AF_ERR Error",
+			      rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu_npa_event_ctx *npa_event_context;
+	struct rvu_devlink *rvu_dl = rvu_irq;
+	struct rvu *rvu;
+	int blkaddr;
+	u64 intr;
+
+	rvu = rvu_dl->rvu;
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+	npa_event_context->npa_af_rvu_err = intr;
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_npa_ras_work(struct work_struct *work)
+{
+	struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+
+	rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
+	devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
+			      "HW NPA_AF_RAS Error reported",
+			      rvu_npa_health_reporter->npa_event_ctx);
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu_npa_event_ctx *npa_event_context;
+	struct rvu_devlink *rvu_dl = rvu_irq;
+	struct rvu *rvu;
+	int blkaddr;
+	u64 intr;
+
+	rvu = rvu_dl->rvu;
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return IRQ_NONE;
+
+	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
+	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+	npa_event_context->npa_af_rvu_ras = intr;
+
+	/* Clear interrupts */
+	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+	int i, offs, blkaddr;
+	u64 reg;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return;
+
+	reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+	offs = reg & 0x3FF;
+
+	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+	for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+		if (rvu->irq_allocated[offs + i]) {
+			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+			rvu->irq_allocated[offs + i] = false;
+		}
+}
+
+static int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+	int blkaddr, base;
+	bool rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+	if (blkaddr < 0)
+		return blkaddr;
+
+	/* Get NPA AF MSIX vectors offset. */
+	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+	if (!base) {
+		dev_warn(rvu->dev,
+			 "Failed to get NPA_AF_INT vector offsets\n");
+		return 0;
+	}
+
+	/* Register and enable NPA_AF_RVU_INT interrupt */
+	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
+				    "NPA_AF_RVU_INT",
+				    rvu_npa_af_rvu_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+	/* Register and enable NPA_AF_GEN_INT interrupt */
+	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
+				    "NPA_AF_RVU_GEN",
+				    rvu_npa_af_gen_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+	/* Register and enable NPA_AF_ERR_INT interrupt */
+	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
+				    "NPA_AF_ERR_INT",
+				    rvu_npa_af_err_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+	/* Register and enable NPA_AF_RAS interrupt */
+	rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
+				    "NPA_AF_RAS",
+				    rvu_npa_af_ras_intr_handler);
+	if (!rc)
+		goto err;
+	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+	return 0;
+err:
+	rvu_npa_unregister_interrupts(rvu);
+	return rc;
+}
+
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
+			       enum npa_af_rvu_health health_reporter)
+{
+	struct rvu_npa_event_ctx *npa_event_context;
+	unsigned int intr_val, alloc_dis, free_dis;
+	int err;
+
+	npa_event_context = ctx;
+	switch (health_reporter) {
+	case NPA_AF_RVU_GEN:
+		intr_val = npa_event_context->npa_af_rvu_gen;
+		err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+		if (err)
+			return err;
+		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+						npa_event_context->npa_af_rvu_gen);
+		if (err)
+			return err;
+		if (intr_val & BIT_ULL(32)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
+			if (err)
+				return err;
+		}
+
+		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
+		if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_SSO)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_TIM)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_DPI)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+			if (err)
+				return err;
+		}
+		if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
+			if (err)
+				return err;
+		}
+
+		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
+		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_SSO)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_TIM)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_DPI)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+			if (err)
+				return err;
+		}
+		if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+			if (err)
+				return err;
+		}
+		err = rvu_report_pair_end(fmsg);
+		if (err)
+			return err;
+		break;
+	case NPA_AF_RVU_ERR:
+		err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+		if (err)
+			return err;
+		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+						npa_event_context->npa_af_rvu_err);
+		if (err)
+			return err;
+
+		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+			if (err)
+				return err;
+		}
+		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+			if (err)
+				return err;
+		}
+		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+			if (err)
+				return err;
+		}
+		err = rvu_report_pair_end(fmsg);
+		if (err)
+			return err;
+		break;
+	case NPA_AF_RVU_RAS:
+		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+		if (err)
+			return err;
+		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+						npa_event_context->npa_af_rvu_ras);
+		if (err)
+			return err;
+		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+			if (err)
+				return err;
+		}
+		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+			if (err)
+				return err;
+		}
+		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+			if (err)
+				return err;
+		}
+		err = rvu_report_pair_end(fmsg);
+		if (err)
+			return err;
+		break;
+	case NPA_AF_RVU_INTR:
+		err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+		if (err)
+			return err;
+		err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+						npa_event_context->npa_af_rvu_int);
+		if (err)
+			return err;
+		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
+			err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+			if (err)
+				return err;
+		}
+		return rvu_report_pair_end(fmsg);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
+				struct devlink_fmsg *fmsg, void *ctx,
+				struct netlink_ext_ack *netlink_extack)
+{
+	struct rvu *rvu = devlink_health_reporter_priv(reporter);
+	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+	struct rvu_npa_event_ctx *npa_ctx;
+
+	npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
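Editor's note: the new rvu_devlink.c above repeatedly uses one pattern worth calling out: the hard IRQ handler only latches the cause register into an event-context struct and clears the interrupt, then defers the actual devlink health report to a workqueue item. Below is a minimal, hedged sketch of that hand-off, using only the kernel APIs already visible in the diff (devlink_health_report(), container_of(), queue_work()); the my_* names are illustrative placeholders, not symbols from the driver.

    /* Sketch only: IRQ -> workqueue -> devlink health report hand-off.
     * Names prefixed with my_ are illustrative, not taken from the driver.
     */
    #include <linux/interrupt.h>
    #include <linux/workqueue.h>
    #include <net/devlink.h>

    struct my_event_ctx {
        u64 intr;                           /* latched cause bits */
    };

    struct my_reporters {
        struct devlink_health_reporter *intr_reporter;
        struct my_event_ctx *event_ctx;
        struct work_struct intr_work;
        struct workqueue_struct *wq;
    };

    static void my_intr_work(struct work_struct *work)
    {
        struct my_reporters *r = container_of(work, struct my_reporters, intr_work);

        /* Sleeping is allowed here, so the devlink report happens in process context */
        devlink_health_report(r->intr_reporter, "example error", r->event_ctx);
    }

    static irqreturn_t my_intr_handler(int irq, void *data)
    {
        struct my_reporters *r = data;

        r->event_ctx->intr = 1;             /* a real handler would read the HW cause register here */
        queue_work(r->wq, &r->intr_work);   /* defer reporting out of hard-IRQ context */
        return IRQ_HANDLED;
    }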