author     David S. Miller <davem@davemloft.net>  2017-03-28 18:05:24 -0700
committer  David S. Miller <davem@davemloft.net>  2017-03-28 18:05:24 -0700
commit     c552a50e5cda4e5942ce77d9f27b08d37d81b0e6 (patch)
tree       7e7730662f567357c963d83dcffb242ade877135 /drivers/net
parent     30b38236e1313e216a869d38cef867d327a0009b (diff)
parent     d0d40a73f1e6061d892eca9a6cf1c85a9e4fd1eb (diff)
Merge branch 'qed-load-unload-mfw'
Yuval Mintz says:

====================
qed: load/unload mfw series

This series corrects the unload flow and greatly enhances its
initialization flow in regard to interactions between the driver and
the management firmware (MFW).

Patch #1 makes sure unloading is done under the management firmware's
'critical section' protection.

Patches #2 - #4 move the driver to a newer scheme for loading in
regard to the MFW; this newer scheme helps clean the device in case a
previous instance has dirtied it [preboot, PDA, etc.].

Patches #5 - #6 let the driver inform the management firmware of the
number of resources, which depend on the non-management firmware used.

Patch #7 then uses a new resource [BDQ] instead of some set value.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
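The heart of the new load scheme (patches #2 - #4) is that the MFW now
answers a LOAD_REQ with a load code telling the driver how much of the
device it owns: the whole engine, one port, or only its own function,
where each level implies the initialization work of the levels below
it. That cascade is visible in the qed_dev.c hunk further down (note
the '/* Fall into */' comment). Below is a compilable stand-alone
sketch of the idea; the FW_MSG_CODE_* constants are re-declared
locally as stand-ins for the real values in qed_hsi.h:

#include <stdio.h>

/* Stand-ins for the real FW_MSG_CODE_* values defined in qed_hsi.h. */
enum fw_load_code {
	FW_MSG_CODE_DRV_LOAD_ENGINE,	/* first driver on the engine */
	FW_MSG_CODE_DRV_LOAD_PORT,	/* first driver on the port */
	FW_MSG_CODE_DRV_LOAD_FUNCTION,	/* engine and port already live */
};

static void init_pf(enum fw_load_code load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
		puts("init engine");
		/* fall through - the engine owner also owns its port */
	case FW_MSG_CODE_DRV_LOAD_PORT:
		puts("init port");
		/* fall through - every PF initializes its own function */
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		puts("init function");
		break;
	}
}

int main(void)
{
	init_pf(FW_MSG_CODE_DRV_LOAD_PORT);	/* prints port + function */
	return 0;
}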
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h          |  33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h     |   3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c      | 579
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h  |  73
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c     |  30
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h      |  87
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c    |  32
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c     |  50
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c      | 819
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h      | 179
10 files changed, 1480 insertions(+), 405 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ca30a27df035..8c85fd2f3949 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,19 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.21"
+
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 10
+#define QED_REVISION_VERSION 10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION \
+ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+ (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -59,8 +71,6 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
#define QED_PF_DEMS_SIZE (4)
@@ -76,6 +86,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
/* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value) \
+ do { \
+	(name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+ } while (0)
+
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -198,6 +217,7 @@ enum qed_resources {
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
+ QED_BDQ,
QED_MAX_RESC,
};
@@ -355,6 +375,12 @@ struct qed_fw_data {
u32 init_ops_size;
};
+#define DRV_MODULE_VERSION \
+ __stringify(QED_MAJOR_VERSION) "." \
+ __stringify(QED_MINOR_VERSION) "." \
+ __stringify(QED_REVISION_VERSION) "." \
+ __stringify(QED_ENGINEERING_VERSION)
+
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
@@ -732,5 +758,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
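The QED_MFW_GET_FIELD/QED_MFW_SET_FIELD helpers above (promoted here
from qed_dcbx.h) are the generic way to read and write sub-fields of
MFW mailbox words, given a pre-shifted field ## _MASK and a matching
field ## _SHIFT. Here is a self-contained demo with a hypothetical
4-bit DEMO_ROLE field; the mask/shift pair is invented for
illustration, real ones live in qed_hsi.h:

#include <stdio.h>

#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value)				      \
	do {								      \
		(name) &= ~(field ## _MASK);				      \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
	} while (0)

/* Hypothetical 4-bit field in bits [11:8] of a mailbox word; the mask
 * is pre-shifted into position, as the MFW headers define theirs.
 */
#define DEMO_ROLE_MASK	0x00000f00
#define DEMO_ROLE_SHIFT	8

int main(void)
{
	unsigned int mbox = 0xdeadb0ff;

	QED_MFW_SET_FIELD(mbox, DEMO_ROLE, 0x5);
	printf("word = 0x%08x, role = %u\n",
	       mbox, QED_MFW_GET_FIELD(mbox, DEMO_ROLE));
	/* prints: word = 0xdeadb5ff, role = 5 */
	return 0;
}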
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 0fabe97f998d..2eb988fe1298 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality;
};
-#define QED_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 11e45f0f7779..e75c83351d34 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1106,25 +1106,34 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data)
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+ struct qed_drv_load_params *p_drv_load)
{
+ memset(p_load_req, 0, sizeof(*p_load_req));
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+{
+ struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
- if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL;
}
if (IS_PF(cdev)) {
- rc = qed_init_fw_data(cdev, bin_fw_data);
+ rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
if (rc)
return rc;
}
@@ -1150,17 +1159,21 @@ int qed_hw_init(struct qed_dev *cdev,
if (rc)
return rc;
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+ qed_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
if (rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc;
}
- qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+ load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1181,11 +1194,15 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn, p_hwfn->hw_info.hw_mode,
- b_hw_start, int_mode,
- allow_npar_tx_switch);
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected load code [0x%08x]", load_code);
rc = -EINVAL;
break;
}
@@ -1225,10 +1242,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
- drv_mb_param = (FW_MAJOR_VERSION << 24) |
- (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) |
- (FW_ENGINEERING_VERSION);
+ drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param);
@@ -1303,27 +1317,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
int qed_hw_stop(struct qed_dev *cdev)
{
- int rc = 0, t_rc;
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc, rc2 = 0;
int j;
for_each_hwfn(cdev, j) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &cdev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+
+ qed_slowpath_irq_sync(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
rc = qed_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1346,20 +1386,37 @@ int qed_hw_stop(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+		rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt, false);
- if (t_rc != 0)
- rc = t_rc;
+ rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ rc2 = -EINVAL;
+ }
}
- return rc;
+ return rc2;
}
void qed_hw_stop_fastpath(struct qed_dev *cdev)
@@ -1404,89 +1461,6 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static int qed_reg_assert(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 reg, bool expected)
-{
- u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int qed_hw_reset(struct qed_dev *cdev)
-{
- int rc = 0;
- u32 unload_resp, unload_param;
- u32 wol_param;
- int i;
-
- switch (cdev->wol_config) {
- case QED_OV_WOL_DISABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
- break;
- case QED_OV_WOL_ENABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
- break;
- default:
- DP_NOTICE(cdev,
- "Unknown WoL configuration %02x\n", cdev->wol_config);
- /* Fallthrough */
- case QED_OV_WOL_DEFAULT:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
- }
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
- if (IS_VF(cdev)) {
- rc = qed_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
-
- /* Disable PF in HW blocks */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- TCFC_REG_STRONG_ENABLE_PF, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- CCFC_REG_STRONG_ENABLE_PF, 0);
-
- /* Send unload command to MCP */
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
-
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
- return rc;
- }
- }
-
- return rc;
-}
-
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
@@ -1591,187 +1565,222 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
RESC_NUM(p_hwfn, QED_SB));
}
-static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
- case QED_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
case QED_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case QED_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case QED_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case QED_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case QED_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case QED_MAC:
+ return "MAC";
case QED_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case QED_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case QED_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case QED_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case QED_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ return "BDQ";
+ case QED_SB:
+ return "SB";
default:
- break;
+ return "UNKNOWN_RESOURCE";
}
+}
- return mfw_res_id;
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ int rc;
+
+ rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return 0;
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ switch (res_id) {
+ case QED_LL2_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ /* No need for a case for QED_CMDQS_CQS since
+ * CNQ/CMDQS are the same resource.
+ */
+ resc_max_val = NUM_OF_CMDQS_CQS;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+ : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ break;
+ case QED_BDQ:
+ resc_max_val = BDQ_NUM_RESOURCES;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return -EINVAL;
+ }
+
+ return 0;
}
-static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
- case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
- break;
case QED_L2_QUEUE:
- dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2
- : MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case QED_VPORT:
- dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2
- : MAX_NUM_VPORTS_BB) / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
break;
case QED_RSS_ENG:
- dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2
- : ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case QED_PQ:
- /* The granularity of the PQs is 8 */
- dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2
- : MAX_QM_TX_QUEUES_BB) / num_funcs;
- dflt_resc_num &= ~0x7;
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2
- : PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case QED_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
- : RDMA_NUM_STATISTIC_COUNTERS_BB) /
- num_funcs;
-
+ *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+ RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
break;
- default:
+ case QED_BDQ:
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
+ break;
+ case QED_SB:
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ *p_resc_num = sb_cnt_info.sb_cnt;
break;
+ default:
+ return -EINVAL;
}
- return dflt_resc_num;
-}
-
-static const char *qed_hw_get_resc_name(enum qed_resources res_id)
-{
switch (res_id) {
- case QED_SB:
- return "SB";
- case QED_L2_QUEUE:
- return "L2_QUEUE";
- case QED_VPORT:
- return "VPORT";
- case QED_RSS_ENG:
- return "RSS_ENG";
- case QED_PQ:
- return "PQ";
- case QED_RL:
- return "RL";
- case QED_MAC:
- return "MAC";
- case QED_VLAN:
- return "VLAN";
- case QED_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case QED_ILT:
- return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
- case QED_CMDQS_CQS:
- return "CMDQS_CQS";
- case QED_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
+ break;
default:
- return "UNKNOWN_RESOURCE";
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
}
+
+ return 0;
}
-static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
int rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- /* Default values assumes that each function received equal share */
- dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
+ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc) {
DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
- }
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
-
- memset(&resc_info, 0, sizeof(resc_info));
- resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d [%s] with the MFW resources\n",
- res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
+ return rc;
}
- rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n",
@@ -1784,13 +1793,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- DP_NOTICE(p_hwfn,
- "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
- res_id,
- qed_hw_get_resc_name(res_id),
- mcp_resp, dflt_resc_num, dflt_resc_start);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -1798,13 +1806,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
/* Special handling for status blocks; Would be revised in future */
if (res_id == QED_SB) {
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
-
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
out:
/* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit.
@@ -1822,18 +1826,85 @@ out:
return 0;
}
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
- bool b_ah = QED_IS_AH(p_hwfn->cdev);
- u8 res_id;
int rc;
+ u8 res_id;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
- rc = qed_hw_set_resc_info(p_hwfn, res_id);
+ rc = __qed_hw_set_resc_info(p_hwfn, res_id);
if (rc)
return rc;
}
+ return 0;
+}
+
+#define QED_RESC_ALLOC_LOCK_RETRY_CNT 10
+#define QED_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params resc_unlock_params;
+ struct qed_resc_lock_params resc_lock_params;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u8 res_id;
+ int rc;
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+	 * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ */
+ memset(&resc_lock_params, 0, sizeof(resc_lock_params));
+ resc_lock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+ resc_lock_params.retry_num = QED_RESC_ALLOC_LOCK_RETRY_CNT;
+ resc_lock_params.retry_interval = QED_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+ resc_lock_params.sleep_b4_retry = true;
+ memset(&resc_unlock_params, 0, sizeof(resc_unlock_params));
+ resc_unlock_params.resource = QED_RESC_LOCK_RESC_ALLOC;
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (!rc && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ return -EBUSY;
+ } else {
+ rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc && rc != -EINVAL) {
+ DP_NOTICE(p_hwfn,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = qed_hw_set_resc_info(p_hwfn);
+ if (rc)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
/* Sanity for ILT */
if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
(!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
@@ -1845,8 +1916,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
- DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id),
@@ -1854,6 +1923,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
RESC_START(p_hwfn, res_id));
return 0;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ return rc;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2184,7 +2258,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
- return qed_hw_get_resc(p_hwfn);
+ return qed_hw_get_resc(p_hwfn, p_ptt);
}
static int qed_get_dev_info(struct qed_dev *cdev)
@@ -2306,6 +2380,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
goto err2;
}
+ /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+ * is called as it sets the ports number in an engine.
+ */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
if (rc)
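One pattern worth calling out in the qed_hw_stop() rework above:
instead of returning on the first failure, every teardown step now
runs, each failure is logged with its rc, and a sticky rc2 carries the
overall verdict out, so a dead mailbox cannot leave the HW blocks
enabled. A minimal stand-alone sketch of the pattern; the three step
functions are stand-ins, not the driver's code:

#include <stdio.h>

/* Stand-in teardown steps; each may fail independently. */
static int send_unload_req(void)  { return 0; }
static int stop_pf(void)          { return -1; /* simulate a failure */ }
static int send_unload_done(void) { return 0; }

static int hw_stop(void)
{
	int rc, rc2 = 0;

	rc = send_unload_req();
	if (rc) {
		fprintf(stderr, "UNLOAD_REQ failed. rc = %d.\n", rc);
		rc2 = -1;	/* remember, but keep tearing down */
	}

	rc = stop_pf();
	if (rc) {
		fprintf(stderr, "PF stop failed. rc = %d.\n", rc);
		rc2 = -1;
	}

	rc = send_unload_done();
	if (rc) {
		fprintf(stderr, "UNLOAD_DONE failed. rc = %d.\n", rc);
		rc2 = -1;
	}

	return rc2;	/* an early failure never skips later steps */
}

int main(void)
{
	return hw_stop() ? 1 : 0;
}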
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 6812003411cd..2c6637fd7ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/
void qed_resc_setup(struct qed_dev *cdev);
+enum qed_override_force_load {
+ QED_OVERRIDE_FORCE_LOAD_NONE,
+ QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+ QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define QED_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+ /* Tunneling parameters */
+ struct qed_tunn_start_params *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum qed_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports for tx-switching */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct qed_drv_load_params *p_drv_load_params;
+};
+
/**
* @brief qed_hw_init -
*
* @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- * for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- * Pass NULL if not using binary fw file.
+ * @param p_params
*
* @return int
*/
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -140,14 +177,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev);
*/
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
- *
- * @return int
- */
-int qed_hw_reset(struct qed_dev *cdev);
/**
* @brief qed_hw_prepare -
@@ -441,4 +470,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
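For a sense of what call sites gain from the two structures above: the
qed_main.c hunk is not shown on this page, but a caller now packs
everything into qed_drv_load_params and qed_hw_init_params instead of
threading six arguments through qed_hw_init(). A compilable sketch
with the types re-declared here in minimal form; the field values are
illustrative defaults, not the actual qed_main.c settings:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

enum qed_int_mode { QED_INT_MODE_MSIX };
enum qed_override_force_load { QED_OVERRIDE_FORCE_LOAD_NONE };

struct qed_drv_load_params {
	bool is_crash_kernel;
	unsigned char mfw_timeout_val;
#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
	bool avoid_eng_reset;
	enum qed_override_force_load override_force_load;
};

struct qed_hw_init_params {
	void *p_tunn;	/* stands in for struct qed_tunn_start_params * */
	bool b_hw_start;
	enum qed_int_mode int_mode;
	bool allow_npar_tx_switch;
	const unsigned char *bin_fw_data;
	struct qed_drv_load_params *p_drv_load_params;
};

int main(void)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = false; /* e.g. !is_kdump_kernel() */
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;

	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.p_tunn = NULL;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = QED_INT_MODE_MSIX;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = NULL;	/* no binary fw file */
	hw_init_params.p_drv_load_params = &drv_load_params;

	/* In the driver this would now be:
	 *	rc = qed_hw_init(cdev, &hw_init_params);
	 */
	return 0;
}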
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cbc81412174f..60921b72c995 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/e