author     Saeed Mahameed <saeedm@mellanox.com>    2019-07-04 16:40:32 -0400
committer  Saeed Mahameed <saeedm@mellanox.com>    2019-07-04 16:42:59 -0400
commit     e08a976a16cafc20931db1d17aed9183202bfa8d
tree       f2bfbc7013305d65f5647d4f8ec7fbd325a93b0a
parent     e2c746944e26609f63661cedb7c7c31f0578c58f
parent     f8efee08dd9d41ab71010e9b16c9ead51753b7d6
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Misc updates from mlx5-next branch:

1) Add the required HW definitions and structures for upcoming TLS support.
2) Add support for MCQI and MCQS hardware registers for fw version query.
3) Add hardware bits and structure definitions for sub-functions.
4) Small code cleanup and improvement for the PF pci driver.
5) BlueField (ECPF) updates and refactoring for better E-Switch management on the ECPF embedded CPU NIC:
   5.1) Consolidate querying the eswitch number of VFs.
   5.2) Register the event handler at the correct E-Switch init stage.
   5.3) Set up the PF's inline mode and vlan pop when the ECPF is the E-Switch manager (the host PF is basically a VF).
   5.4) Handle vport UC address changes in switchdev mode.
6) Clean up the rep and netdev references when unloading the IB rep.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
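One mechanical change recurs throughout the diff below: struct mlx5_eq_param now carries its event bitmask as a u64 mask[4] array instead of a single u64, so callers set bits positionally after initializing the struct. A minimal sketch of the new pattern, taken from the odp.c and eq.c hunks in this merge (every caller here uses an event type below 64, so mask[0]):

	/* Sketch of the new mlx5_eq_param usage. For an event type N >= 64
	 * the bit would presumably land in mask[N / 64] (an assumption;
	 * every caller in this series sets mask[0]).
	 */
	struct mlx5_eq_param param = {
		.irq_index = 0,
		.nent = MLX5_IB_NUM_PF_EQE,
	};

	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;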
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                             |   5
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c                         |  24
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.h                         |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                           |   4
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h                        |   1
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c                            |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c                |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c               |   9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c          |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c           |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c            |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c                |  63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c           | 181
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h           |  51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  | 237
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c         |   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c           |  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c                |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c               |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h            |   5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c              |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c              |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c             |  42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c             |  43
28 files changed, 442 insertions(+), 326 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 0220736b073e..4efbbd2fce0c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -37,7 +37,7 @@
#include "mlx5_ib.h"
#include "srq.h"
-static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@ -891,6 +891,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_ib_cq *cq;
int uninitialized_var(index);
int uninitialized_var(inlen);
@@ -958,7 +959,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
- err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
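The mlx5_core_create_cq() signature change above means every caller now owns the output mailbox. A hedged caller sketch, mirroring the pattern this patch applies in cq.c here and in en_main.c below (mdev, mcq, in, and inlen are caller-local names as in those hunks):

	/* Caller-side pattern after this change: the caller allocates the
	 * create_cq output mailbox on its own stack and passes it down.
	 */
	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {};
	int err;

	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
	if (err)
		return err;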
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index aa9acebfcc23..74ce9249e75a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -14,9 +14,10 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
int vport_index;
ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch);
- vport_index = ibdev->free_port++;
+ vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
+ rep->rep_data[REP_IB].priv = ibdev;
write_lock(&ibdev->port[vport_index].roce.netdev_lock);
ibdev->port[vport_index].roce.netdev =
mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -28,7 +29,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- int num_ports = MLX5_TOTAL_VPORTS(dev);
+ int num_ports = mlx5_eswitch_get_total_vports(dev);
const struct mlx5_ib_profile *profile;
struct mlx5_ib_dev *ibdev;
int vport_index;
@@ -50,7 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
ibdev->is_rep = true;
- vport_index = ibdev->free_port++;
+ vport_index = rep->vport_index;
ibdev->port[vport_index].rep = rep;
ibdev->port[vport_index].roce.netdev =
mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
@@ -68,15 +69,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
- struct mlx5_ib_dev *dev;
-
- if (!rep->rep_data[REP_IB].priv ||
- rep->vport != MLX5_VPORT_UPLINK)
- return;
+ struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+ struct mlx5_ib_port *port;
- dev = mlx5_ib_rep_to_dev(rep);
- __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ port = &dev->port[rep->vport_index];
+ write_lock(&port->roce.netdev_lock);
+ port->roce.netdev = NULL;
+ write_unlock(&port->roce.netdev_lock);
rep->rep_data[REP_IB].priv = NULL;
+ port->rep = NULL;
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 7a917e6d5c09..de43b423bafc 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -28,7 +28,7 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
#else /* CONFIG_MLX5_ESWITCH */
static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
- return SRIOV_NONE;
+ return MLX5_ESWITCH_NONE;
}
static inline
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1d5f4382d85..ba312bf59c7a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4492,7 +4492,7 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
* lock/unlock above locks Now need to arm all involved CQs.
*/
list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
- mcq->comp(mcq);
+ mcq->comp(mcq, NULL);
}
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
@@ -6814,7 +6814,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
mlx5_ib_register_vport_reps(mdev);
return mdev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 1c205c2bd486..ee73dc122d28 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -978,7 +978,6 @@ struct mlx5_ib_dev {
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
- int free_port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index c594489eb2d7..831c450b271a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1558,9 +1558,9 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
- .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
.nent = MLX5_IB_NUM_PF_EQE,
};
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
eq->core = mlx5_eq_create_generic(dev->mdev, &param);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f6623c77443a..768c7e81f688 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -6297,7 +6297,7 @@ static void handle_drain_completion(struct ib_cq *cq,
/* Run the CQ handler - this makes sure that the drain WR will
* be processed if wasn't processed yet.
*/
- mcq->mcq.comp(&mcq->mcq);
+ mcq->mcq.comp(&mcq->mcq, NULL);
}
wait_for_completion(&sdrain->done);
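All of the synthetic invocations in this series (internal-error CQ arming in main.c, the drain path above, and the tasklet in cq.c below) pass NULL for the new eqe argument, so a completion handler written against this interface must treat it as optional. A minimal sketch, with my_cq and my_cq_handle as hypothetical consumer names:

	/* Hypothetical consumer handler: eqe may be NULL when the CQ is
	 * completed synthetically (drain/reset/tasklet paths), so
	 * NULL-check before touching the raw event.
	 */
	static void my_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
	{
		struct my_cq *cq = container_of(mcq, struct my_cq, mcq);

		if (eqe)
			; /* optionally inspect the raw EQE from the EQ */
		my_cq_handle(cq);
	}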
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 713a17ee3751..818edc63e428 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list,
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
- mcq->tasklet_ctx.comp(mcq);
+ mcq->tasklet_ctx.comp(mcq, NULL);
mlx5_cq_put(mcq);
if (time_after(jiffies, end))
break;
@@ -68,7 +68,8 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -87,11 +88,10 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
}
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen)
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
struct mlx5_eq_comp *eq;
int err;
@@ -100,9 +100,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
if (IS_ERR(eq))
return PTR_ERR(eq);
- memset(out, 0, sizeof(out));
+ memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -158,13 +158,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
int err;
- err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
- if (err)
- return err;
-
- err = mlx5_eq_del_cq(&cq->eq->core, cq);
- if (err)
- return err;
+ mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
+ mlx5_eq_del_cq(&cq->eq->core, cq);
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index f6b1da99e6c2..5bb6a26ea267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -311,13 +311,20 @@ static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
+ u32 pci_id;
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+ pci_id = mlx5_gen_pci_id(dev);
list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+ if (!mlx5_core_is_pf(tmp_dev))
+ continue;
+
if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
res = tmp_dev;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3281ace5e126..6b85816c2f21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -782,7 +782,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
-void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 554672edf8c3..8dd31b5c740c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -680,7 +680,7 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
memset(perm_addr, 0xff, MAX_ADDR_LEN);
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
+ mlx5_query_mac_address(priv->mdev, perm_addr);
}
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
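mlx5_query_mac_address() above is a new convenience wrapper for querying the device's own permanent MAC; the lower-level mlx5_query_nic_vport_mac_address() gains an other_vport flag, as visible in the eswitch.c hunk further down (which passes true with an explicit vport number). A sketch of the two call styles, hedged since the vport.c hunk itself is not shown in full:

	u8 mac[ETH_ALEN];

	/* own function's MAC: new wrapper */
	mlx5_query_mac_address(mdev, mac);

	/* another vport's MAC: low-level form with other_vport = true,
	 * matching the esw_vport_change_handle_locked() call below */
	mlx5_query_nic_vport_mac_address(mdev, vport_num, true, mac);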
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1085040675ae..afbc1e81f36a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1516,6 +1516,7 @@ static void mlx5e_free_cq(struct mlx5e_cq *cq)
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
@@ -1550,7 +1551,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
- err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
kvfree(in);
@@ -4588,7 +4589,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+ mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
if (is_zero_ether_addr(netdev->dev_addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
@@ -5140,7 +5141,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
#ifdef CONFIG_MLX5_ESWITCH
if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mlx5e_rep_register_vport_reps(mdev);
return mdev;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 330034fcdfc5..6c623efb76e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -398,7 +398,7 @@ static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
priv = netdev_priv(dev);
esw = priv->mdev->priv.eswitch;
- if (esw->mode == SRIOV_NONE)
+ if (esw->mode == MLX5_ESWITCH_NONE)
return -EOPNOTSUPP;
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
@@ -414,7 +414,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5e_rep_sq *rep_sq, *tmp;
struct mlx5e_rep_priv *rpriv;
- if (esw->mode != SRIOV_OFFLOADS)
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -435,7 +435,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
int err;
int i;
- if (esw->mode != SRIOV_OFFLOADS)
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rpriv = mlx5e_rep_to_rep_priv(rep);
@@ -1392,7 +1392,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
- mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
+ mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (MLX5_CAP_GEN(mdev, qos))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index e6b199cd68ea..3ac9b1e423ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3372,7 +3372,7 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
if (!tc_can_offload_extack(priv->netdev, f->common.extack))
return -EOPNOTSUPP;
- if (esw && esw->mode == SRIOV_OFFLOADS)
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
err = mlx5e_add_fdb_flow(priv, f, flow_flags,
filter_dev, flow);
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e6c434efbd46..1c021a299baa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -134,7 +134,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58fff2f39b38..41f25ea2e8d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -153,7 +153,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
cq = mlx5_eq_cq_get(eq, cqn);
if (likely(cq)) {
++cq->arm_sn;
- cq->comp(cq);
+ cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
@@ -256,6 +256,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int inlen;
u32 *in;
int err;
+ int i;
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
@@ -283,10 +284,12 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
- if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx))
+ if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
- MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
+ param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -389,7 +392,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
return err;
}
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
@@ -399,16 +402,14 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
spin_unlock(&table->lock);
if (!tmp) {
- mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
- return -ENOENT;
- }
-
- if (tmp != cq) {
- mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", eq->eqn, cq->cqn);
- return -EINVAL;
+ mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
+ eq->eqn, cq->cqn);
+ return;
}
- return 0;
+ if (tmp != cq)
+ mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
+ eq->eqn, cq->cqn);
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
@@ -502,14 +503,31 @@ static int cq_err_event_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
- cq->event(cq, type);
+ if (cq->event)
+ cq->event(cq, type);
mlx5_cq_put(cq);
return NOTIFY_OK;
}
-static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
+static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
+{
+ __be64 *user_unaffiliated_events;
+ __be64 *user_affiliated_events;
+ int i;
+
+ user_affiliated_events =
+ MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
+ user_unaffiliated_events =
+ MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
+
+ for (i = 0; i < 4; i++)
+ mask[i] |= be64_to_cpu(user_affiliated_events[i] |
+ user_unaffiliated_events[i]);
+}
+
+static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
@@ -546,7 +564,10 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
- return async_event_mask;
+ mask[0] = async_event_mask;
+
+ if (MLX5_CAP_GEN(dev, event_cap))
+ gather_user_async_events(dev, mask);
}
static int create_async_eqs(struct mlx5_core_dev *dev)
@@ -561,9 +582,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
- .mask = 1ull << MLX5_EVENT_TYPE_CMD,
.nent = MLX5_NUM_CMD_EQE,
};
+
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD;
err = create_async_eq(dev, &table->cmd_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -579,9 +601,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
- .mask = gather_async_events_mask(dev),
.nent = MLX5_NUM_ASYNC_EQE,
};
+
+ gather_async_events_mask(dev, param.mask);
err = create_async_eq(dev, &table->async_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -597,9 +620,10 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
- .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
.nent = /* TODO: sriov max_vf + */ 1,
};
+
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST;
err = create_async_eq(dev, &table->pages_eq.core, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -791,7 +815,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
.irq_index = vecidx,
- .mask = 0,
.nent = nent,
};
err = create_map_eq(dev, &eq->core, &param);
@@ -927,6 +950,7 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
+EXPORT_SYMBOL(mlx5_eq_notifier_register);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
@@ -937,3 +961,4 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
+EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
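With mlx5_eq_notifier_register()/mlx5_eq_notifier_unregister() now exported, interface drivers outside the core can subscribe to a specific async event type. A hedged sketch of the pattern, using the same MLX5_NB_INIT helper the eswitch.c hunk below uses (my_nb and my_event_handler are placeholder names; my_nb is a struct mlx5_nb):

	static int my_event_handler(struct notifier_block *nb,
				    unsigned long type, void *data)
	{
		/* data points at the raw struct mlx5_eqe for this event */
		return NOTIFY_OK;
	}

	/* bind the notifier block to one event type, then register it */
	MLX5_NB_INIT(&my_nb, my_event_handler, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(dev, &my_nb);
	/* ... */
	mlx5_eq_notifier_unregister(dev, &my_nb);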
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 67e76979bb42..7281f8d6cba6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -497,7 +497,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
- if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
+ if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -897,7 +897,7 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
- mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
+ mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
@@ -1553,6 +1553,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
+ int flags;
if (esw->manager_vport == vport_num)
return;
@@ -1570,11 +1571,13 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
vport->info.node_guid);
}
+ flags = (vport->info.vlan || vport->info.qos) ?
+ SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
- (vport->info.vlan || vport->info.qos));
+ flags);
/* Only legacy mode needs ACLs */
- if (esw->mode == SRIOV_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1626,7 +1629,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
/* Create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == SRIOV_LEGACY)
+ if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
esw_vport_create_drop_counters(vport);
/* Restore old vport configuration */
@@ -1680,7 +1683,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
vport->enabled_events = 0;
esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
- esw->mode == SRIOV_LEGACY) {
+ esw->mode == MLX5_ESWITCH_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
@@ -1712,59 +1715,91 @@ static int eswitch_vport_event(struct notifier_block *nb,
return NOTIFY_OK;
}
-int mlx5_esw_query_functions(struct mlx5_core_dev *dev, u32 *out, int outlen)
+/**
+ * mlx5_esw_query_functions - Returns raw output about functions state
+ * @dev: Pointer to device to query
+ *
+ * mlx5_esw_query_functions() allocates and returns functions changed
+ * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
+ * Caller must free the memory using kvfree() when valid pointer is returned.
+ */
+const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
+ int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return ERR_PTR(-ENOMEM);
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+ if (!err)
+ return out;
+
+ kvfree(out);
+ return ERR_PTR(err);
+}
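Per the kernel-doc above, callers of the reworked mlx5_esw_query_functions() receive an allocated buffer that they must release with kvfree(). A hedged caller sketch; the host_num_of_vfs field path is an assumption based on the query_esw_functions_out layout, which this hunk does not show:

	const u32 *out;
	u16 num_vfs;

	out = mlx5_esw_query_functions(dev);
	if (IS_ERR(out))
		return PTR_ERR(out);

	/* field path assumed from the query_esw_functions_out layout */
	num_vfs = MLX5_GET(query_esw_functions_out, out,
			   host_params_context.host_num_of_vfs);
	kvfree(out);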
+
+static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+{
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
+ MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
+ ESW_FUNCTIONS_CHANGED);
+ mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+ }
+}
+
+static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+{
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+ mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
+
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
+ flush_workqueue(esw->work_queue);
}
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
struct mlx5_vport *vport;
- int total_nvports = 0;
int err;
int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
- esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+ esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+ esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
-
- esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
-
- if (mode == SRIOV_OFFLOADS) {
- if (mlx5_core_is_ecpf_esw_manager(esw->dev))
- total_nvports = esw->total_vports;