author     David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
committer  David S. Miller <davem@davemloft.net>  2020-09-22 16:45:34 -0700
commit     3ab0a7a0c349a1d7beb2bb371a62669d1528269d (patch)
tree       d2ae17c3bfc829ce0c747ad97021cd4bc8fb11dc /net/sched
parent     92ec804f3dbf0d986f8e10850bfff14f316d7aaf (diff)
parent     805c6d3c19210c90c109107d189744e960eae025 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Two minor conflicts:

1) net/ipv4/route.c, adding a new local variable while moving another
   local variable and removing its initial assignment.

2) drivers/net/dsa/microchip/ksz9477.c, overlapping changes. One pretty
   prints the port mode differently, whilst another changes the driver
   to try and obtain the port mode from the port node rather than the
   switch node.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_ife.c         44
-rw-r--r--  net/sched/act_tunnel_key.c   1
-rw-r--r--  net/sched/cls_flower.c       5
-rw-r--r--  net/sched/sch_generic.c     48
-rw-r--r--  net/sched/sch_taprio.c      28
5 files changed, 89 insertions, 37 deletions
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index c1fcd85719d6..5c568757643b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a)
kfree_rcu(p, rcu);
}
+static int load_metalist(struct nlattr **tb, bool rtnl_held)
+{
+        int i;
+
+        for (i = 1; i < max_metacnt; i++) {
+                if (tb[i]) {
+                        void *val = nla_data(tb[i]);
+                        int len = nla_len(tb[i]);
+                        int rc;
+
+                        rc = load_metaops_and_vet(i, val, len, rtnl_held);
+                        if (rc != 0)
+                                return rc;
+                }
+        }
+
+        return 0;
+}
+
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
bool exists, bool rtnl_held)
{
@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(i, val, len, rtnl_held);
- if (rc != 0)
- return rc;
-
rc = add_metainfo(ife, i, val, len, exists);
if (rc)
return rc;
@@ -509,6 +524,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!p)
return -ENOMEM;
+        if (tb[TCA_IFE_METALST]) {
+                err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+                                                  tb[TCA_IFE_METALST], NULL,
+                                                  NULL);
+                if (err) {
+                        kfree(p);
+                        return err;
+                }
+                err = load_metalist(tb2, rtnl_held);
+                if (err) {
+                        kfree(p);
+                        return err;
+                }
+        }
+
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0) {
@@ -570,15 +600,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
}
if (tb[TCA_IFE_METALST]) {
- err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
- tb[TCA_IFE_METALST], NULL,
- NULL);
- if (err)
- goto metadata_parse_err;
err = populate_metalist(ife, tb2, exists, rtnl_held);
if (err)
goto metadata_parse_err;
-
} else {
/* if no passed metadata allow list or passed allow-all
* then here we process by adding as many supported metadatum
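
The act_ife hunks above move load_metaops_and_vet() out of populate_metalist() and into a new load_metalist() that runs before tcf_idr_check_alloc() reserves the action index, so the fallible (and potentially sleeping) module-loading step completes before any shared state is published. A minimal user-space sketch of that ordering follows; every name in it is hypothetical, not the kernel API:

#include <stdio.h>

/* Illustrative stand-ins for load_metalist() and tcf_idr_check_alloc(). */
static int load_modules(void)         { return 0; } /* may sleep, may fail */
static void unload_modules(void)      { }
static int reserve_shared_index(void) { return 0; } /* publishes the object */

static int init_action(void)
{
        int err;

        err = load_modules();           /* fallible work first ... */
        if (err)
                return err;             /* ... so nothing shared needs unwinding */

        err = reserve_shared_index();   /* publish only after setup succeeded */
        if (err) {
                unload_modules();
                return err;
        }
        return 0;
}

int main(void)
{
        printf("init_action() = %d\n", init_action());
        return 0;
}
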
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 536c4bc31be6..37f1e10f35e0 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -156,6 +156,7 @@ tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
struct vxlan_metadata *md = dst;
md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
+ md->gbp &= VXLAN_GBP_MASK;
}
return sizeof(struct vxlan_metadata);
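
The one-line act_tunnel_key fix masks the user-supplied TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP attribute with VXLAN_GBP_MASK so that undefined bits from an untrusted netlink payload cannot reach the VXLAN header. A self-contained sketch of the same sanitization; the bit positions below are illustrative, the authoritative definition is VXLAN_GBP_MASK in include/net/vxlan.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative GBP layout: a 16-bit policy ID plus two flag bits. */
#define GBP_ID_MASK        0x0000FFFFu
#define GBP_POLICY_APPLIED (1u << 19)
#define GBP_DONT_LEARN     (1u << 22)
#define GBP_MASK           (GBP_ID_MASK | GBP_POLICY_APPLIED | GBP_DONT_LEARN)

int main(void)
{
        uint32_t from_user = 0xdeadbeef;      /* untrusted netlink payload */
        uint32_t gbp = from_user & GBP_MASK;  /* drop the undefined bits */

        printf("0x%08x -> 0x%08x\n", (unsigned)from_user, (unsigned)gbp);
        return 0;
}
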
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index a4f7ef1de7e7..fed18fd2c50b 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1175,8 +1175,10 @@ static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
return -EINVAL;
}
- if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
+ md->gbp &= VXLAN_GBP_MASK;
+ }
return sizeof(*md);
}
@@ -1221,6 +1223,7 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
}
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
+ memset(&md->u, 0x00, sizeof(md->u));
md->u.index = nla_get_be32(nla);
}
} else if (md->version == 2) {
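
cls_flower gets the same GBP masking, plus a memset in fl_set_erspan_opt(): md->u is a union shared by the version-1 index and the version-2 fields, so assigning only the 32-bit index would leave stale bytes from a previous use in the flow key. A simplified analogue (field layout illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union erspan_md_u {
        uint32_t index;      /* version 1 */
        uint8_t  md2[8];     /* version 2: dir, hwid, ... */
};

int main(void)
{
        union erspan_md_u u;

        memset(u.md2, 0xab, sizeof(u.md2)); /* pretend v2 data was here */
        u.index = 42;                       /* without the fix ... */
        printf("stale: md2[7]=0x%02x\n", u.md2[7]); /* 0xab leaks */

        memset(&u, 0, sizeof(u));           /* the fix: zero before reuse */
        u.index = 42;
        printf("fixed: md2[7]=0x%02x\n", u.md2[7]); /* now 0x00 */
        return 0;
}
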
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 265a61d011df..54c417244642 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate);
static void qdisc_deactivate(struct Qdisc *qdisc)
{
- bool nolock = qdisc->flags & TCQ_F_NOLOCK;
-
if (qdisc->flags & TCQ_F_BUILTIN)
return;
- if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
- return;
-
- if (nolock)
- spin_lock_bh(&qdisc->seqlock);
- spin_lock_bh(qdisc_lock(qdisc));
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
-
- qdisc_reset(qdisc);
-
- spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock)
- spin_unlock_bh(&qdisc->seqlock);
}
static void dev_deactivate_queue(struct net_device *dev,
@@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
+static void dev_reset_queue(struct net_device *dev,
+                            struct netdev_queue *dev_queue,
+                            void *_unused)
+{
+        struct Qdisc *qdisc;
+        bool nolock;
+
+        qdisc = dev_queue->qdisc_sleeping;
+        if (!qdisc)
+                return;
+
+        nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+        if (nolock)
+                spin_lock_bh(&qdisc->seqlock);
+        spin_lock_bh(qdisc_lock(qdisc));
+
+        qdisc_reset(qdisc);
+
+        spin_unlock_bh(qdisc_lock(qdisc));
+        if (nolock)
+                spin_unlock_bh(&qdisc->seqlock);
+}
+
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head)
dev_watchdog_down(dev);
}
- /* Wait for outstanding qdisc-less dev_queue_xmit calls.
+ /* Wait for outstanding qdisc-less dev_queue_xmit calls or
+ * outstanding qdisc enqueuing calls.
* This is avoided if all devices are in dismantle phase :
* Caller will call synchronize_net() for us
*/
synchronize_net();
+        list_for_each_entry(dev, head, close_list) {
+                netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
+
+                if (dev_ingress_queue(dev))
+                        dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
+        }
+
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev)) {
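
The sch_generic change splits deactivation into two phases: qdisc_deactivate() now only publishes __QDISC_STATE_DEACTIVATED, and the actual qdisc_reset() moves into dev_reset_queue(), which runs only after synchronize_net(). By then every in-flight enqueue has either seen the flag or completed, so no packet can be left behind in an already-reset queue. A user-space analogue of the pattern, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* In the kernel the grace period is synchronize_net(); here it is
 * only named by a comment. */
struct queue {
        atomic_bool deactivated;
        int         len;               /* stands in for the skb list */
};

static bool enqueue(struct queue *q)
{
        if (atomic_load(&q->deactivated))
                return false;          /* late senders see the flag and drop */
        q->len++;                      /* (real code takes the qdisc lock) */
        return true;
}

int main(void)
{
        struct queue q = { .deactivated = false, .len = 0 };

        enqueue(&q);                         /* traffic while active */

        atomic_store(&q.deactivated, true);  /* phase 1: publish the flag */
        /* ... grace period: in-flight enqueue() calls finish ... */
        q.len = 0;                           /* phase 2: reset safely */

        printf("enqueue after teardown: %d, len=%d\n", enqueue(&q), q.len);
        return 0;
}
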
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index fe53c1e38c7d..b0ad7687ee2c 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
};
-static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
+ struct sched_entry *entry,
struct netlink_ext_ack *extack)
{
+ int min_duration = length_to_duration(q, ETH_ZLEN);
u32 interval = 0;
if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
@@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
interval = nla_get_u32(
tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
- if (interval == 0) {
+ /* The interval should allow at least the minimum ethernet
+ * frame to go out.
+ */
+ if (interval < min_duration) {
NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
return -EINVAL;
}
@@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
return 0;
}
-static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
- int index, struct netlink_ext_ack *extack)
+static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
+ struct sched_entry *entry, int index,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
int err;
@@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
entry->index = index;
- return fill_sched_entry(tb, entry, extack);
+ return fill_sched_entry(q, tb, entry, extack);
}
-static int parse_sched_list(struct nlattr *list,
+static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
{
@@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list,
return -ENOMEM;
}
- err = parse_sched_entry(n, entry, i, extack);
+ err = parse_sched_entry(q, n, entry, i, extack);
if (err < 0) {
kfree(entry);
return err;
@@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list,
return i;
}
-static int parse_taprio_schedule(struct nlattr **tb,
+static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
struct sched_gate_list *new,
struct netlink_ext_ack *extack)
{
@@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb,
new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
- err = parse_sched_list(
- tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
+ err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
+ new, extack);
if (err < 0)
return err;
@@ -1473,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto free_sched;
}
- err = parse_taprio_schedule(tb, new_admin, extack);
+ err = parse_taprio_schedule(q, tb, new_admin, extack);
if (err < 0)
goto free_sched;
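
The sch_taprio change threads the taprio_sched pointer down to fill_sched_entry() so an entry's interval can be checked against length_to_duration(q, ETH_ZLEN): a gate that stays open for less than the wire time of a minimum-size Ethernet frame can never transmit, so such entries are now rejected rather than only catching interval == 0. A sketch of the arithmetic; the helper below is modelled on the kernel's picoseconds-per-byte bookkeeping, not copied from it:

#include <stdint.h>
#include <stdio.h>

#define ETH_ZLEN 60   /* minimum Ethernet frame length, in bytes */

/* bytes * psec/byte -> nsec, in the spirit of taprio's length_to_duration() */
static uint64_t length_to_duration_ns(uint64_t picos_per_byte, int len)
{
        return ((uint64_t)len * picos_per_byte) / 1000;
}

int main(void)
{
        /* 1 Gb/s: 8 ns per byte, i.e. 8000 ps per byte */
        uint64_t picos_per_byte = 8000;
        uint64_t min_duration = length_to_duration_ns(picos_per_byte, ETH_ZLEN);
        uint32_t interval = 400;  /* candidate schedule entry, in ns */

        /* prints min=480ns interval=400ns ok=0: the entry is rejected */
        printf("min=%lluns interval=%uns ok=%d\n",
               (unsigned long long)min_duration, (unsigned)interval,
               interval >= min_duration);
        return 0;
}
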