path: root/net/sched/sch_red.c
author     Jakub Kicinski <jakub.kicinski@netronome.com>    2018-01-14 20:01:26 -0800
committer  David S. Miller <davem@davemloft.net>            2018-01-17 14:29:32 -0500
commit     416ef9b15c688b91edbf654ebe7bc349c9151147 (patch)
tree       b85b4d8f31906c3a88bd89d790f98d9aa2d2bfbf /net/sched/sch_red.c
parent     2d83619de8a64703cd3b0bf21b3b85f081290bf3 (diff)
net: sched: red: don't reset the backlog on every stat dump
Commit 0dfb33a0d7e2 ("sch_red: report backlog information") copied child's backlog into RED's backlog. Back then RED did not maintain its own backlog counts. This has changed after commit 2ccccf5fb43f ("net_sched: update hierarchical backlog too") and commit d7f4f332f082 ("sch_red: update backlog as well"). Copying is no longer necessary.

Tested:

$ tc -s qdisc show dev veth0
qdisc red 1: root refcnt 2 limit 400000b min 30000b max 30000b ecn
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 0 requeues 0)
 backlog 1260b 14p requeues 14
  marked 0 early 0 pdrop 0 other 0
qdisc tbf 2: parent 1: rate 1Kbit burst 15000b lat 3585.0s
 Sent 20942 bytes 221 pkt (dropped 0, overlimits 138 requeues 0)
 backlog 1260b 14p requeues 14

Recently RED offload was added. We need to make sure drivers don't depend on resetting the stats. This means backlog should be treated like any other statistic:

  total_stat = new_hw_stat - prev_hw_stat;

Adjust mlxsw.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Nogah Frankel <nogahf@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
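The accounting rule above, total_stat = new_hw_stat - prev_hw_stat, can be shown with a small standalone C sketch. This is illustrative only; the struct and function names below are made up and are not the mlxsw or kernel code. The driver-side totals grow by the delta since the previous hardware read and are never reset on a stats dump.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical counters as read back from the hardware. */
struct hw_stats {
        uint64_t backlog_bytes;
        uint64_t drops;
};

/* Running totals exposed to the stack; never reset between dumps. */
struct sw_stats {
        uint64_t backlog_bytes;
        uint64_t drops;
};

/* total_stat = new_hw_stat - prev_hw_stat: fold in only the change
 * since the previous read instead of overwriting the totals. */
static void accumulate(struct sw_stats *total, struct hw_stats *prev,
                       const struct hw_stats *now)
{
        total->backlog_bytes += now->backlog_bytes - prev->backlog_bytes;
        total->drops += now->drops - prev->drops;
        *prev = *now;
}

int main(void)
{
        struct sw_stats total = { 0, 0 };
        struct hw_stats prev = { 0, 0 };
        struct hw_stats reads[] = { { 1260, 0 }, { 2520, 3 }, { 1260, 5 } };
        unsigned int i;

        for (i = 0; i < sizeof(reads) / sizeof(reads[0]); i++) {
                accumulate(&total, &prev, &reads[i]);
                printf("backlog %" PRIu64 "b, drops %" PRIu64 "\n",
                       total.backlog_bytes, total.drops);
        }
        return 0;
}

Because each dump only adds a delta, reading the statistics has no side effects, which is the property the RED offload path now relies on.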
Diffstat (limited to 'net/sched/sch_red.c')
-rw-r--r--  net/sched/sch_red.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0af1c1254e0b..16644b3d2362 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                 opt.set.max = q->parms.qth_max >> q->parms.Wlog;
                 opt.set.probability = q->parms.max_P;
                 opt.set.is_ecn = red_use_ecn(q);
+                opt.set.qstats = &sch->qstats;
         } else {
                 opt.command = TC_RED_DESTROY;
         }
@@ -322,7 +323,6 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
         };
         int err;
 
-        sch->qstats.backlog = q->qdisc->qstats.backlog;
         err = red_dump_offload_stats(sch, &opt);
         if (err)
                 goto nla_put_failure;
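For background on the removed line: since commit 2ccccf5fb43f and commit d7f4f332f082, sch_red updates sch->qstats.backlog itself as packets move through the qdisc, so copying the child's counter in red_dump() is redundant. The sketch below is simplified and not the verbatim upstream functions; struct red_sched_data and q->qdisc refer to sch_red's private data as in net/sched/sch_red.c.

/* Simplified sketch of how RED keeps its own backlog current;
 * not the verbatim upstream enqueue/dequeue paths. */
static int red_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
                              struct sk_buff **to_free)
{
        struct red_sched_data *q = qdisc_priv(sch);
        int ret;

        ret = qdisc_enqueue(skb, q->qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                qdisc_qstats_backlog_inc(sch, skb);     /* own backlog += pkt len */
                sch->q.qlen++;
        }
        return ret;
}

static struct sk_buff *red_dequeue_sketch(struct Qdisc *sch)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = q->qdisc->dequeue(q->qdisc);

        if (skb) {
                qdisc_qstats_backlog_dec(sch, skb);     /* own backlog -= pkt len */
                sch->q.qlen--;
        }
        return skb;
}

With the qdisc tracking its own backlog on both paths, the dump-time copy from the child would only overwrite an already correct value.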