kernel: merge pending fq_codel backlog accounting fix

Signed-off-by: Felix Fietkau <nbd@nbd.name>
lede-17.01
Felix Fietkau 2016-06-07 14:11:13 +02:00
parent df7af9317b
commit fecd715ef8
4 changed files with 74 additions and 4 deletions

@@ -0,0 +1,70 @@
From: Eric Dumazet <edumazet@google.com>
Date: Sat, 4 Jun 2016 12:55:13 -0700
Subject: [PATCH] fq_codel: fix NET_XMIT_CN behavior

My prior attempt to fix the backlogs of parents failed.

If we return NET_XMIT_CN, our parents won't increase their backlog,
so our qdisc_tree_reduce_backlog() should take this into account.

v2: Florian Westphal pointed out that we could drop the packet,
so we need to save qdisc_pkt_len(skb) in a temp variable before
calling fq_codel_drop()

Fixes: 9d18562a2278 ("fq_codel: add batch ability to fq_codel_drop()")
Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
Reported-by: Stas Nichiporovich <stasn77@gmail.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: WANG Cong <xiyou.wangcong@gmail.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
---
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -197,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
+ unsigned int pkt_len;
bool memory_limited;
idx = fq_codel_classify(skb, sch, &ret);
@@ -228,6 +229,8 @@ static int fq_codel_enqueue(struct sk_bu
prev_backlog = sch->qstats.backlog;
prev_qlen = sch->q.qlen;
+ /* save this packet length as it might be dropped by fq_codel_drop() */
+ pkt_len = qdisc_pkt_len(skb);
/* fq_codel_drop() is quite expensive, as it performs a linear search
* in q->backlogs[] to find a fat flow.
* So instead of dropping a single packet, drop half of its backlog
@@ -235,14 +238,23 @@ static int fq_codel_enqueue(struct sk_bu
*/
ret = fq_codel_drop(sch, q->drop_batch_size);
- q->drop_overlimit += prev_qlen - sch->q.qlen;
+ prev_qlen -= sch->q.qlen;
+ prev_backlog -= sch->qstats.backlog;
+ q->drop_overlimit += prev_qlen;
if (memory_limited)
- q->drop_overmemory += prev_qlen - sch->q.qlen;
- /* As we dropped packet(s), better let upper stack know this */
- qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
- prev_backlog - sch->qstats.backlog);
+ q->drop_overmemory += prev_qlen;
- return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ /* As we dropped packet(s), better let upper stack know this.
+ * If we dropped a packet for this flow, return NET_XMIT_CN,
+ * but in this case, our parents won't increase their backlogs.
+ */
+ if (ret == idx) {
+ qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+ prev_backlog - pkt_len);
+ return NET_XMIT_CN;
+ }
+ qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+ return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
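
For context, below is a small standalone sketch (userspace C, not kernel code) of the accounting rule the hunk above implements: when fq_codel_drop() hits the same flow as the packet just enqueued, fq_codel_enqueue() returns NET_XMIT_CN, the parent qdiscs then never account for that packet, so the reduction passed to qdisc_tree_reduce_backlog() has to leave out one packet and pkt_len bytes. The helper name report_to_parent() and the sample numbers are illustrative only.

/*
 * Toy model of the backlog accounting decision at the end of
 * fq_codel_enqueue() after this patch.  Not kernel code: report_to_parent()
 * stands in for the call to qdisc_tree_reduce_backlog().
 */
#include <stdbool.h>
#include <stdio.h>

struct reduction {
	unsigned int packets;
	unsigned int bytes;
};

/* Amount the parent qdiscs should be told to subtract from their backlogs. */
static struct reduction report_to_parent(unsigned int dropped_packets,
					 unsigned int dropped_bytes,
					 bool same_flow_as_new_skb,
					 unsigned int new_skb_len)
{
	struct reduction r = { dropped_packets, dropped_bytes };

	if (same_flow_as_new_skb) {
		/* NET_XMIT_CN case: the parents never added the new skb to
		 * their backlogs, so do not ask them to subtract it either.
		 */
		r.packets -= 1;
		r.bytes -= new_skb_len;
	}
	return r;
}

int main(void)
{
	/* Example: the batch drop removed 8 packets / 12000 bytes, one of
	 * them from the flow of the 1500-byte packet we just enqueued,
	 * so the kernel would return NET_XMIT_CN.
	 */
	struct reduction r = report_to_parent(8, 12000, true, 1500);

	printf("reduce parent backlogs by %u packets / %u bytes\n",
	       r.packets, r.bytes);
	return 0;
}

Compiled with a plain C compiler, this prints the packet/byte reduction that would be propagated to the parent qdiscs in the NET_XMIT_CN case (7 packets / 10500 bytes for the sample input).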

@@ -1,6 +1,6 @@
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -459,7 +459,7 @@ static int fq_codel_init(struct Qdisc *s
+@@ -471,7 +471,7 @@ static int fq_codel_init(struct Qdisc *s
sch->limit = 10*1024;
q->flows_cnt = 1024;

@@ -1,6 +1,6 @@
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -218,7 +218,6 @@ static int fq_codel_enqueue(struct sk_bu
+@@ -219,7 +219,6 @@ static int fq_codel_enqueue(struct sk_bu
list_add_tail(&flow->flowchain, &q->new_flows);
q->new_flow_count++;
flow->deficit = q->quantum;

@@ -13,7 +13,7 @@
device, it has to decide which ones to send first, which ones to
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
-@@ -676,7 +676,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -688,7 +688,7 @@ static const struct Qdisc_class_ops fq_c
.walk = fq_codel_walk,
};
@@ -22,7 +22,7 @@
.cl_ops = &fq_codel_class_ops,
.id = "fq_codel",
.priv_size = sizeof(struct fq_codel_sched_data),
-@@ -692,6 +692,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -704,6 +704,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};