/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>
#include <net/codel.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows. This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

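/* Example setup (illustrative only: iproute2 fq_codel options with an
 * arbitrary device name and values; "5ms"/"100ms" are converted by tc to
 * microseconds before being sent as TCA_FQ_CODEL_TARGET/INTERVAL):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		quantum 1514 target 5ms interval 100ms ecn
 */
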
struct fq_codel_flow {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	struct list_head flowchain;
	int		deficit;
	u32		dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto *filter_list;	/* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		perturbation;	/* hash perturbation */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

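/* Map the dissected flow keys onto [0, flows_cnt): jhash gives a 32-bit
 * value, and ((u64)hash * flows_cnt) >> 32 scales it to a bucket without
 * a modulo. perturbation salts the hash so bucket placement cannot be
 * predicted (and targeted) from outside.
 */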
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  const struct sk_buff *skb)
{
	struct flow_keys keys;
	unsigned int hash;

	skb_flow_dissect(skb, &keys);
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src ^ keys.ip_proto,
			    (__force u32)keys.ports, q->perturbation);
	return ((u64)hash * q->flows_cnt) >> 32;
}

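/* Return a 1-based flow index, or 0 to make the caller drop the skb.
 * Precedence: an skb->priority carrying this qdisc's handle selects the
 * flow directly, then the external filters (if any), then the hash.
 */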
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
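			/* fall through - both cases return 0 like TC_ACT_SHOT */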
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

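/* Overlimit handler: drop one packet from the head of the fattest flow
 * (the one with the largest byte backlog) and return that flow's index,
 * so the enqueue path can tell whether its own flow paid the price.
 */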
static unsigned int fq_codel_drop(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;

	/* Queue is full! Find the fat flow and drop a packet from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet enqueue/dequeue) with many cache misses.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}
	flow = &q->flows[idx];
	skb = dequeue_head(flow);
	len = qdisc_pkt_len(skb);
	q->backlogs[idx] -= len;
	kfree_skb(skb);
	sch->q.qlen--;
	sch->qstats.drops++;
	sch->qstats.backlog -= len;
	flow->dropped++;
	return idx;
}

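/* Enqueue: classify the skb to a flow, timestamp it for CoDel, append it
 * to the flow FIFO, and link the flow on new_flows if it just became
 * active. When the packet limit is exceeded, a packet is stolen from the
 * fattest flow rather than tail-dropping the new arrival.
 */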
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (fq_codel_drop(sch) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		sch->q.qlen--;
	}
	return skb;
}

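/* DRR scheduler: serve new_flows first, then old_flows. A flow that has
 * exhausted its deficit gets a fresh quantum and is rotated to the tail
 * of old_flows; an emptied new flow is likewise moved to old_flows once
 * before being unlinked, so it cannot immediately regain priority.
 */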
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
			    dequeue);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
		q->cstats.drop_count = 0;
	}
	return skb;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = fq_codel_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
};

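/* Netlink configuration. TARGET and INTERVAL arrive in microseconds and
 * are converted to CoDel's internal time units; FLOWS may only be set
 * while the flow table is still unallocated, i.e. at creation time.
 */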
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
	q->cstats.drop_count = 0;

	sch_tree_unlock(sch);
	return 0;
}

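/* The flow table can hold up to 64K entries, so fall back to vzalloc()
 * when a physically contiguous kzalloc() of that size fails.
 */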
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

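/* Defaults: 10240 packet limit, 1024 flow buckets, one MTU of quantum,
 * ECN marking enabled. The flow table is allocated only after the
 * optional netlink attributes are parsed, so TCA_FQ_CODEL_FLOWS can
 * still resize it at creation.
 */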
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->perturbation = prandom_u32();
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;

	if (opt) {
		int err = fq_codel_change(sch, opt);
		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type		= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;

	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

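/* fq_codel has no configurable classes: the ops below expose each flow
 * bucket as a read-only pseudo class (classid minor == flow index + 1),
 * which lets filters be attached and per-flow statistics be dumped.
 */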
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

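/* Per-flow statistics: queue length and byte backlog of the bucket plus a
 * snapshot of its CoDel state (deficit, last delay, drop counters, and
 * the signed time until the next scheduled drop while in dropping state).
 */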
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb = flow->head;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		while (skb) {
			qs.qlen++;
			skb = skb->next;
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

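/* Walk the active flows, reporting each one as class (flow index + 1);
 * empty buckets are counted for the walker but not visited.
 */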
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	fq_codel_drop,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");