1 /*
2 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
3 * Portions Copyright (c) 2000 Akamba Corp.
4 * All rights reserved
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
28 */
29
30 #include "opt_ipfw.h"
31 #include "opt_inet.h"
32 #ifndef INET
33 #error IPFIREWALL3 requires INET.
34 #endif /* INET */
35
36 /*
37 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
38 * Description of the data structures used is in ip_dummynet.h
39 * Here you mainly find the following blocks of code:
40 * + variable declarations;
41 * + heap management functions;
42 * + scheduler and dummynet functions;
43 * + configuration and initialization.
44 *
45 * Most important Changes:
46 *
47 * 011004: KLDable
48 * 010124: Fixed WF2Q behaviour
49 * 010122: Fixed spl protection.
50 * 000601: WF2Q support
51 * 000106: Large rewrite, use heaps to handle very many pipes.
52 * 980513: Initial release
53 */
54
55 #include <sys/param.h>
56 #include <sys/kernel.h>
57 #include <sys/malloc.h>
58 #include <sys/mbuf.h>
59 #include <sys/socketvar.h>
60 #include <sys/sysctl.h>
61 #include <sys/systimer.h>
62 #include <sys/thread2.h>
63
64 #include <net/ethernet.h>
65 #include <net/netmsg2.h>
66 #include <net/netisr2.h>
67 #include <net/route.h>
68
69 #include <net/if.h>
70 #include <netinet/in_var.h>
71 #include <netinet/ip_var.h>
72
73 #include <net/dummynet3/ip_dummynet3.h>
74 #include <net/ipfw3/ip_fw.h>
75
76 void check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
77 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
78 void check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
79 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len);
80
81 void
82 check_pipe(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
83 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
84 {
85 (*args)->rule = *f;
86 (*args)->cookie = cmd->arg1;
87 *cmd_val = IP_FW_DUMMYNET;
88 *cmd_ctl = IP_FW_CTL_DONE;
89 }
90
91 void
92 check_queue(int *cmd_ctl, int *cmd_val, struct ip_fw_args **args,
93 struct ip_fw **f, ipfw_insn *cmd, uint16_t ip_len)
94 {
95 (*args)->rule = *f;
96 (*args)->cookie = cmd->arg1;
97 *cmd_val = IP_FW_DUMMYNET;
98 *cmd_ctl = IP_FW_CTL_DONE;
99 }
100
101 #ifdef DUMMYNET_DEBUG
102 #define DPRINTF(fmt, ...) kprintf(fmt, __VA_ARGS__)
103 #else
104 #define DPRINTF(fmt, ...) ((void)0)
105 #endif
106
107 #ifndef DN_CALLOUT_FREQ_MAX
108 #define DN_CALLOUT_FREQ_MAX 10000
109 #endif
110
111 /*
112 * The maximum/minimum hash table size for queues.
113 * These values must be a power of 2.
114 */
115 #define DN_MIN_HASH_SIZE 4
116 #define DN_MAX_HASH_SIZE 65536
117
118 /*
119 * Some macros are used to compare key values and handle wraparounds.
120 * MAX64 returns the largest of two key values.
121 */
122 #define DN_KEY_LT(a, b) ((int64_t)((a) - (b)) < 0)
123 #define DN_KEY_LEQ(a, b) ((int64_t)((a) - (b)) <= 0)
124 #define DN_KEY_GT(a, b) ((int64_t)((a) - (b)) > 0)
125 #define DN_KEY_GEQ(a, b) ((int64_t)((a) - (b)) >= 0)
126 #define MAX64(x, y) ((((int64_t)((y) - (x))) > 0) ? (y) : (x))
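/*
 * Illustrative note (added): dn_key here is treated as an unsigned 64-bit
 * tick counter, so a plain '<' would misorder keys once the counter wraps.
 * Casting the difference to int64_t keeps the comparison correct as long as
 * the two keys are less than 2^63 ticks apart; e.g. with a = 5 and
 * b = 0xfffffffffffffffe (two ticks before the wrap), (int64_t)(a - b) == 7,
 * so DN_KEY_GT(a, b) holds even though a < b numerically.
 */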
127
128 #define DN_NR_HASH_MAX 16
129 #define DN_NR_HASH_MASK (DN_NR_HASH_MAX - 1)
130 #define DN_NR_HASH(nr) \
131 ((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
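/*
 * Worked example (added): DN_NR_HASH() folds a pipe/flowset number into one
 * of the DN_NR_HASH_MAX (16) buckets by XOR-ing its nibbles.  For instance,
 * nr = 0x1234 gives (0x1 ^ 0x12 ^ 0x123 ^ 0x1234) & 0xf = 0x1104 & 0xf = 4,
 * so that pipe/flowset lands in slot 4 of pipe_table[]/flowset_table[].
 */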
132
133 MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
134
135 extern int ip_dn_cpu;
136
137 static dn_key curr_time = 0; /* current simulation time */
138 static int dn_hash_size = 64; /* default hash size */
139 static int pipe_expire = 1; /* expire queue if empty */
140 static int dn_max_ratio = 16; /* max queues/buckets ratio */
141
142 /*
143 * Statistics on number of queue searches and search steps
144 */
145 static int searches;
146 static int search_steps;
147
148 /*
149 * RED parameters
150 */
151 static int red_lookup_depth = 256; /* default lookup table depth */
152 static int red_avg_pkt_size = 512; /* default medium packet size */
153 static int red_max_pkt_size = 1500;/* default max packet size */
154
155 /*
156 * Three heaps contain queues and pipes that the scheduler handles:
157 *
158 * + ready_heap contains all dn_flow_queue related to fixed-rate pipes.
159 * + wfq_ready_heap contains the pipes associated with WF2Q flows
160 * + extract_heap contains pipes associated with delay lines.
161 */
162 static struct dn_heap ready_heap;
163 static struct dn_heap extract_heap;
164 static struct dn_heap wfq_ready_heap;
165
166 static struct dn_pipe_head pipe_table[DN_NR_HASH_MAX];
167 static struct dn_flowset_head flowset_table[DN_NR_HASH_MAX];
168
169 /*
170 * Variables for dummynet systimer
171 */
172 static struct netmsg_base dn_netmsg;
173 static struct systimer dn_clock;
174 static int dn_hz = 1000;
175
176 static int sysctl_dn_hz(SYSCTL_HANDLER_ARGS);
177
178 SYSCTL_DECL(_net_inet_ip_dummynet);
179
180 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
181 &dn_hash_size, 0, "Default hash table size");
182 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
183 &curr_time, 0, "Current tick");
184 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
185 &pipe_expire, 0, "Expire queue if empty");
186 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
187 &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");
188
189 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
190 &ready_heap.size, 0, "Size of ready heap");
191 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
192 &extract_heap.size, 0, "Size of extract heap");
193
194 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
195 &searches, 0, "Number of queue searches");
196 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
197 &search_steps, 0, "Number of queue search steps");
198
199 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
200 &red_lookup_depth, 0, "Depth of RED lookup table");
201 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
202 &red_avg_pkt_size, 0, "RED Medium packet size");
203 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
204 &red_max_pkt_size, 0, "RED Max packet size");
205
206 SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
207 0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
208
209 static int heap_init(struct dn_heap *, int);
210 static int heap_insert(struct dn_heap *, dn_key, void *);
211 static void heap_extract(struct dn_heap *, void *);
212
213 static void transmit_event(struct dn_pipe *);
214 static void ready_event(struct dn_flow_queue *);
215 static void ready_event_wfq(struct dn_pipe *);
216
217 static int config_pipe(struct dn_ioc_pipe *);
218 static void dummynet_flush(void);
219
220 static void dummynet_clock(systimer_t, int, struct intrframe *);
221 static void dummynet(netmsg_t);
222
223 static struct dn_pipe *dn_find_pipe(int);
224 static struct dn_flow_set *dn_locate_flowset(int, int);
225
226 typedef void (*dn_pipe_iter_t)(struct dn_pipe *, void *);
227 static void dn_iterate_pipe(dn_pipe_iter_t, void *);
228
229 typedef void (*dn_flowset_iter_t)(struct dn_flow_set *, void *);
230 static void dn_iterate_flowset(dn_flowset_iter_t, void *);
231
232 static ip_dn_io_t dummynet_io;
233 static ip_dn_ctl_t dummynet_ctl;
234
235 /*
236 * Heap management functions.
237 *
238 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
239 * Some macros help finding parent/children so we can optimize them.
240 *
241 * heap_init() is called to expand the heap when needed.
242 * Increment size in blocks of 16 entries.
243 * XXX failure to allocate a new element is a pretty bad failure
244 * as we basically stall a whole queue forever!!
245 * Returns 1 on error, 0 on success
246 */
247 #define HEAP_FATHER(x) (((x) - 1) / 2)
248 #define HEAP_LEFT(x) (2*(x) + 1)
249 #define HEAP_IS_LEFT(x) ((x) & 1)
250 #define HEAP_RIGHT(x) (2*(x) + 2)
251 #define HEAP_SWAP(a, b, buffer) { buffer = a; a = b; b = buffer; }
252 #define HEAP_INCREMENT 15
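/*
 * Added note: with this 0-based layout, node 5 has father (5 - 1) / 2 = 2,
 * and node 2 has children 2*2 + 1 = 5 and 2*2 + 2 = 6.  heap_init() below
 * rounds sizes up to a multiple of 16, e.g. a request for 17 entries
 * allocates (17 + 15) & ~15 = 32 entries.
 */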
253
254 static int
255 heap_init(struct dn_heap *h, int new_size)
256 {
257 struct dn_heap_entry *p;
258
259 if (h->size >= new_size) {
260 kprintf("%s, Bogus call, have %d want %d\n", __func__,
261 h->size, new_size);
262 return 0;
263 }
264
265 new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
266 p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
267 if (h->size > 0) {
268 bcopy(h->p, p, h->size * sizeof(*p));
269 kfree(h->p, M_DUMMYNET);
270 }
271 h->p = p;
272 h->size = new_size;
273 return 0;
274 }
275
276 /*
277 * Insert element in heap. Normally, p != NULL, we insert p in
278 * a new position and bubble up. If p == NULL, then the element is
279 * already in place, and key is the position where to start the
280 * bubble-up.
281 * Returns 1 on failure (cannot allocate new heap entry)
282 *
283 * If offset > 0 the position (index, int) of the element in the heap is
284 * also stored in the element itself at the given offset in bytes.
285 */
286 #define SET_OFFSET(heap, node) \
287 if (heap->offset > 0) \
288 *((int *)((char *)(heap->p[node].object) + heap->offset)) = node;
289
290 /*
291 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
292 */
293 #define RESET_OFFSET(heap, node) \
294 if (heap->offset > 0) \
295 *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
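/*
 * Added note: in this file only the per-pipe idle_heap uses a non-zero
 * offset (config_pipe() points it at dn_flow_queue.heap_pos), which is what
 * allows heap_extract() to remove a specific idle queue from the middle of
 * that heap in dummynet_io().
 */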
296
297 static int
298 heap_insert(struct dn_heap *h, dn_key key1, void *p)
299 {
300 int son;
301
302 if (p == NULL) { /* Data already there, set starting point */
303 son = key1;
304 } else { /* Insert new element at the end, possibly resize */
305 son = h->elements;
306 if (son == h->size) { /* Need resize... */
307 if (heap_init(h, h->elements + 1))
308 return 1; /* Failure... */
309 }
310 h->p[son].object = p;
311 h->p[son].key = key1;
312 h->elements++;
313 }
314
315 while (son > 0) { /* Bubble up */
316 int father = HEAP_FATHER(son);
317 struct dn_heap_entry tmp;
318
319 if (DN_KEY_LT(h->p[father].key, h->p[son].key))
320 break; /* Found right position */
321
322 /* 'son' smaller than 'father', swap and repeat */
323 HEAP_SWAP(h->p[son], h->p[father], tmp);
324 SET_OFFSET(h, son);
325 son = father;
326 }
327 SET_OFFSET(h, son);
328 return 0;
329 }
330
331 /*
332 * Remove top element from heap, or obj if obj != NULL
333 */
334 static void
335 heap_extract(struct dn_heap *h, void *obj)
336 {
337 int child, father, max = h->elements - 1;
338
339 if (max < 0) {
340 kprintf("warning, extract from empty heap 0x%p\n", h);
341 return;
342 }
343
344 father = 0; /* Default: move up smallest child */
345 if (obj != NULL) { /* Extract specific element, index is at offset */
346 if (h->offset <= 0)
347 panic("%s from middle not supported on this heap!!!", __func__);
348
349 father = *((int *)((char *)obj + h->offset));
350 if (father < 0 || father >= h->elements) {
351 panic("%s father %d out of bound 0..%d", __func__,
352 father, h->elements);
353 }
354 }
355 RESET_OFFSET(h, father);
356
357 child = HEAP_LEFT(father); /* Left child */
358 while (child <= max) { /* Valid entry */
359 if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
360 child = child + 1; /* Take right child, otherwise left */
361 h->p[father] = h->p[child];
362 SET_OFFSET(h, father);
363 father = child;
364 child = HEAP_LEFT(child); /* Left child for next loop */
365 }
366 h->elements--;
367 if (father != max) {
368 /*
369 * Fill hole with last entry and bubble up, reusing the insert code
370 */
371 h->p[father] = h->p[max];
372 heap_insert(h, father, NULL); /* This one cannot fail */
373 }
374 }
375
376 /*
377 * heapify() will reorganize data inside an array to maintain the
378 * heap property. It is needed when we delete a bunch of entries.
379 */
380 static void
381 heapify(struct dn_heap *h)
382 {
383 int i;
384
385 for (i = 0; i < h->elements; i++)
386 heap_insert(h, i, NULL);
387 }
388
389 /*
390 * Cleanup the heap and free data structure
391 */
392 static void
393 heap_free(struct dn_heap *h)
394 {
395 if (h->size > 0)
396 kfree(h->p, M_DUMMYNET);
397 bzero(h, sizeof(*h));
398 }
399
400 /*
401 * --- End of heap management functions ---
402 */
403
404 /*
405 * Scheduler functions:
406 *
407 * transmit_event() is called when the delay-line needs to enter
408 * the scheduler, either because of existing pkts getting ready,
409 * or new packets entering the queue. The event handled is the delivery
410 * time of the packet.
411 *
412 * ready_event() does something similar with fixed-rate queues, and the
413 * event handled is the finish time of the head pkt.
414 *
415 * ready_event_wfq() does something similar with WF2Q queues, and the
416 * event handled is the start time of the head pkt.
417 *
418 * In all cases, we make sure that the data structures are consistent
419 * before passing pkts out, because this might trigger recursive
420 * invocations of the procedures.
421 */
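/*
 * Added overview of the path a packet takes through the functions below:
 * dummynet_io() appends it to a dn_flow_queue; ready_event() (fixed-rate)
 * or ready_event_wfq() (WF2Q) moves it to the pipe's delay line via
 * move_pkt() once enough bandwidth credit has accumulated; and
 * transmit_event() redispatches it to the stack when output_time expires.
 */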
422 static void
423 transmit_event(struct dn_pipe *pipe)
424 {
425 struct dn_pkt *pkt;
426
427 while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
428 DN_KEY_LEQ(pkt->output_time, curr_time)) {
429 TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
430 ip_dn_packet_redispatch(pkt);
431 }
432
433 /*
434 * If there are leftover packets, put into the heap for next event
435 */
436 if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
437 /*
438 * XXX should check errors on heap_insert, by draining the
439 * whole pipe and hoping in the future we are more successful
440 */
441 heap_insert(&extract_heap, pkt->output_time, pipe);
442 }
443 }
444
445 /*
446 * The following macro computes how many ticks we have to wait
447 * before being able to transmit a packet. The credit is taken from
448 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
449 */
450 #define SET_TICKS(pkt, q, p) \
451 (pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1 ) / \
452 p->bandwidth;
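/*
 * Added worked example: bandwidth is in bits/s and credit accumulates at
 * p->bandwidth units per tick, so a packet "costs" len*8*dn_hz units.
 * Assuming dn_hz = 1000 and a 1 Mbit/s pipe, a 1500-byte packet with no
 * accumulated credit waits ceil(1500*8*1000 / 1000000) = 12 ticks (12 ms),
 * which matches 12000 bits at 1 Mbit/s.
 */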
453
454 /*
455 * Extract pkt from queue, compute output time (could be now)
456 * and put into delay line (p_queue)
457 */
458 static void
459 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
460 struct dn_pipe *p, int len)
461 {
462 TAILQ_REMOVE(&q->queue, pkt, dn_next);
463 q->len--;
464 q->len_bytes -= len;
465
466 pkt->output_time = curr_time + p->delay;
467
468 TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
469 }
470
471 /*
472 * ready_event() is invoked every time the queue must enter the
473 * scheduler, either because the first packet arrives, or because
474 * a previously scheduled event fired.
475 * On invocation, drain as many pkts as possible (could be 0) and then
476 * if there are leftover packets reinsert the pkt in the scheduler.
477 */
478 static void
479 ready_event(struct dn_flow_queue *q)
480 {
481 struct dn_pkt *pkt;
482 struct dn_pipe *p = q->fs->pipe;
483 int p_was_empty;
484
485 if (p == NULL) {
486 kprintf("ready_event- pipe is gone\n");
487 return;
488 }
489 p_was_empty = TAILQ_EMPTY(&p->p_queue);
490
491 /*
492 * Schedule fixed-rate queues linked to this pipe:
493 * Account for the bw accumulated since last scheduling, then
494 * drain as many pkts as allowed by q->numbytes and move to
495 * the delay line (in p) computing output time.
496 * bandwidth==0 (no limit) means we can drain the whole queue,
497 * setting len_scaled = 0 does the job.
498 */
499 q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
500 while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
501 int len = pkt->dn_m->m_pkthdr.len;
502 int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
503
504 if (len_scaled > q->numbytes)
505 break;
506 q->numbytes -= len_scaled;
507 move_pkt(pkt, q, p, len);
508 }
509
510 /*
511 * If we have more packets queued, schedule next ready event
512 * (can only occur when bandwidth != 0, otherwise we would have
513 * flushed the whole queue in the previous loop).
514 * To this purpose we record the current time and compute how many
515 * ticks to go for the finish time of the packet.
516 */
517 if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
518 /* This implies bandwidth != 0 */
519 dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
520
521 q->sched_time = curr_time;
522
523 /*
524 * XXX should check errors on heap_insert, and drain the whole
525 * queue on error hoping next time we are luckier.
526 */
527 heap_insert(&ready_heap, curr_time + t, q);
528 } else { /* RED needs to know when the queue becomes empty */
529 q->q_time = curr_time;
530 q->numbytes = 0;
531 }
532
533 /*
534 * If the delay line was empty call transmit_event(p) now.
535 * Otherwise, the scheduler will take care of it.
536 */
537 if (p_was_empty)
538 transmit_event(p);
539 }
540
541 /*
542 * Called when we can transmit packets on WF2Q queues. Take pkts out of
543 * the queues at their start time, and enqueue into the delay line.
544 * Packets are drained until p->numbytes < 0. As long as
545 * len_scaled >= p->numbytes, the packet goes into the delay line
546 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
547 * there is an additional delay.
548 */
549 static void
550 ready_event_wfq(struct dn_pipe *p)
551 {
552 int p_was_empty = TAILQ_EMPTY(&p->p_queue);
553 struct dn_heap *sch = &p->scheduler_heap;
554 struct dn_heap *neh = &p->not_eligible_heap;
555
556 p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
557
558 /*
559 * While we have backlogged traffic AND credit, we need to do
560 * something on the queue.
561 */
562 while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
563 if (sch->elements > 0) { /* Have some eligible pkts to send out */
564 struct dn_flow_queue *q = sch->p[0].object;
565 struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
566 struct dn_flow_set *fs = q->fs;
567 uint64_t len = pkt->dn_m->m_pkthdr.len;
568 int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
569
570 heap_extract(sch, NULL); /* Remove queue from heap */
571 p->numbytes -= len_scaled;
572 move_pkt(pkt, q, p, len);
573
574 p->V += (len << MY_M) / p->sum; /* Update V */
575 q->S = q->F; /* Update start time */
576
577 if (q->len == 0) { /* Flow not backlogged any more */
578 fs->backlogged--;
579 heap_insert(&p->idle_heap, q->F, q);
580 } else { /* Still backlogged */
581 /*
582 * Update F and position in backlogged queue, then
583 * put flow in not_eligible_heap (we will fix this later).
584 */
585 len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
586 q->F += (len << MY_M) / (uint64_t)fs->weight;
587 if (DN_KEY_LEQ(q->S, p->V))
588 heap_insert(neh, q->S, q);
589 else
590 heap_insert(sch, q->F, q);
591 }
592 }
593
594 /*
595 * Now compute V = max(V, min(S_i)). Remember that all elements in
596 * sch have by definition S_i <= V so if sch is not empty, V is surely
597 * the max and we must not update it. Conversely, if sch is empty
598 * we only need to look at neh.
599 */
600 if (sch->elements == 0 && neh->elements > 0)
601 p->V = MAX64(p->V, neh->p[0].key);
602
603 /*
604 * Move from neh to sch any packets that have become eligible
605 */
606 while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
607 struct dn_flow_queue *q = neh->p[0].object;
608
609 heap_extract(neh, NULL);
610 heap_insert(sch, q->F, q);
611 }
612 }
613
614 if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
615 p->idle_heap.elements > 0) {
616 /*
617 * No traffic and no events scheduled. We can get rid of idle-heap.
618 */
619 int i;
620
621 for (i = 0; i < p->idle_heap.elements; i++) {
622 struct dn_flow_queue *q = p->idle_heap.p[i].object;
623
624 q->F = 0;
625 q->S = q->F + 1;
626 }
627 p->sum = 0;
628 p->V = 0;
629 p->idle_heap.elements = 0;
630 }
631
632 /*
633 * If we are getting clocks from dummynet and if we are under credit,
634 * schedule the next ready event.
635 * Also fix the delivery time of the last packet.
636 */
637 if (p->numbytes < 0) { /* This implies bandwidth>0 */
638 dn_key t = 0; /* Number of ticks i have to wait */
639
640 if (p->bandwidth > 0)
641 t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
642 TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
643 p->sched_time = curr_time;
644
645 /*
646 * XXX should check errors on heap_insert, and drain the whole
647 * queue on error hoping next time we are luckier.
648 */
649 heap_insert(&wfq_ready_heap, curr_time + t, p);
650 }
651
652 /*
653 * If the delay line was empty call transmit_event(p) now.
654 * Otherwise, the scheduler will take care of it.
655 */
656 if (p_was_empty)
657 transmit_event(p);
658 }
659
660 static void
661 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
662 {
663 if (pipe->idle_heap.elements > 0 &&
664 DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
665 struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
666
667 heap_extract(&pipe->idle_heap, NULL);
668 q->S = q->F + 1; /* Mark timestamp as invalid */
669 pipe->sum -= q->fs->weight;
670 }
671 }
672
673 /*
674 * This is called once per tick, or dn_hz times per second. It is used to
675 * increment the current tick counter and schedule expired events.
676 */
677 static void
678 dummynet(netmsg_t msg)
679 {
680 void *p;
681 struct dn_heap *h;
682 struct dn_heap *heaps[3];
683 int i;
684
685 heaps[0] = &ready_heap; /* Fixed-rate queues */
686 heaps[1] = &wfq_ready_heap; /* WF2Q queues */
687 heaps[2] = &extract_heap; /* Delay line */
688
689 /* Reply ASAP */
690 crit_enter();
691 lwkt_replymsg(&msg->lmsg, 0);
692 crit_exit();
693
694 curr_time++;
695 for (i = 0; i < 3; i++) {
696 h = heaps[i];
697 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
698 if (h->p[0].key > curr_time) {
699 kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
700 i, (int)(curr_time - h->p[0].key));
701 }
702
703 p = h->p[0].object; /* Store a copy before heap_extract */
704 heap_extract(h, NULL); /* Need to extract before processing */
705
706 if (i == 0)
707 ready_event(p);
708 else if (i == 1)
709 ready_event_wfq(p);
710 else
711 transmit_event(p);
712 }
713 }
714
715 /* Sweep pipes trying to expire idle flow_queues */
716 dn_iterate_pipe(dn_expire_pipe_cb, NULL);
717 }
718
719 /*
720 * Unconditionally expire empty queues in case of shortage.
721 * Returns the number of queues freed.
722 */
723 static int
724 expire_queues(struct dn_flow_set *fs)
725 {
726 int i, initial_elements = fs->rq_elements;
727
728 if (fs->last_expired == time_uptime)
729 return 0;
730
731 fs->last_expired = time_uptime;
732
733 for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
734 struct dn_flow_queue *q, *qn;
735
736 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
737 if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
738 continue;
739
740 /*
741 * Entry is idle, expire it
742 */
743 LIST_REMOVE(q, q_link);
744 kfree(q, M_DUMMYNET);
745
746 KASSERT(fs->rq_elements > 0,
747 ("invalid rq_elements %d", fs->rq_elements));
748 fs->rq_elements--;
749 }
750 }
751 return initial_elements - fs->rq_elements;
752 }
753
754 /*
755 * If room, create a new queue and put at head of slot i;
756 * otherwise, create or use the default queue.
757 */
758 static struct dn_flow_queue *
759 create_queue(struct dn_flow_set *fs, int i)
760 {
761 struct dn_flow_queue *q;
762
763 if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
764 expire_queues(fs) == 0) {
765 /*
766 * No way to get room, use or create overflow queue.
767 */
768 i = fs->rq_size;
769 if (!LIST_EMPTY(&fs->rq[i]))
770 return LIST_FIRST(&fs->rq[i]);
771 }
772
773 q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
774 if (q == NULL)
775 return NULL;
776
777 q->fs = fs;
778 q->hash_slot = i;
779 q->S = q->F + 1; /* hack - mark timestamp as invalid */
780 TAILQ_INIT(&q->queue);
781
782 LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
783 fs->rq_elements++;
784
785 return q;
786 }
787
788 /*
789 * Given a flow_set and a flow id, find a matching queue
790 * after appropriate masking. The queue is moved to front
791 * so that further searches take less time.
792 */
793 static struct dn_flow_queue *
794 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
795 {
796 struct dn_flow_queue *q;
797 int i = 0;
798
799 if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
800 q = LIST_FIRST(&fs->rq[0]);
801 } else {
802 struct dn_flow_queue *qn;
803
804 /* First, do the masking */
805 id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
806 id->fid_src_ip &= fs->flow_mask.fid_src_ip;
807 id->fid_dst_port &= fs->flow_mask.fid_dst_port;
808 id->fid_src_port &= fs->flow_mask.fid_src_port;
809 id->fid_proto &= fs->flow_mask.fid_proto;
810 id->fid_flags = 0; /* we don't care about this one */
811
812 /* Then, hash function */
813 i = ((id->fid_dst_ip) & 0xffff) ^
814 ((id->fid_dst_ip >> 15) & 0xffff) ^
815 ((id->fid_src_ip << 1) & 0xffff) ^
816 ((id->fid_src_ip >> 16 ) & 0xffff) ^
817 (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
818 (id->fid_proto);
819 i = i % fs->rq_size;
820
821 /*
822 * Finally, scan the current list for a match and
823 * expire idle flow queues
824 */
825 searches++;
826 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
827 search_steps++;
828 if (id->fid_dst_ip == q->id.fid_dst_ip &&
829 id->fid_src_ip == q->id.fid_src_ip &&
830 id->fid_dst_port == q->id.fid_dst_port &&
831 id->fid_src_port == q->id.fid_src_port &&
832 id->fid_proto == q->id.fid_proto &&
833 id->fid_flags == q->id.fid_flags) {
834 break; /* Found */
835 } else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
836 q->S == q->F + 1) {
837 /*
838 * Entry is idle and not in any heap, expire it
839 */
840 LIST_REMOVE(q, q_link);
841 kfree(q, M_DUMMYNET);
842
843 KASSERT(fs->rq_elements > 0,
844 ("invalid rq_elements %d", fs->rq_elements));
845 fs->rq_elements--;
846 }
847 }
848 if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
849 LIST_REMOVE(q, q_link);
850 LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
851 }
852 }
853 if (q == NULL) { /* No match, need to allocate a new entry */
854 q = create_queue(fs, i);
855 if (q != NULL)
856 q->id = *id;
857 }
858 return q;
859 }
860
861 static int
862 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
863 {
864 /*
865 * RED algorithm
866 *
867 * RED calculates the average queue size (avg) using a low-pass filter
868 * with an exponential weighted (w_q) moving average:
869 * avg <- (1-w_q) * avg + w_q * q_size
870 * where q_size is the queue length (measured in bytes or packets).
871 *
872 * If q_size == 0, we compute the idle time for the link, and set
873 * avg = (1 - w_q)^(idle/s)
874 * where s is the time needed for transmitting a medium-sized packet.
875 *
876 * Now, if avg < min_th the packet is enqueued.
877 * If avg > max_th the packet is dropped. Otherwise, the packet is
878 * dropped with probability P function of avg.
879 */
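/*
 * Added worked example (illustrative values): with min_th = 10, max_th = 30
 * and max_p = 0.1, an average queue of 20 packets sits halfway between the
 * thresholds, so the linear rule below gives
 * p_b = max_p * (avg - min_th) / (max_th - min_th) = 0.05.
 * The final test scales p_b by q->count, so the longer the flow goes
 * without a drop, the more likely the next packet is to be dropped.
 */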
880
881 int64_t p_b = 0;
882 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
883
884 IPFW3_DEBUG("\n%d q: %2u ", (int)curr_time, q_size);
885
886 /* Average queue size estimation */
887 if (q_size != 0) {
888 /*
889 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
890 */
891 int diff = SCALE(q_size) - q->avg;
892 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
893
894 q->avg += (int)v;
895 } else {
896 /*
897 * Queue is empty, find for how long the queue has been
898 * empty and use a lookup table for computing
899 * (1 - w_q)^(idle_time/s) where s is the time to send a
900 * (small) packet.
901 * XXX check wraps...
902 */
903 if (q->avg) {
904 u_int t = (curr_time - q->q_time) / fs->lookup_step;
905
906 q->avg = (t < fs->lookup_depth) ?
907 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
908 }
909 }
910 IPFW3_DEBUG("avg: %u ", SCALE_VAL(q->avg));
911
912 /* Should i drop? */
913
914 if (q->avg < fs->min_th) {
915 /* Accept packet */
916 q->count = -1;
917 return 0;
918 }
919
920 if (q->avg >= fs->max_th) { /* Average queue >= Max threshold */
921 if (fs->flags_fs & DN_IS_GENTLE_RED) {
922 /*
923 * According to Gentle-RED, if avg is greater than max_th the
924 * packet is dropped with a probability
925 * p_b = c_3 * avg - c_4
926 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
927 */
928 p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
929 } else {
930 q->count = -1;
931 kprintf("- drop\n");
932 return 1;
933 }
934 } else if (q->avg > fs->min_th) {
935 /*
936 * We compute p_b using the linear dropping function p_b = c_1 *
937 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
938 * max_p * min_th / (max_th - min_th)
939 */
940 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
941 }
942 if (fs->flags_fs & DN_QSIZE_IS_BYTES)
943 p_b = (p_b * len) / fs->max_pkt_size;
944
945 if (++q->count == 0) {
946 q->random = krandom() & 0xffff;
947 } else {
948 /*
949 * q->count counts packets arrived since last drop, so a greater
950 * value of q->count means a greater packet drop probability.
951 */
952 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
953 q->count = 0;
954 IPFW3_DEBUG("%s", "- red drop");
955 /* After a drop we calculate a new random value */
956 q->random = krandom() & 0xffff;
957 return 1; /* Drop */
958 }
959 }
960 /* End of RED algorithm */
961 return 0; /* Accept */
962 }
963
964 static void
965 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
966 {
967 int i;
968
969 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
970 struct dn_pipe_head *pipe_hdr = &pipe_table[i];
971 struct dn_pipe *pipe, *pipe_next;
972
973 LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
974 func(pipe, arg);
975 }
976 }
977
978 static void
979 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
980 {
981 int i;
982
983 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
984 struct dn_flowset_head *fs_hdr = &flowset_table[i];
985 struct dn_flow_set *fs, *fs_next;
986
987 LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
988 func(fs, arg);
989 }
990 }
991
992 static struct dn_pipe *
993 dn_find_pipe(int pipe_nr)
994 {
995 struct dn_pipe_head *pipe_hdr;
996 struct dn_pipe *p;
997
998 pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
999 LIST_FOREACH(p, pipe_hdr, p_link) {
1000 if (p->pipe_nr == pipe_nr)
1001 break;
1002 }
1003 return p;
1004 }
1005
1006 static struct dn_flow_set *
1007 dn_find_flowset(int fs_nr)
1008 {
1009 struct dn_flowset_head *fs_hdr;
1010 struct dn_flow_set *fs;
1011
1012 fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
1013 LIST_FOREACH(fs, fs_hdr, fs_link) {
1014 if (fs->fs_nr == fs_nr)
1015 break;
1016 }
1017 return fs;
1018 }
1019
1020 static struct dn_flow_set *
1021 dn_locate_flowset(int pipe_nr, int is_pipe)
1022 {
1023 struct dn_flow_set *fs = NULL;
1024
1025 if (!is_pipe) {
1026 fs = dn_find_flowset(pipe_nr);
1027 } else {
1028 struct dn_pipe *p;
1029
1030 p = dn_find_pipe(pipe_nr);
1031 if (p != NULL)
1032 fs = &p->fs;
1033 }
1034 return fs;
1035 }
1036
1037 /*
1038 * Dummynet hook for packets. Below 'pipe' is a pipe or a queue
1039 * depending on whether WF2Q or fixed bw is used.
1040 *
1041 * m		the mbuf carrying the packet.
1042 *
1043 * The pipe/queue number, the DN_FLAGS_IS_PIPE flag and the flow id are
1044 * not passed as arguments: they are read from the dn_pkt stored in the
1045 * mbuf's PACKET_TAG_DUMMYNET tag (see m_tag_find() below), which also
1046 * carries what is needed to redispatch the packet once its delay has
1047 * elapsed.  The tag is attached by the ipfw3 layer when a packet matches
1048 * a pipe or queue rule (see check_pipe()/check_queue() above, which set
1049 * the rule and cookie in the firewall args).
1050 */
1051 static int
1052 dummynet_io(struct mbuf *m)
1053 {
1054 struct dn_pkt *pkt;
1055 struct m_tag *tag;
1056 struct dn_flow_set *fs;
1057 struct dn_pipe *pipe;
1058 uint64_t len = m->m_pkthdr.len;
1059 struct dn_flow_queue *q = NULL;
1060 int is_pipe, pipe_nr;
1061
1062 tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1063 pkt = m_tag_data(tag);
1064
1065 is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1066 pipe_nr = pkt->pipe_nr;
1067
1068 /*
1069 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule
1070 */
1071 fs = dn_locate_flowset(pipe_nr, is_pipe);
1072 if (fs == NULL)
1073 goto dropit; /* This queue/pipe does not exist! */
1074
1075 pipe = fs->pipe;
1076 if (pipe == NULL) { /* Must be a queue, try find a matching pipe */
1077 pipe = dn_find_pipe(fs->parent_nr);
1078 if (pipe != NULL) {
1079 fs->pipe = pipe;
1080 } else {
1081 kprintf("No pipe %d for queue %d, drop pkt\n",
1082 fs->parent_nr, fs->fs_nr);
1083 goto dropit;
1084 }
1085 }
1086
1087 q = find_queue(fs, &pkt->id);
1088 if (q == NULL)
1089 goto dropit; /* Cannot allocate queue */
1090
1091 /*
1092 * Update statistics, then check reasons to drop pkt
1093 */
1094 q->tot_bytes += len;
1095 q->tot_pkts++;
1096
1097 if (fs->plr && krandom() < fs->plr)
1098 goto dropit; /* Random pkt drop */
1099
1100 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1101 if (q->len_bytes > fs->qsize)
1102 goto dropit; /* Queue size overflow */
1103 } else {
1104 if (q->len >= fs->qsize)
1105 goto dropit; /* Queue count overflow */
1106 }
1107
1108 if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1109 goto dropit;
1110
1111 TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1112 q->len++;
1113 q->len_bytes += len;
1114
1115 if (TAILQ_FIRST(&q->queue) != pkt) /* Flow was not idle, we are done */
1116 goto done;
1117
1118 /*
1119 * If we reach this point the flow was previously idle, so we need
1120 * to schedule it. This involves different actions for fixed-rate
1121 * or WF2Q queues.
1122 */
1123 if (is_pipe) {
1124 /*
1125 * Fixed-rate queue: just insert into the ready_heap.
1126 */
1127 dn_key t = 0;
1128
1129 if (pipe->bandwidth)
1130 t = SET_TICKS(pkt, q, pipe);
1131
1132 q->sched_time = curr_time;
1133 if (t == 0) /* Must process it now */
1134 ready_event(q);
1135 else
1136 heap_insert(&ready_heap, curr_time + t, q);
1137 } else {
1138 /*
1139 * WF2Q:
1140 * First, compute start time S: if the flow was idle (S=F+1)
1141 * set S to the virtual time V for the controlling pipe, and update
1142 * the sum of weights for the pipe; otherwise, remove flow from
1143 * idle_heap and set S to max(F, V).
1144 * Second, compute finish time F = S + len/weight.
1145 * Third, if pipe was idle, update V = max(S, V).
1146 * Fourth, count one more backlogged flow.
1147 */
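/*
 * Added illustrative note: with the update below, a len = 1000 byte packet
 * on a queue with fs->weight = 10 gets F = S + (1000 << MY_M) / 10, while
 * the same packet with weight = 50 gets a five times smaller increment;
 * heavier queues therefore reach their finish times sooner and are served
 * more often by the scheduler_heap.
 */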
1148 if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1149 q->S = pipe->V;
1150 pipe->sum += fs->weight; /* Add weight of new queue */
1151 } else {
1152 heap_extract(&pipe->idle_heap, q);
1153 q->S = MAX64(q->F, pipe->V);
1154 }
1155 q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1156
1157 if (pipe->not_eligible_heap.elements == 0 &&
1158 pipe->scheduler_heap.elements == 0)
1159 pipe->V = MAX64(q->S, pipe->V);
1160
1161 fs->backlogged++;
1162
1163 /*
1164 * Look at eligibility. A flow is not eligible if S>V (when
1165 * this happens, it means that there is some other flow already
1166 * scheduled for the same pipe, so the scheduler_heap cannot be
1167 * empty). If the flow is not eligible we just store it in the
1168 * not_eligible_heap. Otherwise, we store in the scheduler_heap
1169 * and possibly invoke ready_event_wfq() right now if there is
1170 * leftover credit.
1171 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1172 * and for all flows in not_eligible_heap (NEH), S_i > V.
1173 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
1174 * we only need to look into NEH.
1175 */
1176 if (DN_KEY_GT(q->S, pipe->V)) { /* Not eligible */
1177 if (pipe->scheduler_heap.elements == 0)
1178 kprintf("++ ouch! not eligible but empty scheduler!\n");
1179 heap_insert(&pipe->not_eligible_heap, q->S, q);
1180 } else {
1181 heap_insert(&pipe->scheduler_heap, q->F, q);
1182 if (pipe->numbytes >= 0) { /* Pipe is idle */
1183 if (pipe->scheduler_heap.elements != 1)
1184 kprintf("*** OUCH! pipe should have been idle!\n");
1185 IPFW3_DEBUG("Waking up pipe %d at %d\n",
1186 pipe->pipe_nr, (int)(q->F >> MY_M));
1187 pipe->sched_time = curr_time;
1188 ready_event_wfq(pipe);
1189 }
1190 }
1191 }
1192 done:
1193 return 0;
1194
1195 dropit:
1196 if (q)
1197 q->drops++;
1198 return ENOBUFS;
1199 }
1200
1201 /*
1202 * Dispose all packets and flow_queues on a flow_set.
1203 * If all=1, also remove red lookup table and other storage,
1204 * including the descriptor itself.
1205 * For the flow_set embedded in a dn_pipe, the caller MUST also clean up ready_heap...
1206 */
1207 static void
1208 purge_flow_set(struct dn_flow_set *fs, int all)
1209 {
1210 int i;
1211 #ifdef INVARIANTS
1212 int rq_elements = 0;
1213 #endif
1214
1215 for (i = 0; i <= fs->rq_size; i++) {
1216 struct dn_flow_queue *q;
1217
1218 while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1219 struct dn_pkt *pkt;
1220
1221 while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1222 TAILQ_REMOVE(&q->queue, pkt, dn_next);
1223 ip_dn_packet_free(pkt);
1224 }
1225
1226 LIST_REMOVE(q, q_link);
1227 kfree(q, M_DUMMYNET);
1228
1229 #ifdef INVARIANTS
1230 rq_elements++;
1231 #endif
1232 }
1233 }
1234 KASSERT(rq_elements == fs->rq_elements,
1235 ("# rq elements mismatch, freed %d, total %d",
1236 rq_elements, fs->rq_elements));
1237 fs->rq_elements = 0;
1238
1239 if (all) {
1240 /* RED - free lookup table */
1241 if (fs->w_q_lookup)
1242 kfree(fs->w_q_lookup, M_DUMMYNET);
1243
1244 if (fs->rq)
1245 kfree(fs->rq, M_DUMMYNET);
1246
1247 /*
1248 * If this fs is not part of a pipe, free it
1249 *
1250 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
1251 * - No packet belonging to that flow set has been delivered by
1252 * dummynet_io(), i.e. parent pipe is not installed yet.
1253 * - Parent pipe is deleted.
1254 */
1255 if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1256 kfree(fs, M_DUMMYNET);
1257 }
1258 }
1259
1260 /*
1261 * Dispose all packets queued on a pipe (not a flow_set).
1262 * Also free all resources associated to a pipe, which is about
1263 * to be deleted.
1264 */
1265 static void
1266 purge_pipe(struct dn_pipe *pipe)
1267 {
1268 struct dn_pkt *pkt;
1269
1270 purge_flow_set(&pipe->fs, 1);
1271
1272 while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1273 TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1274 ip_dn_packet_free(pkt);
1275 }
1276
1277 heap_free(&pipe->scheduler_heap);
1278 heap_free(&pipe->not_eligible_heap);
1279 heap_free(&pipe->idle_heap);
1280 }
1281
1282 /*
1283 * Delete all pipes and heaps returning memory.
1284 */
1285 static void
1286 dummynet_flush(void)
1287 {
1288 struct dn_pipe_head pipe_list;
1289 struct dn_flowset_head fs_list;
1290 struct dn_pipe *p;
1291 struct dn_flow_set *fs;
1292 int i;
1293
1294 /*
1295 * Prevent future matches...
1296 */
1297 LIST_INIT(&pipe_list);
1298 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1299 struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1300
1301 while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1302 LIST_REMOVE(p, p_link);
1303 LIST_INSERT_HEAD(&pipe_list, p, p_link);
1304 }
1305 }
1306
1307 LIST_INIT(&fs_list);
1308 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1309 struct dn_flowset_head *fs_hdr = &flowset_table[i];
1310
1311 while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1312 LIST_REMOVE(fs, fs_link);
1313 LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1314 }
1315 }
1316
1317 /* Free heaps so we don't have unwanted events */
1318 heap_free(&ready_heap);
1319 heap_free(&wfq_ready_heap);
1320 heap_free(&extract_heap);
1321
1322 /*
1323 * Now purge all queued pkts and delete all pipes
1324 */
1325 /* Scan and purge all flow_sets. */
1326 while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1327 LIST_REMOVE(fs, fs_link);
1328 purge_flow_set(fs, 1);
1329 }
1330
1331 while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1332 LIST_REMOVE(p, p_link);
1333 purge_pipe(p);
1334 kfree(p, M_DUMMYNET);
1335 }
1336 }
1337
1338 /*
1339 * setup RED parameters
1340 */
1341 static int
1342 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1343 {
1344 int i;
1345
1346 x->w_q = ioc_fs->w_q;
1347 x->min_th = SCALE(ioc_fs->min_th);
1348 x->max_th = SCALE(ioc_fs->max_th);
1349 x->max_p = ioc_fs->max_p;
1350
1351 x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1352 x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1353 if (x->flags_fs & DN_IS_GENTLE_RED) {
1354 x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1355 x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
1356 }
1357
1358 /* If the lookup table already exists, free it and create it again */
1359 if (x->w_q_lookup) {
1360 kfree(x->w_q_lookup, M_DUMMYNET);
1361 x->w_q_lookup = NULL ;
1362 }
1363
1364 if (red_lookup_depth == 0) {
1365 kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1366 kfree(x, M_DUMMYNET);
1367 return EINVAL;
1368 }
1369 x->lookup_depth = red_lookup_depth;
1370 x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1371 M_DUMMYNET, M_WAITOK);
1372
1373 /* Fill the lookup table with (1 - w_q)^x */
1374 x->lookup_step = ioc_fs->lookup_step;
1375 x->lookup_weight = ioc_fs->lookup_weight;
1376
1377 x->w_q_lookup[0] = SCALE(1) - x->w_q;
1378 for (i = 1; i < x->lookup_depth; i++)
1379 x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1380
1381 if (red_avg_pkt_size < 1)
1382 red_avg_pkt_size = 512;
1383 x->avg_pkt_size = red_avg_pkt_size;
1384
1385 if (red_max_pkt_size < 1)
1386 red_max_pkt_size = 1500;
1387 x->max_pkt_size = red_max_pkt_size;
1388
1389 return 0;
1390 }
1391
1392 static void
1393 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1394 {
1395 int i, alloc_size;
1396
1397 if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1398 int l = ioc_fs->rq_size;
1399
1400 /* Allocate some slots */
1401 if (l == 0)
1402 l = dn_hash_size;
1403
1404 if (l < DN_MIN_HASH_SIZE)
1405 l = DN_MIN_HASH_SIZE;
1406 else if (l > DN_MAX_HASH_SIZE)
1407 l = DN_MAX_HASH_SIZE;
1408
1409 x->rq_size = l;
1410 } else {
1411 /* One is enough for null mask */
1412 x->rq_size = 1;
1413 }
1414 alloc_size = x->rq_size + 1;
1415
1416 x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1417 M_DUMMYNET, M_WAITOK | M_ZERO);
1418 x->rq_elements = 0;
1419
1420 for (i = 0; i < alloc_size; ++i)
1421 LIST_INIT(&x->rq[i]);
1422 }
1423
1424 static void
1425 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1426 {
1427 id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1428 id->fid_src_ip = ioc_id->u.ip.src_ip;
1429 id->fid_dst_port = ioc_id->u.ip.dst_port;
1430 id->fid_src_port = ioc_id->u.ip.src_port;
1431 id->fid_proto = ioc_id->u.ip.proto;
1432 id->fid_flags = ioc_id->u.ip.flags;
1433 }
1434
1435 static void
1436 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1437 {
1438 x->flags_fs = ioc_fs->flags_fs;
1439 x->qsize = ioc_fs->qsize;
1440 x->plr = ioc_fs->plr;
1441 set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1442 if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1443 if (x->qsize > 1024 * 1024)
1444 x->qsize = 1024 * 1024;
1445 } else {
1446 if (x->qsize == 0 || x->qsize > 100)
1447 x->qsize = 50;
1448 }
1449
1450 /* Configuring RED */
1451 if (x->flags_fs & DN_IS_RED)
1452 config_red(ioc_fs, x); /* XXX should check errors */
1453 }
1454
1455 /*
1456 * setup pipe or queue parameters.
1457 */
1458
1459 static int
1460 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1461 {
1462 struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1463 int error;
1464
1465 /*
1466 * The config program passes parameters as follows:
1467 * bw bits/second (0 means no limits)
1468 * delay ms (must be translated into ticks)
1469 * qsize slots or bytes
1470 */
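/*
 * Added example: with the default dn_hz = 1000, a requested delay of 20 ms
 * maps to (20 * 1000) / 1000 = 20 ticks here; if dn_hz were lowered to 100
 * via the hz sysctl, the same 20 ms would become 2 ticks.
 * dn_copyout_pipe_cb() applies the inverse conversion when reporting the
 * pipe back to userland.
 */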
1471 ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
1472
1473 /*
1474 * We need either a pipe number or a flow_set number
1475 */
1476 if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1477 return EINVAL;
1478 if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1479 return EINVAL;
1480
1481 /*
1482 * Validate pipe number
1483 */
1484 if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1485 return EINVAL;
1486
1487 error = EINVAL;
1488 if (ioc_pipe->pipe_nr != 0) { /* This is a pipe */
1489 struct dn_pipe *x, *p;
1490
1491 /* Locate pipe */
1492 p = dn_find_pipe(ioc_pipe->pipe_nr);
1493
1494 if (p == NULL) { /* New pipe */
1495 x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1496 x->pipe_nr = ioc_pipe->pipe_nr;
1497 x->fs.pipe = x;
1498 TAILQ_INIT(&x->p_queue);
1499
1500 /*
1501 * idle_heap is the only one from which we extract from the middle.
1502 */
1503 x->idle_heap.size = x->idle_heap.elements = 0;
1504 x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1505 } else {
1506 int i;
1507
1508 x = p;
1509
1510 /* Flush accumulated credit for all queues */
1511 for (i = 0; i <= x->fs.rq_size; i++) {
1512 struct dn_flow_queue *q;
1513
1514 LIST_FOREACH(q, &x->fs.rq[i], q_link)
1515 q->numbytes = 0;
1516 }
1517 }
1518
1519 x->bandwidth = ioc_pipe->bandwidth;
1520 x->numbytes = 0; /* Just in case... */
1521 x->delay = ioc_pipe->delay;
1522
1523 set_fs_parms(&x->fs, ioc_fs);
1524
1525 if (x->fs.rq == NULL) { /* A new pipe */
1526 struct dn_pipe_head *pipe_hdr;
1527
1528 alloc_hash(&x->fs, ioc_fs);
1529
1530 pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1531 LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1532 }
1533 } else { /* Config flow_set */
1534 struct dn_flow_set *x, *fs;
1535
1536 /* Locate flow_set */
1537 fs = dn_find_flowset(ioc_fs->fs_nr);
1538
1539 if (fs == NULL) { /* New flow_set */
1540 if (ioc_fs->parent_nr == 0) /* Need link to a pipe */
1541 goto back;
1542
1543 x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1544 M_WAITOK | M_ZERO);
1545 x->fs_nr = ioc_fs->fs_nr;
1546 x->parent_nr = ioc_fs->parent_nr;
1547 x->weight = ioc_fs->weight;
1548 if (x->weight == 0)
1549 x->weight = 1;
1550 else if (x->weight > 100)
1551 x->weight = 100;
1552 } else {
1553 /* Change parent pipe not allowed; must delete and recreate */
1554 if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1555 goto back;
1556 x = fs;
1557 }
1558
1559 set_fs_parms(x, ioc_fs);
1560
1561 if (x->rq == NULL) { /* A new flow_set */
1562 struct dn_flowset_head *fs_hdr;
1563
1564 alloc_hash(x, ioc_fs);
1565
1566 fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1567 LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1568 }
1569 }
1570 error = 0;
1571
1572 back:
1573 return error;
1574 }
1575
1576 /*
1577 * Helper function to remove from a heap queues which are linked to
1578 * a flow_set about to be deleted.
1579 */
1580 static void
1581 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1582 {
1583 int i = 0, found = 0;
1584
1585 while (i < h->elements) {
1586 if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1587 h->elements--;
1588 h->p[i] = h->p[h->elements];
1589 found++;
1590 } else {
1591 i++;
1592 }
1593 }
1594 if (found)
1595 heapify(h);
1596 }
1597
1598 /*
1599 * helper function to remove a pipe from a heap (can be there at most once)
1600 */
1601 static void
1602 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1603 {
1604 if (h->elements > 0) {
1605 int i;
1606
1607 for (i = 0; i < h->elements; i++) {
1608 if (h->p[i].object == p) { /* found it */
1609 h->elements--;
1610 h->p[i] = h->p[h->elements];
1611 heapify(h);
1612 break;
1613 }
1614 }
1615 }
1616 }
1617
1618 static void
1619 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1620 {
1621 struct dn_pipe *pipe = pipe0;
1622
1623 if (fs->pipe == pipe) {
1624 kprintf("++ ref to pipe %d from fs %d\n",
1625 pipe->pipe_nr, fs->fs_nr);
1626 fs->pipe = NULL;
1627 purge_flow_set(fs, 0);
1628 }
1629 }
1630
1631 /*
1632 * Fully delete a pipe or a queue, cleaning up associated info.
1633 */
1634 static int
1635 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1636 {
1637 struct dn_pipe *p;
1638 int error;
1639
1640 if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1641 return EINVAL;
1642 if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1643 return EINVAL;
1644
1645 if (ioc_pipe->pipe_nr > DN_NR_HASH_MAX || ioc_pipe->pipe_nr < 0)
1646 return EINVAL;
1647
1648 error = EINVAL;
1649 if (ioc_pipe->pipe_nr != 0) { /* This is an old-style pipe */
1650 /* Locate pipe */
1651 p = dn_find_pipe(ioc_pipe->pipe_nr);
1652 if (p == NULL)
1653 goto back; /* Not found */
1654
1655 /* Unlink from pipe hash table */
1656 LIST_REMOVE(p, p_link);
1657
1658 /* Remove all references to this pipe from flow_sets */
1659 dn_iterate_flowset(dn_unref_pipe_cb, p);
1660
1661 fs_remove_from_heap(&ready_heap, &p->fs);
1662 purge_pipe(p); /* Remove all data associated to this pipe */
1663
1664 /* Remove reference to here from extract_heap and wfq_ready_heap */
1665 pipe_remove_from_heap(&extract_heap, p);
1666 pipe_remove_from_heap(&wfq_ready_heap, p);
1667
1668 kfree(p, M_DUMMYNET);
1669 } else { /* This is a WF2Q queue (dn_flow_set) */
1670 struct dn_flow_set *fs;
1671
1672 /* Locate flow_set */
1673 fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1674 if (fs == NULL)
1675 goto back; /* Not found */
1676
1677 LIST_REMOVE(fs, fs_link);
1678
1679 if ((p = fs->pipe) != NULL) {
1680 /* Update total weight on parent pipe and cleanup parent heaps */
1681 p->sum -= fs->weight * fs->backlogged;
1682 fs_remove_from_heap(&p->not_eligible_heap, fs);
1683 fs_remove_from_heap(&p->scheduler_heap, fs);
1684 #if 1 /* XXX should i remove from idle_heap as well ? */
1685 fs_remove_from_heap(&p->idle_heap, fs);
1686 #endif
1687 }
1688 purge_flow_set(fs, 1);
1689 }
1690 error = 0;
1691
1692 back:
1693 return error;
1694 }
1695
1696 /*
1697 * helper function used to copy data from kernel in DUMMYNET_GET
1698 */
1699 static void
1700 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1701 {
1702 ioc_id->type = ETHERTYPE_IP;
1703 ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1704 ioc_id->u.ip.src_ip = id->fid_src_ip;
1705 ioc_id->u.ip.dst_port = id->fid_dst_port;
1706 ioc_id->u.ip.src_port = id->fid_src_port;
1707 ioc_id->u.ip.proto = id->fid_proto;
1708 ioc_id->u.ip.flags = id->fid_flags;
1709 }
1710
1711 static void *
1712 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1713 {
1714 struct dn_ioc_flowqueue *ioc_fq = bp;
1715 int i, copied = 0;
1716
1717 for (i = 0; i <= fs->rq_size; i++) {
1718 const struct dn_flow_queue *q;
1719
1720 LIST_FOREACH(q, &fs->rq[i], q_link) {
1721 if (q->hash_slot != i) { /* XXX ASSERT */
1722 kprintf("++ at %d: wrong slot (have %d, "
1723 "should be %d)\n",
1724 copied, q->hash_slot, i);
1725 }
1726 if (q->fs != fs) { /* XXX ASSERT */
1727 kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1728 i, q->fs, fs);
1729 }
1730
1731 copied++;
1732
1733 ioc_fq->len = q->len;
1734 ioc_fq->len_bytes = q->len_bytes;
1735 ioc_fq->tot_pkts = q->tot_pkts;
1736 ioc_fq->tot_bytes = q->tot_bytes;
1737 ioc_fq->drops = q->drops;
1738 ioc_fq->hash_slot = q->hash_slot;
1739 ioc_fq->S = q->S;
1740 ioc_fq->F = q->F;
1741 dn_copy_flowid(&q->id, &ioc_fq->id);
1742
1743 ioc_fq++;
1744 }
1745 }
1746
1747 if (copied != fs->rq_elements) { /* XXX ASSERT */
1748 kprintf("++ wrong count, have %d should be %d\n",
1749 copied, fs->rq_elements);
1750 }
1751 return ioc_fq;
1752 }
1753
1754 static void
1755 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1756 u_short fs_type)
1757 {
1758 ioc_fs->fs_type = fs_type;
1759
1760 ioc_fs->fs_nr = fs->fs_nr;
1761 ioc_fs->flags_fs = fs->flags_fs;
1762 ioc_fs->parent_nr = fs->parent_nr;
1763
1764 ioc_fs->weight = fs->weight;
1765 ioc_fs->qsize = fs->qsize;
1766 ioc_fs->plr = fs->plr;
1767
1768 ioc_fs->rq_size = fs->rq_size;
1769 ioc_fs->rq_elements = fs->rq_elements;
1770
1771 ioc_fs->w_q = fs->w_q;
1772 ioc_fs->max_th = fs->max_th;
1773 ioc_fs->min_th = fs->min_th;
1774 ioc_fs->max_p = fs->max_p;
1775
1776 dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1777 }
1778
1779 static void
1780 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1781 {
1782 size_t *size = sz;
1783
1784 *size += sizeof(struct dn_ioc_pipe) +
1785 pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1786 }
1787
1788 static void
1789 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1790 {
1791 size_t *size = sz;
1792
1793 *size += sizeof(struct dn_ioc_flowset) +
1794 fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1795 }
1796
1797 static void
1798 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1799 {
1800 char **bp = bp0;
1801 struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1802
1803 /*
1804 * Copy flow set descriptor associated with this pipe
1805 */
1806 dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1807
1808 /*
1809 * Copy pipe descriptor
1810 */
1811 ioc_pipe->bandwidth = pipe->bandwidth;
1812 ioc_pipe->pipe_nr = pipe->pipe_nr;
1813 ioc_pipe->V = pipe->V;
1814 /* Convert delay to milliseconds */
1815 ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1816
1817 /*
1818 * Copy flow queue descriptors
1819 */
1820 *bp += sizeof(*ioc_pipe);
1821 *bp = dn_copy_flowqueues(&pipe->fs, *bp);
1822 }
1823
1824 static void
1825 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1826 {
1827 char **bp = bp0;
1828 struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1829
1830 /*
1831 * Copy flow set descriptor
1832 */
1833 dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1834
1835 /*
1836 * Copy flow queue descriptors
1837 */
1838 *bp += sizeof(*ioc_fs);
1839 *bp = dn_copy_flowqueues(fs, *bp);
1840 }
1841
1842 static int
1843 dummynet_get(struct dn_sopt *dn_sopt)
1844 {
1845 char *buf, *bp;
1846 size_t size = 0;
1847
1848 /*
1849 * Compute size of data structures: list of pipes and flow_sets.
1850 */
1851 dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1852 dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1853
1854 /*
1855 * Copyout pipe/flow_set/flow_queue
1856 */
1857 bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1858 dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1859 dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1860
1861 /* Temp memory will be freed by caller */
1862 dn_sopt->dn_sopt_arg = buf;
1863 dn_sopt->dn_sopt_arglen = size;
1864 return 0;
1865 }
1866
1867 /*
1868 * Handler for the various dummynet socket options (get, flush, config, del)
1869 */
1870 static int
1871 dummynet_ctl(struct dn_sopt *dn_sopt)
1872 {
1873 int error = 0;
1874
1875 switch (dn_sopt->dn_sopt_name) {
1876 case IP_DUMMYNET_GET:
1877 error = dummynet_get(dn_sopt);
1878 break;
1879
1880 case IP_DUMMYNET_FLUSH:
1881 dummynet_flush();
1882 break;
1883
1884 case IP_DUMMYNET_CONFIGURE:
1885 KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1886 error = config_pipe(dn_sopt->dn_sopt_arg);
1887 break;
1888
1889 case IP_DUMMYNET_DEL: /* Remove a pipe or flow_set */
1890 KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1891 error = delete_pipe(dn_sopt->dn_sopt_arg);
1892 break;
1893
1894 default:
1895 kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1896 error = EINVAL;
1897 break;
1898 }
1899 return error;
1900 }
1901
1902 static void
1903 dummynet_clock(systimer_t info __unused, int in_ipi __unused,
1904 struct intrframe *frame __unused)
1905 {
1906 KASSERT(mycpuid == ip_dn_cpu,
1907 ("dummynet systimer comes on cpu%d, should be %d!",
1908 mycpuid, ip_dn_cpu));
1909
1910 crit_enter();
1911 if (DUMMYNET_LOADED && (dn_netmsg.lmsg.ms_flags & MSGF_DONE))
1912 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &dn_netmsg.lmsg);
1913 crit_exit();
1914 }
1915
1916 static int
1917 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1918 {
1919 int error, val;
1920
1921 val = dn_hz;
1922 error = sysctl_handle_int(oidp, &val, 0, req);
1923 if (error || req->newptr == NULL)
1924 return error;
1925 if (val <= 0)
1926 return EINVAL;
1927 else if (val > DN_CALLOUT_FREQ_MAX)
1928 val = DN_CALLOUT_FREQ_MAX;
1929
1930 crit_enter();
1931 dn_hz = val;
1932 systimer_adjust_periodic(&dn_clock, val);
1933 crit_exit();
1934
1935 return 0;
1936 }
1937
1938 static void
1939 ip_dn_init_dispatch(netmsg_t msg)
1940 {
1941 int i, error = 0;
1942
1943 KASSERT(mycpuid == ip_dn_cpu,
1944 ("%s runs on cpu%d, instead of cpu%d", __func__,
1945 mycpuid, ip_dn_cpu));
1946
1947 crit_enter();
1948
1949 if (DUMMYNET_LOADED) {
1950 kprintf("DUMMYNET already loaded\n");
1951 error = EEXIST;
1952 goto back;
1953 }
1954
1955 kprintf("DUMMYNET initialized (011031)\n");
1956
1957 for (i = 0; i < DN_NR_HASH_MAX; ++i)
1958 LIST_INIT(&pipe_table[i]);
1959
1960 for (i = 0; i < DN_NR_HASH_MAX; ++i)
1961 LIST_INIT(&flowset_table[i]);
1962
1963 ready_heap.size = ready_heap.elements = 0;
1964 ready_heap.offset = 0;
1965
1966 wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1967 wfq_ready_heap.offset = 0;
1968
1969 extract_heap.size = extract_heap.elements = 0;
1970 extract_heap.offset = 0;
1971
1972 ip_dn_ctl_ptr = dummynet_ctl;
1973 ip_dn_io_ptr = dummynet_io;
1974
1975 netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
1976 0, dummynet);
1977 systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1978
1979 back:
1980 crit_exit();
1981 lwkt_replymsg(&msg->lmsg, error);
1982 }
1983
1984 static int
1985 ip_dn_init(void)
1986 {
1987 struct netmsg_base smsg;
1988
1989 if (ip_dn_cpu >= ncpus) {
1990 kprintf("%s: CPU%d does not exist, switch to CPU0\n",
1991 __func__, ip_dn_cpu);
1992 ip_dn_cpu = 0;
1993 }
1994
1995 ip_fw3_register_module(MODULE_DUMMYNET_ID, MODULE_DUMMYNET_NAME);
1996 ip_fw3_register_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_PIPE,
1997 (filter_func)check_pipe);
1998 ip_fw3_register_filter_funcs(MODULE_DUMMYNET_ID, O_DUMMYNET_QUEUE,
1999 (filter_func)check_queue);
2000
2001 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2002 0, ip_dn_init_dispatch);
2003 lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2004 return smsg.lmsg.ms_error;
2005 }
2006
2007 #ifdef KLD_MODULE
2008
2009 static void
2010 ip_dn_stop_dispatch(netmsg_t msg)
2011 {
2012 crit_enter();
2013
2014 dummynet_flush();
2015
2016 ip_dn_ctl_ptr = NULL;
2017 ip_dn_io_ptr = NULL;
2018
2019 systimer_del(&dn_clock);
2020
2021 crit_exit();
2022 lwkt_replymsg(&msg->lmsg, 0);
2023 }
2024
2025 static void
2026 ip_dn_stop(void)
2027 {
2028 struct netmsg_base smsg;
2029
2030 netmsg_init(&smsg, NULL, &curthread->td_msgport,
2031 0, ip_dn_stop_dispatch);
2032 lwkt_domsg(netisr_cpuport(ip_dn_cpu), &smsg.lmsg, 0);
2033
2034 netmsg_service_sync();
2035 }
2036
2037 #endif /* KLD_MODULE */
2038
2039 static int
2040 dummynet_modevent(module_t mod, int type, void *data)
2041 {
2042 switch (type) {
2043 case MOD_LOAD:
2044 return ip_dn_init();
2045
2046 case MOD_UNLOAD:
2047 #ifndef KLD_MODULE
2048 kprintf("dummynet statically compiled, cannot unload\n");
2049 return EINVAL;
2050 #else
2051 ip_dn_stop();
2052 #endif
2053 break;
2054
2055 default:
2056 break;
2057 }
2058 return 0;
2059 }
2060
2061 static moduledata_t dummynet_mod = {
2062 "dummynet2",
2063 dummynet_modevent,
2064 NULL
2065 };
2066 DECLARE_MODULE(dummynet3, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2067 MODULE_DEPEND(dummynet3, ipfw3_basic, 1, 1, 1);
2068 MODULE_VERSION(dummynet3, 1);
2069