/* $Id: pftop.c,v 1.10 2009/04/06 12:08:26 henning Exp $ */
/*
 * Copyright (c) 2001, 2007 Can Erkin Acar
 * Copyright (c) 2001 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <net/pfvar.h>
#include <arpa/inet.h>

#include <altq/altq.h>
#include <altq/altq_cbq.h>
#include <altq/altq_priq.h>
#include <altq/altq_hfsc.h>

#include <ctype.h>
#include <curses.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include "systat.h"
#include "engine.h"
#include "cache.h"

extern const char *tcpstates[];

#define MIN_NUM_STATES 1024
#define NUM_STATE_INC  1024

#define DEFAULT_CACHE_SIZE 10000

/* XXX must also check type before use */
#define PT_ADDR(x) (&(x)->addr.v.a.addr)

/* XXX must also check type before use */
#define PT_MASK(x) (&(x)->addr.v.a.mask)

#define PT_NOROUTE(x) ((x)->addr.type == PF_ADDR_NOROUTE)

/* view management */
int select_states(void);
int read_states(void);
void sort_states(void);
void print_states(void);

int select_rules(void);
int read_rules(void);
void print_rules(void);

int print_header(void);
int keyboard_callback(int ch);

int select_queues(void);
int read_queues(void);
void print_queues(void);

void update_cache(void);

/* qsort callbacks */
int sort_size_callback(const void *s1, const void *s2);
int sort_exp_callback(const void *s1, const void *s2);
int sort_pkt_callback(const void *s1, const void *s2);
int sort_age_callback(const void *s1, const void *s2);
int sort_sa_callback(const void *s1, const void *s2);
int sort_sp_callback(const void *s1, const void *s2);
int sort_da_callback(const void *s1, const void *s2);
int sort_dp_callback(const void *s1, const void *s2);
int sort_rate_callback(const void *s1, const void *s2);
int sort_peak_callback(const void *s1, const void *s2);

int pf_dev = -1;

struct sc_ent **state_cache = NULL;
struct pfsync_state *state_buf = NULL;
int state_buf_len = 0;
u_int32_t *state_ord = NULL;
u_int32_t num_states = 0;
u_int32_t num_states_all = 0;
u_int32_t num_rules = 0;
u_int32_t num_queues = 0;
int cachestates = 0;

char *filter_string = NULL;
int dumpfilter = 0;

#define MIN_LABEL_SIZE 5
#define ANCHOR_FLD_SIZE 12

/* Define fields */
field_def fields[] = {
	{"SRC", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"DEST", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"GW", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"STATE", 5, 23, 18, FLD_ALIGN_COLUMN, -1, 0, 0, 0},
	{"AGE", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"EXP", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"PR ", 4, 9, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"DIR", 1, 3, 2, FLD_ALIGN_CENTER, -1, 0, 0, 0},
	{"PKTS", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"BYTES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"RULE", 2, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"LABEL", MIN_LABEL_SIZE, MIN_LABEL_SIZE, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"STATES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"EVAL", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"ACTION", 1, 8, 4, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"LOG", 1, 3, 2, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"QUICK", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"KS", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"IF", 4, 6, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"INFO", 40, 80, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"MAX", 3, 5, 2, FLD_ALIGN_RIGHT, -1, 0, 0},
	{"RATE", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"AVG", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"PEAK", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"ANCHOR", 6, 16, 1, FLD_ALIGN_LEFT, -1, 0, 0},
	{"QUEUE", 15, 30, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"BW", 4, 5, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"SCH", 3, 4, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"PRIO", 1, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"DROP_P", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"DROP_B", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"QLEN", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"BORROW", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"SUSPENDS", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"P/S", 3, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"B/S", 4, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}
};

#define FIELD_ADDR(x) (&fields[x])

/* for states */
#define FLD_SRC     FIELD_ADDR(0)
#define FLD_DEST    FIELD_ADDR(1)
#define FLD_GW      FIELD_ADDR(2)
#define FLD_STATE   FIELD_ADDR(3)
#define FLD_AGE     FIELD_ADDR(4)
#define FLD_EXP     FIELD_ADDR(5)
/* common */
#define FLD_PROTO   FIELD_ADDR(6)
#define FLD_DIR     FIELD_ADDR(7)
#define FLD_PKTS    FIELD_ADDR(8)
#define FLD_BYTES   FIELD_ADDR(9)
#define FLD_RULE    FIELD_ADDR(10)
/* for rules */
#define FLD_LABEL   FIELD_ADDR(11)
#define FLD_STATS   FIELD_ADDR(12)
#define FLD_EVAL    FIELD_ADDR(13)
#define FLD_ACTION  FIELD_ADDR(14)
#define FLD_LOG     FIELD_ADDR(15)
#define FLD_QUICK   FIELD_ADDR(16)
#define FLD_KST     FIELD_ADDR(17)
#define FLD_IF      FIELD_ADDR(18)
#define FLD_RINFO   FIELD_ADDR(19)
#define FLD_STMAX   FIELD_ADDR(20)
/* other */
#define FLD_SI      FIELD_ADDR(21)	/* instantaneous speed */
#define FLD_SA      FIELD_ADDR(22)	/* average speed */
#define FLD_SP      FIELD_ADDR(23)	/* peak speed */
#define FLD_ANCHOR  FIELD_ADDR(24)
/* for queues */
#define FLD_QUEUE   FIELD_ADDR(25)
#define FLD_BANDW   FIELD_ADDR(26)
#define FLD_SCHED   FIELD_ADDR(27)
#define FLD_PRIO    FIELD_ADDR(28)
#define FLD_DROPP   FIELD_ADDR(29)
#define FLD_DROPB   FIELD_ADDR(30)
#define FLD_QLEN    FIELD_ADDR(31)
#define FLD_BORR    FIELD_ADDR(32)
#define FLD_SUSP    FIELD_ADDR(33)
#define FLD_PKTSPS  FIELD_ADDR(34)
#define FLD_BYTESPS FIELD_ADDR(35)

/* Define views */
field_def *view0[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE,
	FLD_AGE, FLD_EXP, FLD_PKTS, FLD_BYTES, NULL
};

field_def *view1[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_GW, FLD_STATE, FLD_AGE,
	FLD_EXP, FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, NULL
};

field_def *view2[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE, FLD_AGE, FLD_EXP,
	FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view3[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_AGE, FLD_EXP, FLD_PKTS,
	FLD_BYTES, FLD_STATE, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view4[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_PKTS, FLD_BYTES, FLD_STATE,
	FLD_AGE, FLD_EXP, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view5[] = {
	FLD_RULE, FLD_ANCHOR, FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF,
	FLD_PROTO, FLD_KST, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX,
	FLD_RINFO, NULL
};

field_def *view6[] = {
	FLD_RULE, FLD_LABEL, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX,
	FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF, FLD_PROTO,
	FLD_ANCHOR, FLD_KST, NULL
};

field_def *view7[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_SI, FLD_SP, FLD_SA,
	FLD_BYTES, FLD_STATE, FLD_PKTS, FLD_AGE, FLD_EXP, FLD_RULE, FLD_GW, NULL
};

field_def *view8[] = {
	FLD_QUEUE, FLD_BANDW, FLD_SCHED, FLD_PRIO, FLD_PKTS, FLD_BYTES,
	FLD_DROPP, FLD_DROPB, FLD_QLEN, FLD_BORR, FLD_SUSP, FLD_PKTSPS,
	FLD_BYTESPS, NULL
};

/* Define orderings */
order_type order_list[] = {
	{"none", "none", 'N', NULL},
	{"bytes", "bytes", 'B', sort_size_callback},
	{"expiry", "exp", 'E', sort_exp_callback},
	{"packets", "pkt", 'P', sort_pkt_callback},
	{"age", "age", 'A', sort_age_callback},
	{"source addr", "src", 'F', sort_sa_callback},
	{"dest. addr", "dest", 'T', sort_da_callback},
	{"source port", "sport", 'S', sort_sp_callback},
	{"dest. port", "dport", 'D', sort_dp_callback},
	{"rate", "rate", 'R', sort_rate_callback},
	{"peak", "peak", 'K', sort_peak_callback},
	{NULL, NULL, 0, NULL}
};

/* Define view managers */
struct view_manager state_mgr = {
	"States", select_states, read_states, sort_states, print_header,
	print_states, keyboard_callback, order_list, NULL
};

struct view_manager rule_mgr = {
	"Rules", select_rules, read_rules, NULL, print_header,
	print_rules, keyboard_callback, NULL, NULL
};

struct view_manager queue_mgr = {
	"Queues", select_queues, read_queues, NULL, print_header,
	print_queues, keyboard_callback, NULL, NULL
};

field_view views[] = {
	{view2, "states", '8', &state_mgr},
	{view5, "rules", '9', &rule_mgr},
	{view8, "queues", 'Q', &queue_mgr},
	{NULL, NULL, 0, NULL}
};


/* altq structures from pfctl */

union class_stats {
	class_stats_t cbq_stats;
	struct priq_classstats priq_stats;
	struct hfsc_classstats hfsc_stats;
};

struct queue_stats {
	union class_stats data;
	struct timeval timestamp;
	u_int8_t valid;
};

struct pf_altq_node {
	struct pf_altq altq;
	struct pf_altq_node *next;
	struct pf_altq_node *children;
	struct pf_altq_node *next_flat;
	struct queue_stats qstats;
	struct queue_stats qstats_last;
	u_int8_t depth;
	u_int8_t visited;
};


/* ordering functions */

int
sort_size_callback(const void *s1, const void *s2)
{
	u_int64_t b1 = COUNTER(state_buf[* (u_int32_t *) s1].bytes[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s1].bytes[1]);
	u_int64_t b2 = COUNTER(state_buf[* (u_int32_t *) s2].bytes[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s2].bytes[1]);
	if (b2 > b1)
		return sortdir;
	if (b2 < b1)
		return -sortdir;
	return 0;
}

int
sort_pkt_callback(const void *s1, const void *s2)
{
	u_int64_t p1 = COUNTER(state_buf[* (u_int32_t *) s1].packets[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s1].packets[1]);
	u_int64_t p2 = COUNTER(state_buf[* (u_int32_t *) s2].packets[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s2].packets[1]);
	if (p2 > p1)
		return sortdir;
	if (p2 < p1)
		return -sortdir;
	return 0;
}

int
sort_age_callback(const void *s1, const void *s2)
{
	if (ntohl(state_buf[* (u_int32_t *) s2].creation) >
	    ntohl(state_buf[* (u_int32_t *) s1].creation))
		return sortdir;
	if (ntohl(state_buf[* (u_int32_t *) s2].creation) <
	    ntohl(state_buf[* (u_int32_t *) s1].creation))
		return -sortdir;
	return 0;
}

int
sort_exp_callback(const void *s1, const void *s2)
{
	if (ntohl(state_buf[* (u_int32_t *) s2].expire) >
	    ntohl(state_buf[* (u_int32_t *) s1].expire))
		return sortdir;
	if (ntohl(state_buf[* (u_int32_t *) s2].expire) <
	    ntohl(state_buf[* (u_int32_t *) s1].expire))
		return -sortdir;
	return 0;
}

int
sort_rate_callback(const void *s1, const void *s2)
{
	struct sc_ent *e1 = state_cache[* (u_int32_t *) s1];
	struct sc_ent *e2 = state_cache[* (u_int32_t *) s2];

	if (e1 == NULL)
		return sortdir;
	if (e2 == NULL)
		return -sortdir;

	if (e2->rate > e1->rate)
		return sortdir;
	if (e2->rate < e1->rate)
		return -sortdir;
	return 0;
}

int
sort_peak_callback(const void *s1, const void *s2)
{
	struct sc_ent *e1 = state_cache[* (u_int32_t *) s1];
	struct sc_ent *e2 = state_cache[* (u_int32_t *) s2];

	if (e2 == NULL)
		return -sortdir;
	if (e1 == NULL || e2 == NULL)
		return 0;

	if (e2->peak > e1->peak)
		return sortdir;
	if (e2->peak < e1->peak)
		return -sortdir;
	return 0;
}

int
compare_addr(int af, const struct pf_addr *a, const struct pf_addr *b)
{
	switch (af) {
	case AF_INET:
		if (ntohl(a->addr32[0]) > ntohl(b->addr32[0]))
			return 1;
		if (a->addr32[0] != b->addr32[0])
			return -1;
		break;
	case AF_INET6:
		if (ntohl(a->addr32[0]) > ntohl(b->addr32[0]))
			return 1;
		if (a->addr32[0] != b->addr32[0])
			return -1;
		if (ntohl(a->addr32[1]) > ntohl(b->addr32[1]))
			return 1;
		if (a->addr32[1] != b->addr32[1])
			return -1;
		if (ntohl(a->addr32[2]) > ntohl(b->addr32[2]))
			return 1;
		if (a->addr32[2] != b->addr32[2])
			return -1;
		if (ntohl(a->addr32[3]) > ntohl(b->addr32[3]))
			return 1;
		if (a->addr32[3] != b->addr32[3])
			return -1;
		break;
	}

	return 0;
}

__inline int
sort_addr_callback(const struct pfsync_state *s1,
    const struct pfsync_state *s2, int dir)
{
	const struct pf_addr *aa, *ab;
	u_int16_t pa, pb;
	int af, ret, ii, io;

	af = s1->af;

	if (af > s2->af)
		return sortdir;
	if (af < s2->af)
		return -sortdir;

	ii = io = 0;

	if (dir == PF_OUT)	/* looking for source addr */
		io = 1;
	else			/* looking for dest addr */
		ii = 1;

	if (s1->direction == PF_IN) {
		aa = &s1->key[PF_SK_STACK].addr[ii];
		pa = s1->key[PF_SK_STACK].port[ii];
	} else {
		aa = &s1->key[PF_SK_WIRE].addr[io];
		pa = s1->key[PF_SK_WIRE].port[io];
	}

	if (s2->direction == PF_IN) {
		ab = &s2->key[PF_SK_STACK].addr[ii];
		pb = s2->key[PF_SK_STACK].port[ii];
	} else {
		ab = &s2->key[PF_SK_WIRE].addr[io];
		pb = s2->key[PF_SK_WIRE].port[io];
	}

	ret = compare_addr(af, aa, ab);
	if (ret)
		return ret * sortdir;

	if (ntohs(pa) > ntohs(pb))
		return sortdir;
	return -sortdir;
}

__inline int
sort_port_callback(const struct pfsync_state *s1,
    const struct pfsync_state *s2, int dir)
{
	const struct pf_addr *aa, *ab;
	u_int16_t pa, pb;
	int af, ret, ii, io;

	af = s1->af;

	if (af > s2->af)
		return sortdir;
	if (af < s2->af)
		return -sortdir;

	ii = io = 0;

	if (dir == PF_OUT)	/* looking for source addr */
		io = 1;
	else			/* looking for dest addr */
		ii = 1;

	if (s1->direction == PF_IN) {
		aa = &s1->key[PF_SK_STACK].addr[ii];
		pa = s1->key[PF_SK_STACK].port[ii];
	} else {
		aa = &s1->key[PF_SK_WIRE].addr[io];
		pa = s1->key[PF_SK_WIRE].port[io];
	}

	if (s2->direction == PF_IN) {
		ab = &s2->key[PF_SK_STACK].addr[ii];
		pb = s2->key[PF_SK_STACK].port[ii];
	} else {
		ab = &s2->key[PF_SK_WIRE].addr[io];
		pb = s2->key[PF_SK_WIRE].port[io];
	}

	if (ntohs(pa) > ntohs(pb))
		return sortdir;
	if (ntohs(pa) < ntohs(pb))
		return -sortdir;

	ret = compare_addr(af, aa, ab);
	if (ret)
		return ret * sortdir;
	return -sortdir;
}

int
sort_sa_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_addr_callback(s1, s2, PF_OUT);
}

int
sort_da_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_addr_callback(s1, s2, PF_IN);
}

int
sort_sp_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_port_callback(s1, s2, PF_OUT);
}

int
sort_dp_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_port_callback(s1, s2, PF_IN);
}

void
sort_states(void)
{
	order_type *ordering;

	if (curr_mgr == NULL)
		return;

	ordering = curr_mgr->order_curr;

	if (ordering == NULL)
		return;
	if (ordering->func == NULL)
		return;
	if (state_buf == NULL)
		return;
	if (num_states <= 0)
		return;

	mergesort(state_ord, num_states, sizeof(u_int32_t), ordering->func);
}

/* state management functions */

void
alloc_buf(int ns)
{
	int len;

	if (ns < MIN_NUM_STATES)
		ns = MIN_NUM_STATES;

	len = ns;

	if (len >= state_buf_len) {
		len += NUM_STATE_INC;
		state_buf = realloc(state_buf, len * sizeof(struct pfsync_state));
		state_ord = realloc(state_ord, len * sizeof(u_int32_t));
		state_cache = realloc(state_cache,
		    len * sizeof(struct sc_ent *));
		if (state_buf == NULL || state_ord == NULL ||
		    state_cache == NULL)
			err(1, "realloc");
		state_buf_len = len;
	}
}

int
select_states(void)
{
	num_disp = num_states;
	return (0);
}

int
read_states(void)
{
	struct pfioc_states ps;
	int n;

	if (pf_dev == -1)
		return -1;

	for (;;) {
		int sbytes = state_buf_len * sizeof(struct pfsync_state);

		ps.ps_len = sbytes;
		ps.ps_buf = (char *) state_buf;

		if (ioctl(pf_dev, DIOCGETSTATES, &ps) < 0) {
			error("DIOCGETSTATES");
		}
		num_states_all = ps.ps_len / sizeof(struct pfsync_state);

		if (ps.ps_len < sbytes)
			break;

		alloc_buf(num_states_all);
	}

	if (dumpfilter) {
		int fd = open("state.dmp", O_WRONLY|O_CREAT|O_EXCL, 0);
		if (fd > 0) {
			write(fd, state_buf, ps.ps_len);
			close(fd);
		}
	}

	num_states = num_states_all;
	for (n = 0; n < num_states_all; n++)
		state_ord[n] = n;

	if (cachestates) {
		for (n = 0; n < num_states; n++)
			state_cache[n] = cache_state(state_buf + n);
		cache_endupdate();
	}

	num_disp = num_states;
	return 0;
}

int
unmask(struct pf_addr * m, u_int8_t af)
{
	int i = 31, j = 0, b = 0, msize;
	u_int32_t tmp;

	if (af == AF_INET)
		msize = 1;
	else
		msize = 4;
	while (j < msize && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	if (j < msize) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}

/* display functions */

void
tb_print_addr(struct pf_addr * addr, struct pf_addr * mask, int af)
{
	static char buf[48];
	const char *bf;

	bf = inet_ntop(af, addr, buf, sizeof(buf));
	tbprintf("%s", bf);

	if (mask != NULL) {
		if (!PF_AZERO(mask, af))
			tbprintf("/%u", unmask(mask, af));
	}
}
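
/*
 * Print one endpoint of a state: the address and port from the state
 * key "ks"; if the corresponding entry in the other key "kn" differs
 * (i.e. the endpoint is translated), the translated address and port
 * are additionally shown in the GW column.
 */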
void
print_fld_host2(field_def *fld, struct pfsync_state_key *ks,
    struct pfsync_state_key *kn, int idx, int af)
{
	struct pf_addr *as = &ks->addr[idx];
	struct pf_addr *an = &kn->addr[idx];

	u_int16_t ps = ntohs(ks->port[idx]);
	u_int16_t pn = ntohs(kn->port[idx]);

	if (fld == NULL)
		return;

	if (fld->width < 3) {
		print_fld_str(fld, "*");
		return;
	}

	tb_start();
	tb_print_addr(as, NULL, af);

	if (af == AF_INET)
		tbprintf(":%u", ps);
	else
		tbprintf("[%u]", ps);

	print_fld_tb(fld);

	if (PF_ANEQ(as, an, af) || ps != pn) {
		tb_start();
		tb_print_addr(an, NULL, af);

		if (af == AF_INET)
			tbprintf(":%u", pn);
		else
			tbprintf("[%u]", pn);
		print_fld_tb(FLD_GW);
	}
}

void
print_fld_state(field_def *fld, unsigned int proto,
    unsigned int s1, unsigned int s2)
{
	int len;

	if (fld == NULL)
		return;

	len = fld->width;
	if (len < 1)
		return;

	tb_start();

	if (proto == IPPROTO_TCP) {
		if (s1 <= TCPS_TIME_WAIT && s2 <= TCPS_TIME_WAIT)
			tbprintf("%s:%s", tcpstates[s1], tcpstates[s2]);
#ifdef PF_TCPS_PROXY_SRC
		else if (s1 == PF_TCPS_PROXY_SRC ||
		    s2 == PF_TCPS_PROXY_SRC)
			tbprintf("PROXY:SRC\n");
		else if (s1 == PF_TCPS_PROXY_DST ||
		    s2 == PF_TCPS_PROXY_DST)
			tbprintf("PROXY:DST\n");
#endif
		else
			tbprintf("<BAD STATE LEVELS>");
	} else if (proto == IPPROTO_UDP && s1 < PFUDPS_NSTATES &&
	    s2 < PFUDPS_NSTATES) {
		const char *states[] = PFUDPS_NAMES;
		tbprintf("%s:%s", states[s1], states[s2]);
	} else if (proto != IPPROTO_ICMP && s1 < PFOTHERS_NSTATES &&
	    s2 < PFOTHERS_NSTATES) {
		/* XXX ICMP doesn't really have state levels */
		const char *states[] = PFOTHERS_NAMES;
		tbprintf("%s:%s", states[s1], states[s2]);
	} else {
		tbprintf("%u:%u", s1, s2);
	}

	if (strlen(tmp_buf) > len) {
		tb_start();
		tbprintf("%u:%u", s1, s2);
	}

	print_fld_tb(fld);
}

int
print_state(struct pfsync_state * s, struct sc_ent * ent)
{
	struct pfsync_state_peer *src, *dst;
	struct protoent *p;
	u_int64_t sz;

	if (s->direction == PF_OUT) {
		src = &s->src;
		dst = &s->dst;
	} else {
		src = &s->dst;
		dst = &s->src;
	}

	p = getprotobynumber(s->proto);

	if (p != NULL)
		print_fld_str(FLD_PROTO, p->p_name);
	else
		print_fld_uint(FLD_PROTO, s->proto);

	if (s->direction == PF_OUT) {
		print_fld_host2(FLD_SRC, &s->key[PF_SK_WIRE],
		    &s->key[PF_SK_STACK], 1, s->af);
		print_fld_host2(FLD_DEST, &s->key[PF_SK_WIRE],
		    &s->key[PF_SK_STACK], 0, s->af);
	} else {
		print_fld_host2(FLD_SRC, &s->key[PF_SK_STACK],
		    &s->key[PF_SK_WIRE], 0, s->af);
		print_fld_host2(FLD_DEST, &s->key[PF_SK_STACK],
		    &s->key[PF_SK_WIRE], 1, s->af);
	}

	if (s->direction == PF_OUT)
		print_fld_str(FLD_DIR, "Out");
	else
		print_fld_str(FLD_DIR, "In");

	print_fld_state(FLD_STATE, s->proto, src->state, dst->state);
	print_fld_age(FLD_AGE, ntohl(s->creation));
	print_fld_age(FLD_EXP, ntohl(s->expire));

	sz = COUNTER(s->bytes[0]) + COUNTER(s->bytes[1]);

	print_fld_size(FLD_PKTS, COUNTER(s->packets[0]) +
	    COUNTER(s->packets[1]));
	print_fld_size(FLD_BYTES, sz);
	print_fld_rate(FLD_SA, (s->creation) ?
	    ((double)sz / ntohl(s->creation)) : -1);

	print_fld_uint(FLD_RULE, ntohl(s->rule));
	if (cachestates && ent != NULL) {
		print_fld_rate(FLD_SI, ent->rate);
		print_fld_rate(FLD_SP, ent->peak);
	}

	end_line();
	return 1;
}

void
print_states(void)
{
	int n, count = 0;

	for (n = dispstart; n < num_disp; n++) {
		count += print_state(state_buf + state_ord[n],
		    state_cache[state_ord[n]]);
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* rule display */

struct pf_rule *rules = NULL;
u_int32_t alloc_rules = 0;

int
select_rules(void)
{
	num_disp = num_rules;
	return (0);
}

void
add_rule_alloc(u_int32_t nr)
{
	if (nr == 0)
		return;

	num_rules += nr;

	if (rules == NULL) {
		rules = malloc(num_rules * sizeof(struct pf_rule));
		if (rules == NULL)
			err(1, "malloc");
		alloc_rules = num_rules;
	} else if (num_rules > alloc_rules) {
		rules = realloc(rules, num_rules * sizeof(struct pf_rule));
		if (rules == NULL)
			err(1, "realloc");
		alloc_rules = num_rules;
	}
}

int label_length;

int
read_anchor_rules(char *anchor)
{
	struct pfioc_rule pr;
	u_int32_t nr, num, off;
	int len;

	if (pf_dev < 0)
		return (-1);

	memset(&pr, 0, sizeof(pr));
	strlcpy(pr.anchor, anchor, sizeof(pr.anchor));

	if (ioctl(pf_dev, DIOCGETRULES, &pr)) {
		error("anchor %s: %s", anchor, strerror(errno));
		return (-1);
	}

	off = num_rules;
	num = pr.nr;
	add_rule_alloc(num);

	for (nr = 0; nr < num; ++nr) {
		pr.nr = nr;
		if (ioctl(pf_dev, DIOCGETRULE, &pr)) {
			error("DIOCGETRULE: %s", strerror(errno));
			return (-1);
		}
		/* XXX overload pr.anchor to store a pointer to the
		 * anchor name */
		pr.rule.anchor = (struct pf_anchor *) anchor;
		len = strlen(pr.rule.label);
		if (len > label_length)
			label_length = len;
		rules[off + nr] = pr.rule;
	}

	return (num);
}

struct anchor_name {
	char name[MAXPATHLEN];
	struct anchor_name *next;
	u_int32_t ref;
};

struct anchor_name *anchor_root = NULL;
struct anchor_name *anchor_end = NULL;
struct anchor_name *anchor_free = NULL;

struct anchor_name *
alloc_anchor_name(const char *path)
{
	struct anchor_name *a;

	a = anchor_free;
	if (a == NULL) {
		a = (struct anchor_name *)malloc(sizeof(struct anchor_name));
		if (a == NULL)
			return (NULL);
	} else
		anchor_free = a->next;

	if (anchor_root == NULL)
		anchor_end = a;

	a->next = anchor_root;
	anchor_root = a;

	a->ref = 0;
	strlcpy(a->name, path, sizeof(a->name));
	return (a);
}

void
reset_anchor_names(void)
{
	if (anchor_end == NULL)
		return;

	anchor_end->next = anchor_free;
	anchor_free = anchor_root;
	anchor_root = anchor_end = NULL;
}

struct pfioc_ruleset ruleset;
char *rs_end = NULL;

int
read_rulesets(const char *path)
{
	char *pre;
	struct anchor_name *a;
	u_int32_t nr, ns;
	int len;

	if (path == NULL)
		ruleset.path[0] = '\0';
	else if (strlcpy(ruleset.path, path, sizeof(ruleset.path)) >=
	    sizeof(ruleset.path))
		return (-1);

	/* persistent storage for the anchor name */
	a = alloc_anchor_name(ruleset.path);
	if (a == NULL)
		return (-1);
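
	/*
	 * Read the rules attached directly to this anchor first; the
	 * child rulesets found below are then descended recursively.
	 */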
	len = read_anchor_rules(a->name);
	if (len < 0)
		return (-1);

	a->ref += len;

	if (ioctl(pf_dev, DIOCGETRULESETS, &ruleset)) {
		error("DIOCGETRULESETS: %s", strerror(errno));
		return (-1);
	}

	ns = ruleset.nr;

	if (rs_end == NULL)
		rs_end = ruleset.path + sizeof(ruleset.path);

	/* 'pre' tracks the previous level of the anchor path */
	pre = strchr(ruleset.path, 0);
	len = rs_end - pre;
	if (len < 1)
		return (-1);
	--len;

	for (nr = 0; nr < ns; ++nr) {
		ruleset.nr = nr;
		if (ioctl(pf_dev, DIOCGETRULESET, &ruleset)) {
			error("DIOCGETRULESET: %s", strerror(errno));
			return (-1);
		}
		*pre = '/';
		if (strlcpy(pre + 1, ruleset.name, len) < len)
			read_rulesets(ruleset.path);
		*pre = '\0';
	}

	return (0);
}

void
compute_anchor_field(void)
{
	struct anchor_name *a;
	int sum, cnt, mx, nx;
	sum = cnt = mx = 0;

	for (a = anchor_root; a != NULL; a = a->next, cnt++) {
		int len;
		if (a->ref == 0)
			continue;
		len = strlen(a->name);
		sum += len;
		if (len > mx)
			mx = len;
	}

	nx = sum / cnt;
	if (nx < ANCHOR_FLD_SIZE)
		nx = (mx < ANCHOR_FLD_SIZE) ? mx : ANCHOR_FLD_SIZE;

	if (FLD_ANCHOR->max_width != mx ||
	    FLD_ANCHOR->norm_width != nx) {
		FLD_ANCHOR->max_width = mx;
		FLD_ANCHOR->norm_width = nx;
		field_setup();
		need_update = 1;
	}
}

int
read_rules(void)
{
	int ret, nw, mw;
	num_rules = 0;

	if (pf_dev == -1)
		return (-1);

	label_length = MIN_LABEL_SIZE;

	reset_anchor_names();
	ret = read_rulesets(NULL);
	compute_anchor_field();

	nw = mw = label_length;
	if (nw > 16)
		nw = 16;

	if (FLD_LABEL->norm_width != nw ||
	    FLD_LABEL->max_width != mw) {
		FLD_LABEL->norm_width = nw;
		FLD_LABEL->max_width = mw;
		field_setup();
		need_update = 1;
	}

	num_disp = num_rules;
	return (ret);
}

void
tb_print_addrw(struct pf_addr_wrap *addr, struct pf_addr *mask, u_int8_t af)
{
	switch (addr->type) {
	case PF_ADDR_ADDRMASK:
		tb_print_addr(&addr->v.a.addr, mask, af);
		break;
	case PF_ADDR_NOROUTE:
		tbprintf("noroute");
		break;
	case PF_ADDR_DYNIFTL:
		tbprintf("(%s)", addr->v.ifname);
		break;
	case PF_ADDR_TABLE:
		tbprintf("<%s>", addr->v.tblname);
		break;
	default:
		tbprintf("UNKNOWN");
		break;
	}
}

void
tb_print_op(u_int8_t op, const char *a1, const char *a2)
{
	if (op == PF_OP_IRG)
		tbprintf("%s >< %s ", a1, a2);
	else if (op == PF_OP_XRG)
		tbprintf("%s <> %s ", a1, a2);
	else if (op == PF_OP_RRG)
		tbprintf("%s:%s ", a1, a2);
	else if (op == PF_OP_EQ)
		tbprintf("= %s ", a1);
	else if (op == PF_OP_NE)
		tbprintf("!= %s ", a1);
	else if (op == PF_OP_LT)
		tbprintf("< %s ", a1);
	else if (op == PF_OP_LE)
		tbprintf("<= %s ", a1);
	else if (op == PF_OP_GT)
		tbprintf("> %s ", a1);
	else if (op == PF_OP_GE)
		tbprintf(">= %s ", a1);
}

void
tb_print_port(u_int8_t op, u_int16_t p1, u_int16_t p2, char *proto)
{
	char a1[6], a2[6];
	struct servent *s = getservbyport(p1, proto);

	p1 = ntohs(p1);
	p2 = ntohs(p2);
	snprintf(a1, sizeof(a1), "%u", p1);
	snprintf(a2, sizeof(a2), "%u", p2);
	tbprintf("port ");
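	/*
	 * Prefer the symbolic service name (when getservbyport knows the
	 * port) for plain equality/inequality matches; port ranges are
	 * always printed numerically.
	 */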
	if (s != NULL && (op == PF_OP_EQ || op == PF_OP_NE))
		tb_print_op(op, s->s_name, a2);
	else
		tb_print_op(op, a1, a2);
}

void
tb_print_fromto(struct pf_rule_addr *src, struct pf_rule_addr *dst,
    u_int8_t af, u_int8_t proto)
{
	if (PF_AZERO(PT_ADDR(src), AF_INET6) &&
	    PF_AZERO(PT_ADDR(dst), AF_INET6) &&
	    !PT_NOROUTE(src) && !PT_NOROUTE(dst) &&
	    PF_AZERO(PT_MASK(src), AF_INET6) &&
	    PF_AZERO(PT_MASK(dst), AF_INET6) &&
	    !src->port_op && !dst->port_op)
		tbprintf("all ");
	else {
		tbprintf("from ");
		if (PT_NOROUTE(src))
			tbprintf("no-route ");
		else if (PF_AZERO(PT_ADDR(src), AF_INET6) &&
		    PF_AZERO(PT_MASK(src), AF_INET6))
			tbprintf("any ");
		else {
			if (src->neg)
				tbprintf("! ");
			tb_print_addrw(&src->addr, PT_MASK(src), af);
			tbprintf(" ");
		}
		if (src->port_op)
			tb_print_port(src->port_op, src->port[0],
			    src->port[1],
			    proto == IPPROTO_TCP ? "tcp" : "udp");

		tbprintf("to ");
		if (PT_NOROUTE(dst))
			tbprintf("no-route ");
		else if (PF_AZERO(PT_ADDR(dst), AF_INET6) &&
		    PF_AZERO(PT_MASK(dst), AF_INET6))
			tbprintf("any ");
		else {
			if (dst->neg)
				tbprintf("! ");
			tb_print_addrw(&dst->addr, PT_MASK(dst), af);
			tbprintf(" ");
		}
		if (dst->port_op)
			tb_print_port(dst->port_op, dst->port[0],
			    dst->port[1],
			    proto == IPPROTO_TCP ? "tcp" : "udp");
	}
}

void
tb_print_ugid(u_int8_t op, unsigned u1, unsigned u2,
    const char *t, unsigned umax)
{
	char a1[11], a2[11];

	snprintf(a1, sizeof(a1), "%u", u1);
	snprintf(a2, sizeof(a2), "%u", u2);

	tbprintf("%s ", t);
	if (u1 == umax && (op == PF_OP_EQ || op == PF_OP_NE))
		tb_print_op(op, "unknown", a2);
	else
		tb_print_op(op, a1, a2);
}

void
tb_print_flags(u_int8_t f)
{
	const char *tcpflags = "FSRPAUEW";
	int i;

	for (i = 0; tcpflags[i]; ++i)
		if (f & (1 << i))
			tbprintf("%c", tcpflags[i]);
}

void
print_rule(struct pf_rule *pr)
{
	static const char *actiontypes[] = { "Pass", "Block", "Scrub", "Nat",
	    "no Nat", "Binat", "no Binat", "Rdr", "no Rdr" };
	int numact = sizeof(actiontypes) / sizeof(char *);

	static const char *routetypes[] = { "", "fastroute", "route-to",
	    "dup-to", "reply-to" };
	int numroute = sizeof(routetypes) / sizeof(char *);

	if (pr == NULL)
		return;

	print_fld_str(FLD_LABEL, pr->label);
	print_fld_size(FLD_STATS, pr->states_tot);

	print_fld_size(FLD_PKTS, pr->packets[0] + pr->packets[1]);
	print_fld_size(FLD_BYTES, pr->bytes[0] + pr->bytes[1]);

	print_fld_uint(FLD_RULE, pr->nr);
	if (pr->direction == PF_OUT)
		print_fld_str(FLD_DIR, "Out");
	else if (pr->direction == PF_IN)
		print_fld_str(FLD_DIR, "In");
	else
		print_fld_str(FLD_DIR, "Any");

	if (pr->quick)
		print_fld_str(FLD_QUICK, "Quick");

	if (pr->keep_state == PF_STATE_NORMAL)
		print_fld_str(FLD_KST, "Keep");
	else if (pr->keep_state == PF_STATE_MODULATE)
		print_fld_str(FLD_KST, "Mod");
#ifdef PF_STATE_SYNPROXY
	else if (pr->keep_state == PF_STATE_SYNPROXY)
		print_fld_str(FLD_KST, "Syn");
#endif
	if (pr->log == 1)
		print_fld_str(FLD_LOG, "Log");
	else if (pr->log == 2)
		print_fld_str(FLD_LOG, "All");

	if (pr->action >= numact)
		print_fld_uint(FLD_ACTION, pr->action);
	else
		print_fld_str(FLD_ACTION, actiontypes[pr->action]);

	if (pr->proto) {
		struct protoent *p = getprotobynumber(pr->proto);

		if (p != NULL)
			print_fld_str(FLD_PROTO, p->p_name);
		else
			print_fld_uint(FLD_PROTO, pr->proto);
	}

	if (pr->ifname[0]) {
		tb_start();
		if (pr->ifnot)
			tbprintf("!");
		tbprintf("%s", pr->ifname);
		print_fld_tb(FLD_IF);
	}
	if (pr->max_states)
		print_fld_uint(FLD_STMAX, pr->max_states);

	/* print info field */

	tb_start();

	if (pr->natpass)
		tbprintf("pass ");

	if (pr->action == PF_DROP) {
		if (pr->rule_flag & PFRULE_RETURNRST)
			tbprintf("return-rst ");
#ifdef PFRULE_RETURN
		else if (pr->rule_flag & PFRULE_RETURN)
			tbprintf("return ");
#endif
#ifdef PFRULE_RETURNICMP
		else if (pr->rule_flag & PFRULE_RETURNICMP)
			tbprintf("return-icmp ");
#endif
		else
			tbprintf("drop ");
	}

	if (pr->rt > 0 && pr->rt < numroute) {
		tbprintf("%s ", routetypes[pr->rt]);
		if (pr->rt != PF_FASTROUTE)
			tbprintf("... ");
	}

	if (pr->af) {
		if (pr->af == AF_INET)
			tbprintf("inet ");
		else
			tbprintf("inet6 ");
	}

	tb_print_fromto(&pr->src, &pr->dst, pr->af, pr->proto);

	if (pr->uid.op)
		tb_print_ugid(pr->uid.op, pr->uid.uid[0], pr->uid.uid[1],
		    "user", UID_MAX);
	if (pr->gid.op)
		tb_print_ugid(pr->gid.op, pr->gid.gid[0], pr->gid.gid[1],
		    "group", GID_MAX);

	if (pr->action == PF_PASS &&
	    (pr->proto == 0 || pr->proto == IPPROTO_TCP) &&
	    (pr->flags != TH_SYN || pr->flagset != (TH_SYN | TH_ACK))) {
		tbprintf("flags ");
		if (pr->flags || pr->flagset) {
			tb_print_flags(pr->flags);
			tbprintf("/");
			tb_print_flags(pr->flagset);
		} else
			tbprintf("any ");
	}

	tbprintf(" ");

	if (pr->tos)
		tbprintf("tos 0x%2.2x ", pr->tos);
#ifdef PFRULE_FRAGMENT
	if (pr->rule_flag & PFRULE_FRAGMENT)
		tbprintf("fragment ");
#endif
#ifdef PFRULE_NODF
	if (pr->rule_flag & PFRULE_NODF)
		tbprintf("no-df ");
#endif
#ifdef PFRULE_RANDOMID
	if (pr->rule_flag & PFRULE_RANDOMID)
		tbprintf("random-id ");
#endif
	if (pr->min_ttl)
		tbprintf("min-ttl %d ", pr->min_ttl);
	if (pr->max_mss)
		tbprintf("max-mss %d ", pr->max_mss);
	if (pr->allow_opts)
		tbprintf("allow-opts ");

	/* XXX more missing */

	if (pr->qname[0] && pr->pqname[0])
		tbprintf("queue(%s, %s) ", pr->qname, pr->pqname);
	else if (pr->qname[0])
		tbprintf("queue %s ", pr->qname);

	if (pr->tagname[0])
		tbprintf("tag %s ", pr->tagname);
	if (pr->match_tagname[0]) {
		if (pr->match_tag_not)
			tbprintf("! ");
		tbprintf("tagged %s ", pr->match_tagname);
	}

	print_fld_tb(FLD_RINFO);

	/* XXX anchor field overloaded with anchor name */
	print_fld_str(FLD_ANCHOR, (char *)pr->anchor);
	tb_end();

	end_line();
}

void
print_rules(void)
{
	u_int32_t n, count = 0;

	for (n = dispstart; n < num_rules; n++) {
		print_rule(rules + n);
		count++;
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* queue display */

struct pf_altq_node *
pfctl_find_altq_node(struct pf_altq_node *root, const char *qname,
    const char *ifname)
{
	struct pf_altq_node *node, *child;

	for (node = root; node != NULL; node = node->next) {
		if (!strcmp(node->altq.qname, qname)
		    && !(strcmp(node->altq.ifname, ifname)))
			return (node);
		if (node->children != NULL) {
			child = pfctl_find_altq_node(node->children, qname,
			    ifname);
			if (child != NULL)
				return (child);
		}
	}
	return (NULL);
}

void
pfctl_insert_altq_node(struct pf_altq_node **root,
    const struct pf_altq altq, const struct queue_stats qstats)
{
	struct pf_altq_node *node;

	node = calloc(1, sizeof(struct pf_altq_node));
	if (node == NULL)
		err(1, "pfctl_insert_altq_node: calloc");
	memcpy(&node->altq, &altq, sizeof(struct pf_altq));
	memcpy(&node->qstats, &qstats, sizeof(qstats));
	node->next = node->children = node->next_flat = NULL;
	node->depth = 0;
	node->visited = 1;

	if (*root == NULL)
		*root = node;
	else if (!altq.parent[0]) {
		struct pf_altq_node *prev = *root;

		while (prev->next != NULL)
			prev = prev->next;
		prev->next = node;
	} else {
		struct pf_altq_node *parent;

		parent = pfctl_find_altq_node(*root, altq.parent, altq.ifname);
		if (parent == NULL)
			errx(1, "parent %s not found", altq.parent);
		node->depth = parent->depth + 1;
		if (parent->children == NULL)
			parent->children = node;
		else {
			struct pf_altq_node *prev = parent->children;

			while (prev->next != NULL)
				prev = prev->next;
			prev->next = node;
		}
	}
}

/*
 * Thread the queue tree into a flat, pre-order display list via
 * next_flat, so the queue view can be walked linearly.
 */
void
pfctl_set_next_flat(struct pf_altq_node *node, struct pf_altq_node *up)
{
	while (node) {
		struct pf_altq_node *next = node->next ? node->next : up;

		if (node->children) {
			node->next_flat = node->children;
			pfctl_set_next_flat(node->children, next);
		} else
			node->next_flat = next;
		node = node->next;
	}
}

int
pfctl_update_qstats(struct pf_altq_node **root, int *inserts)
{
	struct pf_altq_node *node;
	struct pfioc_altq pa;
	struct pfioc_qstats pq;
	u_int32_t nr;
	struct queue_stats qstats;
	u_int32_t nr_queues;
	int ret = 0;

	*inserts = 0;
	memset(&pa, 0, sizeof(pa));
	memset(&pq, 0, sizeof(pq));
	memset(&qstats, 0, sizeof(qstats));

	if (pf_dev < 0)
		return (-1);

	if (ioctl(pf_dev, DIOCGETALTQS, &pa)) {
		error("DIOCGETALTQS: %s", strerror(errno));
		return (-1);
	}

	num_queues = nr_queues = pa.nr;
	for (nr = 0; nr < nr_queues; ++nr) {
		pa.nr = nr;
		if (ioctl(pf_dev, DIOCGETALTQ, &pa)) {
			error("DIOCGETALTQ: %s", strerror(errno));
			ret = -1;
			break;
		}
		if (pa.altq.qid > 0) {
			pq.nr = nr;
			pq.ticket = pa.ticket;
			pq.buf = &qstats;
			pq.nbytes = sizeof(qstats);
			if (ioctl(pf_dev, DIOCGETQSTATS, &pq)) {
				error("DIOCGETQSTATS: %s", strerror(errno));
				ret = -1;
				break;
			}
			qstats.valid = 1;
			gettimeofday(&qstats.timestamp, NULL);
			if ((node = pfctl_find_altq_node(*root, pa.altq.qname,
			    pa.altq.ifname)) != NULL) {
				/* update altq data too as bandwidth
				 * may have changed */
				memcpy(&node->altq, &pa.altq,
				    sizeof(struct pf_altq));
				memcpy(&node->qstats_last, &node->qstats,
				    sizeof(struct queue_stats));
				memcpy(&node->qstats, &qstats,
				    sizeof(qstats));
				node->visited = 1;
			} else {
				pfctl_insert_altq_node(root, pa.altq, qstats);
				*inserts = 1;
			}
		} else
			--num_queues;
	}

	pfctl_set_next_flat(*root, NULL);

	return (ret);
}

void
pfctl_free_altq_node(struct pf_altq_node *node)
{
	while (node != NULL) {
		struct pf_altq_node *prev;

		if (node->children != NULL)
			pfctl_free_altq_node(node->children);
		prev = node;
		node = node->next;
		free(prev);
	}
}

void
pfctl_mark_all_unvisited(struct pf_altq_node *root)
{
	if (root != NULL) {
		struct pf_altq_node *node = root;

		while (node != NULL) {
			node->visited = 0;
			node = node->next_flat;
		}
	}
}

int
pfctl_have_unvisited(struct pf_altq_node *root)
{
	if (root == NULL)
		return (0);
	else {
		struct pf_altq_node *node = root;

		while (node != NULL) {
			if (node->visited == 0)
				return (1);
			node = node->next_flat;
		}
		return (0);
	}
}

struct pf_altq_node *altq_root = NULL;

int
select_queues(void)
{
	num_disp = num_queues;
	return (0);
}

int
read_queues(void)
{
	static int first_read = 1;
	int inserts;
	num_disp = num_queues = 0;

	pfctl_mark_all_unvisited(altq_root);
	if (pfctl_update_qstats(&altq_root, &inserts))
		return (-1);

	/* Allow inserts only on first read;
	 * on subsequent reads clear and reload
	 */
	if (first_read == 0 &&
	    (inserts != 0 || pfctl_have_unvisited(altq_root) != 0)) {
		pfctl_free_altq_node(altq_root);
		altq_root = NULL;
		first_read = 1;
		if (pfctl_update_qstats(&altq_root, &inserts))
			return (-1);
	}

	first_read = 0;
	num_disp = num_queues;

	return (0);
}

double
calc_interval(struct timeval *cur_time, struct timeval *last_time)
{
	double sec;

	sec = (double)(cur_time->tv_sec - last_time->tv_sec) +
	    (double)(cur_time->tv_usec - last_time->tv_usec) / 1000000;

	return (sec);
}

double
calc_rate(u_int64_t new_bytes, u_int64_t last_bytes, double interval)
{
	double rate;

	rate = (double)(new_bytes - last_bytes) / interval;
	return (rate);
}

double
calc_pps(u_int64_t new_pkts, u_int64_t last_pkts, double interval)
{
	double pps;

	pps = (double)(new_pkts - last_pkts) / interval;
	return (pps);
}

#define DEFAULT_PRIORITY 1

void
print_queue(struct pf_altq_node *node)
{
	u_int8_t d;
	double interval, pps, bps;
	pps = bps = 0;

	tb_start();
	for (d = 0; d < node->depth; d++)
		tbprintf(" ");
	tbprintf("%s", node->altq.qname);
	print_fld_tb(FLD_QUEUE);

	if (node->altq.scheduler == ALTQT_CBQ ||
	    node->altq.scheduler == ALTQT_HFSC)
		print_fld_bw(FLD_BANDW, (double)node->altq.bandwidth);

	if (node->altq.priority != DEFAULT_PRIORITY)
		print_fld_uint(FLD_PRIO, node->altq.priority);

	if (node->qstats.valid && node->qstats_last.valid)
		interval = calc_interval(&node->qstats.timestamp,
		    &node->qstats_last.timestamp);
	else
		interval = 0;

	switch (node->altq.scheduler) {
	case ALTQT_CBQ:
		print_fld_str(FLD_SCHED, "cbq");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.cbq_stats.xmit_cnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.cbq_stats.xmit_cnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.cbq_stats.drop_cnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.cbq_stats.drop_cnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.cbq_stats.qcnt);
		print_fld_size(FLD_BORR, node->qstats.data.cbq_stats.borrows);
		print_fld_size(FLD_SUSP, node->qstats.data.cbq_stats.delays);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.cbq_stats.xmit_cnt.packets,
			    node->qstats_last.data.cbq_stats.xmit_cnt.packets, interval);
			bps = calc_rate(node->qstats.data.cbq_stats.xmit_cnt.bytes,
			    node->qstats_last.data.cbq_stats.xmit_cnt.bytes, interval);
		}
		break;
	case ALTQT_PRIQ:
		print_fld_str(FLD_SCHED, "priq");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.priq_stats.xmitcnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.priq_stats.xmitcnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.priq_stats.dropcnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.priq_stats.dropcnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.priq_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.priq_stats.xmitcnt.packets,
			    node->qstats_last.data.priq_stats.xmitcnt.packets, interval);
			bps = calc_rate(node->qstats.data.priq_stats.xmitcnt.bytes,
			    node->qstats_last.data.priq_stats.xmitcnt.bytes, interval);
		}
		break;
	case ALTQT_HFSC:
		print_fld_str(FLD_SCHED, "hfsc");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.hfsc_stats.xmit_cnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.hfsc_stats.xmit_cnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.hfsc_stats.drop_cnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.hfsc_stats.drop_cnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.hfsc_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.hfsc_stats.xmit_cnt.packets,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.packets, interval);
			bps = calc_rate(node->qstats.data.hfsc_stats.xmit_cnt.bytes,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.bytes, interval);
		}
		break;
	}

	/* if (node->altq.scheduler != ALTQT_HFSC && interval > 0) { */
	if (node->altq.scheduler && interval > 0) {
		tb_start();
		if (pps > 0 && pps < 1)
			tbprintf("%-3.1lf", pps);
		else
			tbprintf("%u", (unsigned int) pps);

		print_fld_tb(FLD_PKTSPS);
		print_fld_bw(FLD_BYTESPS, bps);
	}
}

void
print_queues(void)
{
	u_int32_t n, count = 0;
	struct pf_altq_node *node = altq_root;

	for (n = 0; n < dispstart; n++)
		node = node->next_flat;

	for (; n < num_disp; n++) {
		print_queue(node);
		node = node->next_flat;
		end_line();
		count++;
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* main program functions */

void
update_cache(void)
{
	static int pstate = -1;
	if (pstate == cachestates)
		return;

	pstate = cachestates;
	if (cachestates) {
		show_field(FLD_SI);
		show_field(FLD_SP);
		gotsig_alarm = 1;
	} else {
		hide_field(FLD_SI);
		hide_field(FLD_SP);
		need_update = 1;
	}
	field_setup();
}

int
initpftop(void)
{
	struct pf_status status;
	field_view *v;
	int cachesize = DEFAULT_CACHE_SIZE;

	v = views;
	while (v->name != NULL)
		add_view(v++);

	pf_dev = open("/dev/pf", O_RDONLY);
	if (pf_dev == -1) {
		alloc_buf(0);
	} else if (ioctl(pf_dev, DIOCGETSTATUS, &status)) {
		warn("DIOCGETSTATUS");
		alloc_buf(0);
	} else
		alloc_buf(status.states);

	/* initialize cache with given size */
	if (cache_init(cachesize))
		warnx("Failed to initialize cache.");
	else if (interactive && cachesize > 0)
		cachestates = 1;

	update_cache();

	show_field(FLD_STMAX);
	show_field(FLD_ANCHOR);

	return (1);
}