1 /* $OpenBSD: pftop.c,v 1.20 2011/10/13 18:32:30 claudio Exp $ */ 2 /* 3 * Copyright (c) 2001, 2007 Can Erkin Acar 4 * Copyright (c) 2001 Daniel Hartmeier 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * - Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * - Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 * 31 */ 32 33 #include <sys/types.h> 34 #include <sys/ioctl.h> 35 #include <sys/socket.h> 36 37 #include <net/if.h> 38 #include <netinet/in.h> 39 #include <netinet/tcp.h> 40 #include <netinet/tcp_fsm.h> 41 #include <net/pfvar.h> 42 #include <arpa/inet.h> 43 44 #include <altq/altq.h> 45 #include <altq/altq_cbq.h> 46 #include <altq/altq_priq.h> 47 #include <altq/altq_hfsc.h> 48 49 #include <ctype.h> 50 #include <curses.h> 51 #include <err.h> 52 #include <errno.h> 53 #include <fcntl.h> 54 #include <netdb.h> 55 #include <signal.h> 56 #include <stdio.h> 57 #include <stdlib.h> 58 #include <string.h> 59 #include <unistd.h> 60 #include <stdarg.h> 61 62 #include "systat.h" 63 #include "engine.h" 64 #include "cache.h" 65 66 extern const char *tcpstates[]; 67 68 #define MIN_NUM_STATES 1024 69 #define NUM_STATE_INC 1024 70 71 #define DEFAULT_CACHE_SIZE 10000 72 73 /* XXX must also check type before use */ 74 #define PT_ADDR(x) (&(x)->addr.v.a.addr) 75 76 /* XXX must also check type before use */ 77 #define PT_MASK(x) (&(x)->addr.v.a.mask) 78 79 #define PT_NOROUTE(x) ((x)->addr.type == PF_ADDR_NOROUTE) 80 81 /* view management */ 82 int select_states(void); 83 int read_states(void); 84 void sort_states(void); 85 void print_states(void); 86 87 int select_rules(void); 88 int read_rules(void); 89 void print_rules(void); 90 91 int select_queues(void); 92 int read_queues(void); 93 void print_queues(void); 94 95 void update_cache(void); 96 97 /* qsort callbacks */ 98 int sort_size_callback(const void *s1, const void *s2); 99 int sort_exp_callback(const void *s1, const void *s2); 100 int sort_pkt_callback(const void *s1, const void *s2); 101 int sort_age_callback(const void *s1, const void *s2); 102 int sort_sa_callback(const void *s1, const void *s2); 103 int sort_sp_callback(const void *s1, const void *s2); 104 int sort_da_callback(const void *s1, const void *s2); 105 int sort_dp_callback(const void *s1, const void *s2); 106 int sort_rate_callback(const void *s1, const void *s2); 
107 int sort_peak_callback(const void *s1, const void *s2); 108 int pf_dev = -1; 109 110 struct sc_ent **state_cache = NULL; 111 struct pfsync_state *state_buf = NULL; 112 int state_buf_len = 0; 113 u_int32_t *state_ord = NULL; 114 u_int32_t num_states = 0; 115 u_int32_t num_states_all = 0; 116 u_int32_t num_rules = 0; 117 u_int32_t num_queues = 0; 118 int cachestates = 0; 119 120 char *filter_string = NULL; 121 int dumpfilter = 0; 122 123 #define MIN_LABEL_SIZE 5 124 #define ANCHOR_FLD_SIZE 12 125 126 /* Define fields */ 127 field_def fields[] = { 128 {"SRC", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 129 {"DEST", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 130 {"GW", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 131 {"STATE", 5, 23, 18, FLD_ALIGN_COLUMN, -1, 0, 0, 0}, 132 {"AGE", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 133 {"EXP", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 134 {"PR ", 4, 9, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 135 {"DIR", 1, 3, 2, FLD_ALIGN_CENTER, -1, 0, 0, 0}, 136 {"PKTS", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 137 {"BYTES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 138 {"RULE", 2, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 139 {"LABEL", MIN_LABEL_SIZE, MIN_LABEL_SIZE, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 140 {"STATES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 141 {"EVAL", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 142 {"ACTION", 1, 8, 4, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 143 {"LOG", 1, 3, 2, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 144 {"QUICK", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 145 {"KS", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 146 {"IF", 4, 6, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 147 {"INFO", 40, 80, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 148 {"MAX", 3, 5, 2, FLD_ALIGN_RIGHT, -1, 0, 0}, 149 {"RATE", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 150 {"AVG", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 151 {"PEAK", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 152 {"ANCHOR", 6, 16, 1, FLD_ALIGN_LEFT, -1, 0, 0}, 153 {"QUEUE", 15, 30, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 154 {"BW", 4, 5, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 
0}, 155 {"SCH", 3, 4, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0}, 156 {"PRIO", 1, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 157 {"DROP_P", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 158 {"DROP_B", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 159 {"QLEN", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 160 {"BORROW", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 161 {"SUSPENDS", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 162 {"P/S", 3, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}, 163 {"B/S", 4, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0} 164 }; 165 166 167 /* for states */ 168 #define FLD_SRC FIELD_ADDR(fields,0) 169 #define FLD_DEST FIELD_ADDR(fields,1) 170 #define FLD_GW FIELD_ADDR(fields,2) 171 #define FLD_STATE FIELD_ADDR(fields,3) 172 #define FLD_AGE FIELD_ADDR(fields,4) 173 #define FLD_EXP FIELD_ADDR(fields,5) 174 /* common */ 175 #define FLD_PROTO FIELD_ADDR(fields,6) 176 #define FLD_DIR FIELD_ADDR(fields,7) 177 #define FLD_PKTS FIELD_ADDR(fields,8) 178 #define FLD_BYTES FIELD_ADDR(fields,9) 179 #define FLD_RULE FIELD_ADDR(fields,10) 180 /* for rules */ 181 #define FLD_LABEL FIELD_ADDR(fields,11) 182 #define FLD_STATS FIELD_ADDR(fields,12) 183 #define FLD_EVAL FIELD_ADDR(fields,13) 184 #define FLD_ACTION FIELD_ADDR(fields,14) 185 #define FLD_LOG FIELD_ADDR(fields,15) 186 #define FLD_QUICK FIELD_ADDR(fields,16) 187 #define FLD_KST FIELD_ADDR(fields,17) 188 #define FLD_IF FIELD_ADDR(fields,18) 189 #define FLD_RINFO FIELD_ADDR(fields,19) 190 #define FLD_STMAX FIELD_ADDR(fields,20) 191 /* other */ 192 #define FLD_SI FIELD_ADDR(fields,21) /* instantaneous speed */ 193 #define FLD_SA FIELD_ADDR(fields,22) /* average speed */ 194 #define FLD_SP FIELD_ADDR(fields,23) /* peak speed */ 195 #define FLD_ANCHOR FIELD_ADDR(fields,24) 196 /* for queues */ 197 #define FLD_QUEUE FIELD_ADDR(fields,25) 198 #define FLD_BANDW FIELD_ADDR(fields,26) 199 #define FLD_SCHED FIELD_ADDR(fields,27) 200 #define FLD_PRIO FIELD_ADDR(fields,28) 201 #define FLD_DROPP FIELD_ADDR(fields,29) 202 #define FLD_DROPB FIELD_ADDR(fields,30) 203 #define 
FLD_QLEN FIELD_ADDR(fields,31) 204 #define FLD_BORR FIELD_ADDR(fields,32) 205 #define FLD_SUSP FIELD_ADDR(fields,33) 206 #define FLD_PKTSPS FIELD_ADDR(fields,34) 207 #define FLD_BYTESPS FIELD_ADDR(fields,35) 208 209 /* Define views */ 210 field_def *view0[] = { 211 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE, 212 FLD_AGE, FLD_EXP, FLD_PKTS, FLD_BYTES, NULL 213 }; 214 215 field_def *view1[] = { 216 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_GW, FLD_STATE, FLD_AGE, 217 FLD_EXP, FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, NULL 218 }; 219 220 field_def *view2[] = { 221 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE, FLD_AGE, FLD_EXP, 222 FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL 223 }; 224 225 field_def *view3[] = { 226 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_AGE, FLD_EXP, FLD_PKTS, 227 FLD_BYTES, FLD_STATE, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL 228 }; 229 230 field_def *view4[] = { 231 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_PKTS, FLD_BYTES, FLD_STATE, 232 FLD_AGE, FLD_EXP, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL 233 }; 234 235 field_def *view5[] = { 236 FLD_RULE, FLD_ANCHOR, FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF, 237 FLD_PROTO, FLD_KST, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX, 238 FLD_RINFO, NULL 239 }; 240 241 field_def *view6[] = { 242 FLD_RULE, FLD_LABEL, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX, 243 FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF, FLD_PROTO, 244 FLD_ANCHOR, FLD_KST, NULL 245 }; 246 247 field_def *view7[] = { 248 FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_SI, FLD_SP, FLD_SA, 249 FLD_BYTES, FLD_STATE, FLD_PKTS, FLD_AGE, FLD_EXP, FLD_RULE, FLD_GW, NULL 250 }; 251 252 field_def *view8[] = { 253 FLD_QUEUE, FLD_BANDW, FLD_SCHED, FLD_PRIO, FLD_PKTS, FLD_BYTES, 254 FLD_DROPP, FLD_DROPB, FLD_QLEN, FLD_BORR, FLD_SUSP, FLD_PKTSPS, 255 FLD_BYTESPS, NULL 256 }; 257 258 /* Define orderings */ 259 order_type order_list[] = { 260 {"none", "none", 'N', NULL}, 261 {"bytes", "bytes", 
'B', sort_size_callback}, 262 {"expiry", "exp", 'E', sort_exp_callback}, 263 {"packets", "pkt", 'P', sort_pkt_callback}, 264 {"age", "age", 'A', sort_age_callback}, 265 {"source addr", "src", 'F', sort_sa_callback}, 266 {"dest. addr", "dest", 'T', sort_da_callback}, 267 {"source port", "sport", 'S', sort_sp_callback}, 268 {"dest. port", "dport", 'D', sort_dp_callback}, 269 {"rate", "rate", 'R', sort_rate_callback}, 270 {"peak", "peak", 'K', sort_peak_callback}, 271 {NULL, NULL, 0, NULL} 272 }; 273 274 /* Define view managers */ 275 struct view_manager state_mgr = { 276 "States", select_states, read_states, sort_states, print_header, 277 print_states, keyboard_callback, order_list, NULL 278 }; 279 280 struct view_manager rule_mgr = { 281 "Rules", select_rules, read_rules, NULL, print_header, 282 print_rules, keyboard_callback, NULL, NULL 283 }; 284 285 struct view_manager queue_mgr = { 286 "Queues", select_queues, read_queues, NULL, print_header, 287 print_queues, keyboard_callback, NULL, NULL 288 }; 289 290 field_view views[] = { 291 {view2, "states", '8', &state_mgr}, 292 {view5, "rules", '9', &rule_mgr}, 293 {view8, "queues", 'Q', &queue_mgr}, 294 {NULL, NULL, 0, NULL} 295 }; 296 297 298 /* altq structures from pfctl */ 299 300 union class_stats { 301 class_stats_t cbq_stats; 302 struct priq_classstats priq_stats; 303 struct hfsc_classstats hfsc_stats; 304 }; 305 306 struct queue_stats { 307 union class_stats data; 308 struct timeval timestamp; 309 u_int8_t valid; 310 }; 311 312 struct pf_altq_node { 313 struct pf_altq altq; 314 struct pf_altq_node *next; 315 struct pf_altq_node *children; 316 struct pf_altq_node *next_flat; 317 struct queue_stats qstats; 318 struct queue_stats qstats_last; 319 u_int8_t depth; 320 u_int8_t visited; 321 }; 322 323 324 /* ordering functions */ 325 326 int 327 sort_size_callback(const void *s1, const void *s2) 328 { 329 u_int64_t b1 = COUNTER(state_buf[* (u_int32_t *) s1].bytes[0]) + 330 COUNTER(state_buf[* (u_int32_t *) 
s1].bytes[1]); 331 u_int64_t b2 = COUNTER(state_buf[* (u_int32_t *) s2].bytes[0]) + 332 COUNTER(state_buf[* (u_int32_t *) s2].bytes[1]); 333 if (b2 > b1) 334 return sortdir; 335 if (b2 < b1) 336 return -sortdir; 337 return 0; 338 } 339 340 int 341 sort_pkt_callback(const void *s1, const void *s2) 342 { 343 u_int64_t p1 = COUNTER(state_buf[* (u_int32_t *) s1].packets[0]) + 344 COUNTER(state_buf[* (u_int32_t *) s1].packets[1]); 345 u_int64_t p2 = COUNTER(state_buf[* (u_int32_t *) s2].packets[0]) + 346 COUNTER(state_buf[* (u_int32_t *) s2].packets[1]); 347 if (p2 > p1) 348 return sortdir; 349 if (p2 < p1) 350 return -sortdir; 351 return 0; 352 } 353 354 int 355 sort_age_callback(const void *s1, const void *s2) 356 { 357 if (ntohl(state_buf[* (u_int32_t *) s2].creation) > 358 ntohl(state_buf[* (u_int32_t *) s1].creation)) 359 return sortdir; 360 if (ntohl(state_buf[* (u_int32_t *) s2].creation) < 361 ntohl(state_buf[* (u_int32_t *) s1].creation)) 362 return -sortdir; 363 return 0; 364 } 365 366 int 367 sort_exp_callback(const void *s1, const void *s2) 368 { 369 if (ntohl(state_buf[* (u_int32_t *) s2].expire) > 370 ntohl(state_buf[* (u_int32_t *) s1].expire)) 371 return sortdir; 372 if (ntohl(state_buf[* (u_int32_t *) s2].expire) < 373 ntohl(state_buf[* (u_int32_t *) s1].expire)) 374 return -sortdir; 375 return 0; 376 } 377 378 int 379 sort_rate_callback(const void *s1, const void *s2) 380 { 381 struct sc_ent *e1 = state_cache[* (u_int32_t *) s1]; 382 struct sc_ent *e2 = state_cache[* (u_int32_t *) s2]; 383 384 if (e1 == NULL) 385 return sortdir; 386 if (e2 == NULL) 387 return -sortdir; 388 389 if (e2->rate > e1 -> rate) 390 return sortdir; 391 if (e2->rate < e1 -> rate) 392 return -sortdir; 393 return 0; 394 } 395 396 int 397 sort_peak_callback(const void *s1, const void *s2) 398 { 399 struct sc_ent *e1 = state_cache[* (u_int32_t *) s1]; 400 struct sc_ent *e2 = state_cache[* (u_int32_t *) s2]; 401 402 if (e2 == NULL) 403 return -sortdir; 404 if (e1 == NULL || e2 == 
NULL) 405 return 0; 406 407 if (e2->peak > e1 -> peak) 408 return sortdir; 409 if (e2->peak < e1 -> peak) 410 return -sortdir; 411 return 0; 412 } 413 414 int 415 compare_addr(int af, const struct pf_addr *a, const struct pf_addr *b) 416 { 417 switch (af) { 418 case AF_INET: 419 if (ntohl(a->addr32[0]) > ntohl(b->addr32[0])) 420 return 1; 421 if (a->addr32[0] != b->addr32[0]) 422 return -1; 423 break; 424 case AF_INET6: 425 if (ntohl(a->addr32[0]) > ntohl(b->addr32[0])) 426 return 1; 427 if (a->addr32[0] != b->addr32[0]) 428 return -1; 429 if (ntohl(a->addr32[1]) > ntohl(b->addr32[1])) 430 return 1; 431 if (a->addr32[1] != b->addr32[1]) 432 return -1; 433 if (ntohl(a->addr32[2]) > ntohl(b->addr32[2])) 434 return 1; 435 if (a->addr32[2] != b->addr32[2]) 436 return -1; 437 if (ntohl(a->addr32[3]) > ntohl(b->addr32[3])) 438 return 1; 439 if (a->addr32[3] != b->addr32[3]) 440 return -1; 441 break; 442 } 443 444 return 0; 445 } 446 447 static __inline int 448 sort_addr_callback(const struct pfsync_state *s1, 449 const struct pfsync_state *s2, int dir) 450 { 451 const struct pf_addr *aa, *ab; 452 u_int16_t pa, pb; 453 int af, side, ret, ii, io; 454 455 side = s1->direction == PF_IN ? 
PF_SK_STACK : PF_SK_WIRE; 456 457 if (s1->key[side].af > s2->key[side].af) 458 return sortdir; 459 if (s1->key[side].af < s2->key[side].af) 460 return -sortdir; 461 462 ii = io = 0; 463 464 if (dir == PF_OUT) /* looking for source addr */ 465 io = 1; 466 else /* looking for dest addr */ 467 ii = 1; 468 469 if (s1->key[PF_SK_STACK].af != s1->key[PF_SK_WIRE].af) { 470 dir = PF_OUT; 471 side = PF_SK_STACK; 472 } else { 473 dir = s1->direction; 474 side = PF_SK_WIRE; 475 } 476 477 if (dir == PF_IN) { 478 aa = &s1->key[PF_SK_STACK].addr[ii]; 479 pa = s1->key[PF_SK_STACK].port[ii]; 480 af = s1->key[PF_SK_STACK].af; 481 } else { 482 aa = &s1->key[side].addr[io]; 483 pa = s1->key[side].port[io]; 484 af = s1->key[side].af; 485 } 486 487 if (s2->key[PF_SK_STACK].af != s2->key[PF_SK_WIRE].af) { 488 dir = PF_OUT; 489 side = PF_SK_STACK; 490 } else { 491 dir = s2->direction; 492 side = PF_SK_WIRE; 493 } 494 495 if (dir == PF_IN) { 496 ab = &s2->key[PF_SK_STACK].addr[ii]; 497 pb = s2->key[PF_SK_STACK].port[ii]; 498 af = s1->key[PF_SK_STACK].af; 499 } else { 500 ab = &s2->key[side].addr[io]; 501 pb = s2->key[side].port[io]; 502 af = s1->key[side].af; 503 } 504 505 ret = compare_addr(af, aa, ab); 506 if (ret) 507 return ret * sortdir; 508 509 if (ntohs(pa) > ntohs(pb)) 510 return sortdir; 511 return -sortdir; 512 } 513 514 static __inline int 515 sort_port_callback(const struct pfsync_state *s1, 516 const struct pfsync_state *s2, int dir) 517 { 518 const struct pf_addr *aa, *ab; 519 u_int16_t pa, pb; 520 int af, side, ret, ii, io; 521 522 side = s1->direction == PF_IN ? 
PF_SK_STACK : PF_SK_WIRE; 523 524 if (s1->key[side].af > s2->key[side].af) 525 return sortdir; 526 if (s1->key[side].af < s2->key[side].af) 527 return -sortdir; 528 529 ii = io = 0; 530 531 if (dir == PF_OUT) /* looking for source addr */ 532 io = 1; 533 else /* looking for dest addr */ 534 ii = 1; 535 536 if (s1->key[PF_SK_STACK].af != s1->key[PF_SK_WIRE].af) { 537 dir = PF_OUT; 538 side = PF_SK_STACK; 539 } else { 540 dir = s1->direction; 541 side = PF_SK_WIRE; 542 } 543 544 if (dir == PF_IN) { 545 aa = &s1->key[PF_SK_STACK].addr[ii]; 546 pa = s1->key[PF_SK_STACK].port[ii]; 547 af = s1->key[PF_SK_STACK].af; 548 } else { 549 aa = &s1->key[side].addr[io]; 550 pa = s1->key[side].port[io]; 551 af = s1->key[side].af; 552 } 553 554 if (s2->key[PF_SK_STACK].af != s2->key[PF_SK_WIRE].af) { 555 dir = PF_OUT; 556 side = PF_SK_STACK; 557 } else { 558 dir = s2->direction; 559 side = PF_SK_WIRE; 560 } 561 562 if (dir == PF_IN) { 563 ab = &s2->key[PF_SK_STACK].addr[ii]; 564 pb = s2->key[PF_SK_STACK].port[ii]; 565 af = s1->key[PF_SK_STACK].af; 566 } else { 567 ab = &s2->key[side].addr[io]; 568 pb = s2->key[side].port[io]; 569 af = s1->key[side].af; 570 } 571 572 573 if (ntohs(pa) > ntohs(pb)) 574 return sortdir; 575 if (ntohs(pa) < ntohs(pb)) 576 return - sortdir; 577 578 ret = compare_addr(af, aa, ab); 579 if (ret) 580 return ret * sortdir; 581 return -sortdir; 582 } 583 584 int 585 sort_sa_callback(const void *p1, const void *p2) 586 { 587 struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1); 588 struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2); 589 return sort_addr_callback(s1, s2, PF_OUT); 590 } 591 592 int 593 sort_da_callback(const void *p1, const void *p2) 594 { 595 struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1); 596 struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2); 597 return sort_addr_callback(s1, s2, PF_IN); 598 } 599 600 int 601 sort_sp_callback(const void *p1, const void *p2) 602 { 603 struct pfsync_state *s1 = state_buf + (* 
(u_int32_t *) p1); 604 struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2); 605 return sort_port_callback(s1, s2, PF_OUT); 606 } 607 608 int 609 sort_dp_callback(const void *p1, const void *p2) 610 { 611 struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1); 612 struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2); 613 return sort_port_callback(s1, s2, PF_IN); 614 } 615 616 void 617 sort_states(void) 618 { 619 order_type *ordering; 620 621 if (curr_mgr == NULL) 622 return; 623 624 ordering = curr_mgr->order_curr; 625 626 if (ordering == NULL) 627 return; 628 if (ordering->func == NULL) 629 return; 630 if (state_buf == NULL) 631 return; 632 if (num_states <= 0) 633 return; 634 635 mergesort(state_ord, num_states, sizeof(u_int32_t), ordering->func); 636 } 637 638 /* state management functions */ 639 640 void 641 alloc_buf(int ns) 642 { 643 int len; 644 645 if (ns < MIN_NUM_STATES) 646 ns = MIN_NUM_STATES; 647 648 len = ns; 649 650 if (len >= state_buf_len) { 651 len += NUM_STATE_INC; 652 state_buf = realloc(state_buf, len * sizeof(struct pfsync_state)); 653 state_ord = realloc(state_ord, len * sizeof(u_int32_t)); 654 state_cache = realloc(state_cache, 655 len * sizeof(struct sc_ent *)); 656 if (state_buf == NULL || state_ord == NULL || 657 state_cache == NULL) 658 err(1, "realloc"); 659 state_buf_len = len; 660 } 661 } 662 663 int 664 select_states(void) 665 { 666 num_disp = num_states; 667 return (0); 668 } 669 670 int 671 read_states(void) 672 { 673 struct pfioc_states ps; 674 int n; 675 676 if (pf_dev == -1) 677 return -1; 678 679 for (;;) { 680 int sbytes = state_buf_len * sizeof(struct pfsync_state); 681 682 ps.ps_len = sbytes; 683 ps.ps_buf = (char *) state_buf; 684 685 if (ioctl(pf_dev, DIOCGETSTATES, &ps) < 0) { 686 error("DIOCGETSTATES"); 687 } 688 num_states_all = ps.ps_len / sizeof(struct pfsync_state); 689 690 if (ps.ps_len < sbytes) 691 break; 692 693 alloc_buf(num_states_all); 694 } 695 696 if (dumpfilter) { 697 int fd = 
open("state.dmp", O_WRONLY|O_CREAT|O_EXCL, 0); 698 if (fd > 0) { 699 write(fd, state_buf, ps.ps_len); 700 close(fd); 701 } 702 } 703 704 num_states = num_states_all; 705 for (n = 0; n<num_states_all; n++) 706 state_ord[n] = n; 707 708 if (cachestates) { 709 for (n = 0; n < num_states; n++) 710 state_cache[n] = cache_state(state_buf + n); 711 cache_endupdate(); 712 } 713 714 num_disp = num_states; 715 return 0; 716 } 717 718 int 719 unmask(struct pf_addr * m, u_int8_t af) 720 { 721 int i = 31, j = 0, b = 0, msize; 722 u_int32_t tmp; 723 724 if (af == AF_INET) 725 msize = 1; 726 else 727 msize = 4; 728 while (j < msize && m->addr32[j] == 0xffffffff) { 729 b += 32; 730 j++; 731 } 732 if (j < msize) { 733 tmp = ntohl(m->addr32[j]); 734 for (i = 31; tmp & (1 << i); --i) 735 b++; 736 } 737 return (b); 738 } 739 740 /* display functions */ 741 742 void 743 tb_print_addr(struct pf_addr * addr, struct pf_addr * mask, int af) 744 { 745 switch (af) { 746 case AF_INET: { 747 tbprintf("%s", inetname(addr->v4)); 748 break; 749 } 750 case AF_INET6: { 751 tbprintf("%s", inet6name(&addr->v6)); 752 break; 753 } 754 } 755 756 if (mask != NULL) { 757 if (!PF_AZERO(mask, af)) 758 tbprintf("/%u", unmask(mask, af)); 759 } 760 } 761 762 void 763 print_fld_host2(field_def *fld, struct pfsync_state_key *ks, 764 struct pfsync_state_key *kn, int idx) 765 { 766 struct pf_addr *as = &ks->addr[idx]; 767 struct pf_addr *an = &kn->addr[idx]; 768 769 u_int16_t ps = ntohs(ks->port[idx]); 770 u_int16_t pn = ntohs(kn->port[idx]); 771 772 int asf = ks->af; 773 int anf = kn->af; 774 775 if (fld == NULL) 776 return; 777 778 if (fld->width < 3) { 779 print_fld_str(fld, "*"); 780 return; 781 } 782 783 tb_start(); 784 tb_print_addr(as, NULL, asf); 785 786 if (asf == AF_INET) 787 tbprintf(":%u", ps); 788 else 789 tbprintf("[%u]", ps); 790 791 print_fld_tb(fld); 792 793 if (asf != anf || PF_ANEQ(as, an, asf) || ps != pn) { 794 tb_start(); 795 tb_print_addr(an, NULL, anf); 796 797 if (anf == AF_INET) 798 
tbprintf(":%u", pn); 799 else 800 tbprintf("[%u]", pn); 801 print_fld_tb(FLD_GW); 802 } 803 804 } 805 806 void 807 print_fld_state(field_def *fld, unsigned int proto, 808 unsigned int s1, unsigned int s2) 809 { 810 int len; 811 812 if (fld == NULL) 813 return; 814 815 len = fld->width; 816 if (len < 1) 817 return; 818 819 tb_start(); 820 821 if (proto == IPPROTO_TCP) { 822 if (s1 <= TCPS_TIME_WAIT && s2 <= TCPS_TIME_WAIT) 823 tbprintf("%s:%s", tcpstates[s1], tcpstates[s2]); 824 #ifdef PF_TCPS_PROXY_SRC 825 else if (s1 == PF_TCPS_PROXY_SRC || 826 s2 == PF_TCPS_PROXY_SRC) 827 tbprintf("PROXY:SRC\n"); 828 else if (s1 == PF_TCPS_PROXY_DST || 829 s2 == PF_TCPS_PROXY_DST) 830 tbprintf("PROXY:DST\n"); 831 #endif 832 else 833 tbprintf("<BAD STATE LEVELS>"); 834 } else if (proto == IPPROTO_UDP && s1 < PFUDPS_NSTATES && 835 s2 < PFUDPS_NSTATES) { 836 const char *states[] = PFUDPS_NAMES; 837 tbprintf("%s:%s", states[s1], states[s2]); 838 } else if (proto != IPPROTO_ICMP && s1 < PFOTHERS_NSTATES && 839 s2 < PFOTHERS_NSTATES) { 840 /* XXX ICMP doesn't really have state levels */ 841 const char *states[] = PFOTHERS_NAMES; 842 tbprintf("%s:%s", states[s1], states[s2]); 843 } else { 844 tbprintf("%u:%u", s1, s2); 845 } 846 847 if (strlen(tmp_buf) > len) { 848 tb_start(); 849 tbprintf("%u:%u", s1, s2); 850 } 851 852 print_fld_tb(fld); 853 } 854 855 int 856 print_state(struct pfsync_state * s, struct sc_ent * ent) 857 { 858 struct pfsync_state_peer *src, *dst; 859 struct protoent *p; 860 u_int64_t sz; 861 int afto, dir; 862 863 afto = s->key[PF_SK_STACK].af == s->key[PF_SK_WIRE].af ? 0 : 1; 864 dir = afto ? PF_OUT : s->direction; 865 866 if (dir == PF_OUT) { 867 src = &s->src; 868 dst = &s->dst; 869 } else { 870 src = &s->dst; 871 dst = &s->src; 872 } 873 874 p = getprotobynumber(s->proto); 875 876 if (p != NULL) 877 print_fld_str(FLD_PROTO, p->p_name); 878 else 879 print_fld_uint(FLD_PROTO, s->proto); 880 881 if (dir == PF_OUT) { 882 print_fld_host2(FLD_SRC, 883 &s->key[afto ? 
PF_SK_STACK : PF_SK_WIRE], 884 &s->key[PF_SK_STACK], 1); 885 print_fld_host2(FLD_DEST, 886 &s->key[afto ? PF_SK_STACK : PF_SK_WIRE], 887 &s->key[afto ? PF_SK_WIRE : PF_SK_STACK], 0); 888 } else { 889 print_fld_host2(FLD_SRC, &s->key[PF_SK_STACK], 890 &s->key[PF_SK_WIRE], 0); 891 print_fld_host2(FLD_DEST, &s->key[PF_SK_STACK], 892 &s->key[PF_SK_WIRE], 1); 893 } 894 895 if (dir == PF_OUT) 896 print_fld_str(FLD_DIR, "Out"); 897 else 898 print_fld_str(FLD_DIR, "In"); 899 900 print_fld_state(FLD_STATE, s->proto, src->state, dst->state); 901 print_fld_age(FLD_AGE, ntohl(s->creation)); 902 print_fld_age(FLD_EXP, ntohl(s->expire)); 903 904 sz = COUNTER(s->bytes[0]) + COUNTER(s->bytes[1]); 905 906 print_fld_size(FLD_PKTS, COUNTER(s->packets[0]) + 907 COUNTER(s->packets[1])); 908 print_fld_size(FLD_BYTES, sz); 909 print_fld_rate(FLD_SA, (s->creation) ? 910 ((double)sz/ntohl((double)s->creation)) : -1); 911 912 print_fld_uint(FLD_RULE, ntohl(s->rule)); 913 if (cachestates && ent != NULL) { 914 print_fld_rate(FLD_SI, ent->rate); 915 print_fld_rate(FLD_SP, ent->peak); 916 } 917 918 end_line(); 919 return 1; 920 } 921 922 void 923 print_states(void) 924 { 925 int n, count = 0; 926 927 for (n = dispstart; n < num_disp; n++) { 928 count += print_state(state_buf + state_ord[n], 929 state_cache[state_ord[n]]); 930 if (maxprint > 0 && count >= maxprint) 931 break; 932 } 933 } 934 935 /* rule display */ 936 937 struct pf_rule *rules = NULL; 938 u_int32_t alloc_rules = 0; 939 940 int 941 select_rules(void) 942 { 943 num_disp = num_rules; 944 return (0); 945 } 946 947 948 void 949 add_rule_alloc(u_int32_t nr) 950 { 951 if (nr == 0) 952 return; 953 954 num_rules += nr; 955 956 if (rules == NULL) { 957 rules = malloc(num_rules * sizeof(struct pf_rule)); 958 if (rules == NULL) 959 err(1, "malloc"); 960 alloc_rules = num_rules; 961 } else if (num_rules > alloc_rules) { 962 rules = realloc(rules, num_rules * sizeof(struct pf_rule)); 963 if (rules == NULL) 964 err(1, "realloc"); 965 
alloc_rules = num_rules; 966 } 967 } 968 969 int label_length; 970 971 int 972 read_anchor_rules(char *anchor) 973 { 974 struct pfioc_rule pr; 975 u_int32_t nr, num, off; 976 int len; 977 978 if (pf_dev < 0) 979 return (-1); 980 981 memset(&pr, 0, sizeof(pr)); 982 strlcpy(pr.anchor, anchor, sizeof(pr.anchor)); 983 984 if (ioctl(pf_dev, DIOCGETRULES, &pr)) { 985 error("anchor %s: %s", anchor, strerror(errno)); 986 return (-1); 987 } 988 989 off = num_rules; 990 num = pr.nr; 991 add_rule_alloc(num); 992 993 for (nr = 0; nr < num; ++nr) { 994 pr.nr = nr; 995 if (ioctl(pf_dev, DIOCGETRULE, &pr)) { 996 error("DIOCGETRULE: %s", strerror(errno)); 997 return (-1); 998 } 999 /* XXX overload pr.anchor, to store a pointer to 1000 * anchor name */ 1001 pr.rule.anchor = (struct pf_anchor *) anchor; 1002 len = strlen(pr.rule.label); 1003 if (len > label_length) 1004 label_length = len; 1005 rules[off + nr] = pr.rule; 1006 } 1007 1008 return (num); 1009 } 1010 1011 struct anchor_name { 1012 char name[MAXPATHLEN]; 1013 struct anchor_name *next; 1014 u_int32_t ref; 1015 }; 1016 1017 struct anchor_name *anchor_root = NULL; 1018 struct anchor_name *anchor_end = NULL; 1019 struct anchor_name *anchor_free = NULL; 1020 1021 struct anchor_name* 1022 alloc_anchor_name(const char *path) 1023 { 1024 struct anchor_name *a; 1025 1026 a = anchor_free; 1027 if (a == NULL) { 1028 a = (struct anchor_name *)malloc(sizeof(struct anchor_name)); 1029 if (a == NULL) 1030 return (NULL); 1031 } else 1032 anchor_free = a->next; 1033 1034 if (anchor_root == NULL) 1035 anchor_end = a; 1036 1037 a->next = anchor_root; 1038 anchor_root = a; 1039 1040 a->ref = 0; 1041 strlcpy(a->name, path, sizeof(a->name)); 1042 return (a); 1043 } 1044 1045 void 1046 reset_anchor_names(void) 1047 { 1048 if (anchor_end == NULL) 1049 return; 1050 1051 anchor_end->next = anchor_free; 1052 anchor_free = anchor_root; 1053 anchor_root = anchor_end = NULL; 1054 } 1055 1056 struct pfioc_ruleset ruleset; 1057 char *rs_end = NULL; 
1058 1059 int 1060 read_rulesets(const char *path) 1061 { 1062 char *pre; 1063 struct anchor_name *a; 1064 u_int32_t nr, ns; 1065 int len; 1066 1067 if (path == NULL) 1068 ruleset.path[0] = '\0'; 1069 else if (strlcpy(ruleset.path, path, sizeof(ruleset.path)) >= 1070 sizeof(ruleset.path)) 1071 return (-1); 1072 1073 /* a persistent storage for anchor names */ 1074 a = alloc_anchor_name(ruleset.path); 1075 if (a == NULL) 1076 return (-1); 1077 1078 len = read_anchor_rules(a->name); 1079 if (len < 0) 1080 return (-1); 1081 1082 a->ref += len; 1083 1084 if (ioctl(pf_dev, DIOCGETRULESETS, &ruleset)) { 1085 error("DIOCGETRULESETS: %s", strerror(errno)); 1086 return (-1); 1087 } 1088 1089 ns = ruleset.nr; 1090 1091 if (rs_end == NULL) 1092 rs_end = ruleset.path + sizeof(ruleset.path); 1093 1094 /* 'pre' tracks the previous level on the anchor */ 1095 pre = strchr(ruleset.path, 0); 1096 len = rs_end - pre; 1097 if (len < 1) 1098 return (-1); 1099 --len; 1100 1101 for (nr = 0; nr < ns; ++nr) { 1102 ruleset.nr = nr; 1103 if (ioctl(pf_dev, DIOCGETRULESET, &ruleset)) { 1104 error("DIOCGETRULESET: %s", strerror(errno)); 1105 return (-1); 1106 } 1107 *pre = '/'; 1108 if (strlcpy(pre + 1, ruleset.name, len) < len) 1109 read_rulesets(ruleset.path); 1110 *pre = '\0'; 1111 } 1112 1113 return (0); 1114 } 1115 1116 void 1117 compute_anchor_field(void) 1118 { 1119 struct anchor_name *a; 1120 int sum, cnt, mx, nx; 1121 sum = cnt = mx = 0; 1122 1123 for (a = anchor_root; a != NULL; a = a->next, cnt++) { 1124 int len; 1125 if (a->ref == 0) 1126 continue; 1127 len = strlen(a->name); 1128 sum += len; 1129 if (len > mx) 1130 mx = len; 1131 } 1132 1133 nx = sum/cnt; 1134 if (nx < ANCHOR_FLD_SIZE) 1135 nx = (mx < ANCHOR_FLD_SIZE) ? 
mx : ANCHOR_FLD_SIZE; 1136 1137 if (FLD_ANCHOR->max_width != mx || 1138 FLD_ANCHOR->norm_width != nx) { 1139 FLD_ANCHOR->max_width = mx; 1140 FLD_ANCHOR->norm_width = nx; 1141 field_setup(); 1142 need_update = 1; 1143 } 1144 } 1145 1146 int 1147 read_rules(void) 1148 { 1149 int ret, nw, mw; 1150 num_rules = 0; 1151 1152 if (pf_dev == -1) 1153 return (-1); 1154 1155 label_length = MIN_LABEL_SIZE; 1156 1157 reset_anchor_names(); 1158 ret = read_rulesets(NULL); 1159 compute_anchor_field(); 1160 1161 nw = mw = label_length; 1162 if (nw > 16) 1163 nw = 16; 1164 1165 if (FLD_LABEL->norm_width != nw || 1166 FLD_LABEL->max_width != mw) { 1167 FLD_LABEL->norm_width = nw; 1168 FLD_LABEL->max_width = mw; 1169 field_setup(); 1170 need_update = 1; 1171 } 1172 1173 num_disp = num_rules; 1174 return (ret); 1175 } 1176 1177 void 1178 tb_print_addrw(struct pf_addr_wrap *addr, struct pf_addr *mask, u_int8_t af) 1179 { 1180 switch (addr->type) { 1181 case PF_ADDR_ADDRMASK: 1182 tb_print_addr(&addr->v.a.addr, mask, af); 1183 break; 1184 case PF_ADDR_NOROUTE: 1185 tbprintf("noroute"); 1186 break; 1187 case PF_ADDR_DYNIFTL: 1188 tbprintf("(%s)", addr->v.ifname); 1189 break; 1190 case PF_ADDR_TABLE: 1191 tbprintf("<%s>", addr->v.tblname); 1192 break; 1193 default: 1194 tbprintf("UNKNOWN"); 1195 break; 1196 } 1197 } 1198 1199 void 1200 tb_print_op(u_int8_t op, const char *a1, const char *a2) 1201 { 1202 if (op == PF_OP_IRG) 1203 tbprintf("%s >< %s ", a1, a2); 1204 else if (op == PF_OP_XRG) 1205 tbprintf("%s <> %s ", a1, a2); 1206 else if (op == PF_OP_RRG) 1207 tbprintf("%s:%s ", a1, a2); 1208 else if (op == PF_OP_EQ) 1209 tbprintf("= %s ", a1); 1210 else if (op == PF_OP_NE) 1211 tbprintf("!= %s ", a1); 1212 else if (op == PF_OP_LT) 1213 tbprintf("< %s ", a1); 1214 else if (op == PF_OP_LE) 1215 tbprintf("<= %s ", a1); 1216 else if (op == PF_OP_GT) 1217 tbprintf("> %s ", a1); 1218 else if (op == PF_OP_GE) 1219 tbprintf(">= %s ", a1); 1220 } 1221 1222 void 1223 tb_print_port(u_int8_t op, 
u_int16_t p1, u_int16_t p2, char *proto) 1224 { 1225 char a1[6], a2[6]; 1226 struct servent *s = getservbyport(p1, proto); 1227 1228 p1 = ntohs(p1); 1229 p2 = ntohs(p2); 1230 snprintf(a1, sizeof(a1), "%u", p1); 1231 snprintf(a2, sizeof(a2), "%u", p2); 1232 tbprintf("port "); 1233 if (s != NULL && (op == PF_OP_EQ || op == PF_OP_NE)) 1234 tb_print_op(op, s->s_name, a2); 1235 else 1236 tb_print_op(op, a1, a2); 1237 } 1238 1239 void 1240 tb_print_fromto(struct pf_rule_addr *src, struct pf_rule_addr *dst, 1241 u_int8_t af, u_int8_t proto) 1242 { 1243 if ( 1244 PF_AZERO(PT_ADDR(src), AF_INET6) && 1245 PF_AZERO(PT_ADDR(dst), AF_INET6) && 1246 ! PT_NOROUTE(src) && ! PT_NOROUTE(dst) && 1247 PF_AZERO(PT_MASK(src), AF_INET6) && 1248 PF_AZERO(PT_MASK(dst), AF_INET6) && 1249 !src->port_op && !dst->port_op) 1250 tbprintf("all "); 1251 else { 1252 tbprintf("from "); 1253 if (PT_NOROUTE(src)) 1254 tbprintf("no-route "); 1255 else if (PF_AZERO(PT_ADDR(src), AF_INET6) && 1256 PF_AZERO(PT_MASK(src), AF_INET6)) 1257 tbprintf("any "); 1258 else { 1259 if (src->neg) 1260 tbprintf("! "); 1261 tb_print_addrw(&src->addr, PT_MASK(src), af); 1262 tbprintf(" "); 1263 } 1264 if (src->port_op) 1265 tb_print_port(src->port_op, src->port[0], 1266 src->port[1], 1267 proto == IPPROTO_TCP ? "tcp" : "udp"); 1268 1269 tbprintf("to "); 1270 if (PT_NOROUTE(dst)) 1271 tbprintf("no-route "); 1272 else if (PF_AZERO(PT_ADDR(dst), AF_INET6) && 1273 PF_AZERO(PT_MASK(dst), AF_INET6)) 1274 tbprintf("any "); 1275 else { 1276 if (dst->neg) 1277 tbprintf("! "); 1278 tb_print_addrw(&dst->addr, PT_MASK(dst), af); 1279 tbprintf(" "); 1280 } 1281 if (dst->port_op) 1282 tb_print_port(dst->port_op, dst->port[0], 1283 dst->port[1], 1284 proto == IPPROTO_TCP ? 
"tcp" : "udp"); 1285 } 1286 } 1287 1288 void 1289 tb_print_ugid(u_int8_t op, unsigned u1, unsigned u2, 1290 const char *t, unsigned umax) 1291 { 1292 char a1[11], a2[11]; 1293 1294 snprintf(a1, sizeof(a1), "%u", u1); 1295 snprintf(a2, sizeof(a2), "%u", u2); 1296 1297 tbprintf("%s ", t); 1298 if (u1 == umax && (op == PF_OP_EQ || op == PF_OP_NE)) 1299 tb_print_op(op, "unknown", a2); 1300 else 1301 tb_print_op(op, a1, a2); 1302 } 1303 1304 void 1305 tb_print_flags(u_int8_t f) 1306 { 1307 const char *tcpflags = "FSRPAUEW"; 1308 int i; 1309 1310 for (i = 0; tcpflags[i]; ++i) 1311 if (f & (1 << i)) 1312 tbprintf("%c", tcpflags[i]); 1313 } 1314 1315 void 1316 print_rule(struct pf_rule *pr) 1317 { 1318 static const char *actiontypes[] = { "Pass", "Block", "Scrub", 1319 "no Scrub", "Nat", "no Nat", "Binat", "no Binat", "Rdr", 1320 "no Rdr", "SynProxy Block", "Defer", "Match" }; 1321 int numact = sizeof(actiontypes) / sizeof(char *); 1322 1323 static const char *routetypes[] = { "", "fastroute", "route-to", 1324 "dup-to", "reply-to" }; 1325 1326 int numroute = sizeof(routetypes) / sizeof(char *); 1327 1328 if (pr == NULL) return; 1329 1330 print_fld_str(FLD_LABEL, pr->label); 1331 print_fld_size(FLD_STATS, pr->states_tot); 1332 1333 print_fld_size(FLD_PKTS, pr->packets[0] + pr->packets[1]); 1334 print_fld_size(FLD_BYTES, pr->bytes[0] + pr->bytes[1]); 1335 1336 print_fld_uint(FLD_RULE, pr->nr); 1337 if (pr->direction == PF_OUT) 1338 print_fld_str(FLD_DIR, "Out"); 1339 else if (pr->direction == PF_IN) 1340 print_fld_str(FLD_DIR, "In"); 1341 else 1342 print_fld_str(FLD_DIR, "Any"); 1343 1344 if (pr->quick) 1345 print_fld_str(FLD_QUICK, "Quick"); 1346 1347 if (pr->keep_state == PF_STATE_NORMAL) 1348 print_fld_str(FLD_KST, "Keep"); 1349 else if (pr->keep_state == PF_STATE_MODULATE) 1350 print_fld_str(FLD_KST, "Mod"); 1351 #ifdef PF_STATE_SYNPROXY 1352 else if (pr->keep_state == PF_STATE_MODULATE) 1353 print_fld_str(FLD_KST, "Syn"); 1354 #endif 1355 if (pr->log == 1) 1356 
print_fld_str(FLD_LOG, "Log"); 1357 else if (pr->log == 2) 1358 print_fld_str(FLD_LOG, "All"); 1359 1360 if (pr->action >= numact) 1361 print_fld_uint(FLD_ACTION, pr->action); 1362 else print_fld_str(FLD_ACTION, actiontypes[pr->action]); 1363 1364 if (pr->proto) { 1365 struct protoent *p = getprotobynumber(pr->proto); 1366 1367 if (p != NULL) 1368 print_fld_str(FLD_PROTO, p->p_name); 1369 else 1370 print_fld_uint(FLD_PROTO, pr->proto); 1371 } 1372 1373 if (pr->ifname[0]) { 1374 tb_start(); 1375 if (pr->ifnot) 1376 tbprintf("!"); 1377 tbprintf("%s", pr->ifname); 1378 print_fld_tb(FLD_IF); 1379 } 1380 if (pr->max_states) 1381 print_fld_uint(FLD_STMAX, pr->max_states); 1382 1383 /* print info field */ 1384 1385 tb_start(); 1386 1387 if (pr->action == PF_DROP) { 1388 if (pr->rule_flag & PFRULE_RETURNRST) 1389 tbprintf("return-rst "); 1390 #ifdef PFRULE_RETURN 1391 else if (pr->rule_flag & PFRULE_RETURN) 1392 tbprintf("return "); 1393 #endif 1394 #ifdef PFRULE_RETURNICMP 1395 else if (pr->rule_flag & PFRULE_RETURNICMP) 1396 tbprintf("return-icmp "); 1397 #endif 1398 else 1399 tbprintf("drop "); 1400 } 1401 1402 if (pr->rt > 0 && pr->rt < numroute) { 1403 tbprintf("%s ", routetypes[pr->rt]); 1404 } 1405 1406 if (pr->af) { 1407 if (pr->af == AF_INET) 1408 tbprintf("inet "); 1409 else 1410 tbprintf("inet6 "); 1411 } 1412 1413 tb_print_fromto(&pr->src, &pr->dst, pr->af, pr->proto); 1414 1415 if (pr->uid.op) 1416 tb_print_ugid(pr->uid.op, pr->uid.uid[0], pr->uid.uid[1], 1417 "user", UID_MAX); 1418 if (pr->gid.op) 1419 tb_print_ugid(pr->gid.op, pr->gid.gid[0], pr->gid.gid[1], 1420 "group", GID_MAX); 1421 1422 if (pr->action == PF_PASS && 1423 (pr->proto == 0 || pr->proto == IPPROTO_TCP) && 1424 (pr->flags != TH_SYN || pr->flagset != (TH_SYN | TH_ACK) )) { 1425 tbprintf("flags "); 1426 if (pr->flags || pr->flagset) { 1427 tb_print_flags(pr->flags); 1428 tbprintf("/"); 1429 tb_print_flags(pr->flagset); 1430 } else 1431 tbprintf("any "); 1432 } 1433 1434 tbprintf(" "); 1435 1436 
if (pr->tos) 1437 tbprintf("tos 0x%2.2x ", pr->tos); 1438 #ifdef PFRULE_FRAGMENT 1439 if (pr->rule_flag & PFRULE_FRAGMENT) 1440 tbprintf("fragment "); 1441 #endif 1442 #ifdef PFRULE_NODF 1443 if (pr->rule_flag & PFRULE_NODF) 1444 tbprintf("no-df "); 1445 #endif 1446 #ifdef PFRULE_RANDOMID 1447 if (pr->rule_flag & PFRULE_RANDOMID) 1448 tbprintf("random-id "); 1449 #endif 1450 if (pr->min_ttl) 1451 tbprintf("min-ttl %d ", pr->min_ttl); 1452 if (pr->max_mss) 1453 tbprintf("max-mss %d ", pr->max_mss); 1454 if (pr->allow_opts) 1455 tbprintf("allow-opts "); 1456 1457 /* XXX more missing */ 1458 1459 if (pr->qname[0] && pr->pqname[0]) 1460 tbprintf("queue(%s, %s) ", pr->qname, pr->pqname); 1461 else if (pr->qname[0]) 1462 tbprintf("queue %s ", pr->qname); 1463 1464 if (pr->tagname[0]) 1465 tbprintf("tag %s ", pr->tagname); 1466 if (pr->match_tagname[0]) { 1467 if (pr->match_tag_not) 1468 tbprintf("! "); 1469 tbprintf("tagged %s ", pr->match_tagname); 1470 } 1471 1472 print_fld_tb(FLD_RINFO); 1473 1474 /* XXX anchor field overloaded with anchor name */ 1475 print_fld_str(FLD_ANCHOR, (char *)pr->anchor); 1476 tb_end(); 1477 1478 end_line(); 1479 } 1480 1481 void 1482 print_rules(void) 1483 { 1484 u_int32_t n, count = 0; 1485 1486 for (n = dispstart; n < num_rules; n++) { 1487 print_rule(rules + n); 1488 count ++; 1489 if (maxprint > 0 && count >= maxprint) 1490 break; 1491 } 1492 } 1493 1494 /* queue display */ 1495 1496 struct pf_altq_node * 1497 pfctl_find_altq_node(struct pf_altq_node *root, const char *qname, 1498 const char *ifname) 1499 { 1500 struct pf_altq_node *node, *child; 1501 1502 for (node = root; node != NULL; node = node->next) { 1503 if (!strcmp(node->altq.qname, qname) 1504 && !(strcmp(node->altq.ifname, ifname))) 1505 return (node); 1506 if (node->children != NULL) { 1507 child = pfctl_find_altq_node(node->children, qname, 1508 ifname); 1509 if (child != NULL) 1510 return (child); 1511 } 1512 } 1513 return (NULL); 1514 } 1515 1516 void 1517 
pfctl_insert_altq_node(struct pf_altq_node **root,
    const struct pf_altq altq, const struct queue_stats qstats)
{
	struct pf_altq_node *node;

	node = calloc(1, sizeof(struct pf_altq_node));
	if (node == NULL)
		err(1, "pfctl_insert_altq_node: calloc");
	memcpy(&node->altq, &altq, sizeof(struct pf_altq));
	memcpy(&node->qstats, &qstats, sizeof(qstats));
	node->next = node->children = node->next_flat = NULL;
	node->depth = 0;
	node->visited = 1;

	if (*root == NULL)
		*root = node;
	else if (!altq.parent[0]) {
		/* no parent name: a top-level queue; append to root list */
		struct pf_altq_node *prev = *root;

		while (prev->next != NULL)
			prev = prev->next;
		prev->next = node;
	} else {
		/* child queue: append to its parent's child list */
		struct pf_altq_node *parent;

		parent = pfctl_find_altq_node(*root, altq.parent, altq.ifname);
		if (parent == NULL)
			errx(1, "parent %s not found", altq.parent);
		node->depth = parent->depth+1;
		if (parent->children == NULL)
			parent->children = node;
		else {
			struct pf_altq_node *prev = parent->children;

			while (prev->next != NULL)
				prev = prev->next;
			prev->next = node;
		}
	}
}

/*
 * Thread next_flat pointers through the queue tree in depth-first
 * order so the tree can also be walked as a flat list.  "up" is the
 * node that follows the current sibling list in flat order (NULL at
 * the end of the walk).
 */
void
pfctl_set_next_flat(struct pf_altq_node *node, struct pf_altq_node *up)
{
	while (node) {
		struct pf_altq_node *next = node->next ? node->next : up;
		if (node->children) {
			/* descend first; siblings resume at "next" */
			node->next_flat = node->children;
			pfctl_set_next_flat(node->children, next);
		} else
			node->next_flat = next;
		node = node->next;
	}
}

/*
 * Refresh queue statistics from the kernel.  Walks all ALTQ entries
 * (DIOCGETALTQS/DIOCGETALTQ), fetches stats for real queues
 * (qid > 0) via DIOCGETQSTATS, and either updates the existing tree
 * node (keeping the previous sample in qstats_last for rate
 * calculation) or inserts a new one, setting *inserts.  Entries with
 * qid == 0 are not queues and are subtracted from num_queues.
 * Re-threads next_flat before returning.  Returns 0 on success, -1 on
 * any ioctl failure or if /dev/pf is not open.
 */
int
pfctl_update_qstats(struct pf_altq_node **root, int *inserts)
{
	struct pf_altq_node *node;
	struct pfioc_altq pa;
	struct pfioc_qstats pq;
	u_int32_t nr;
	struct queue_stats qstats;
	u_int32_t nr_queues;
	int ret = 0;

	*inserts = 0;
	memset(&pa, 0, sizeof(pa));
	memset(&pq, 0, sizeof(pq));
	memset(&qstats, 0, sizeof(qstats));

	if (pf_dev < 0)
		return (-1);

	if (ioctl(pf_dev, DIOCGETALTQS, &pa)) {
		error("DIOCGETALTQS: %s", strerror(errno));
		return (-1);
	}

	num_queues = nr_queues = pa.nr;
	for (nr = 0; nr < nr_queues; ++nr) {
		pa.nr = nr;
		if (ioctl(pf_dev, DIOCGETALTQ, &pa)) {
			error("DIOCGETALTQ: %s", strerror(errno));
			ret = -1;
			break;
		}
		if (pa.altq.qid > 0) {
			pq.nr = nr;
			pq.ticket = pa.ticket;
			pq.buf = &qstats;
			pq.nbytes = sizeof(qstats);
			if (ioctl(pf_dev, DIOCGETQSTATS, &pq)) {
				error("DIOCGETQSTATS: %s", strerror(errno));
				ret = -1;
				break;
			}
			qstats.valid = 1;
			gettimeofday(&qstats.timestamp, NULL);
			if ((node = pfctl_find_altq_node(*root, pa.altq.qname,
			    pa.altq.ifname)) != NULL) {
				/* update altq data too as bandwidth may have changed */
				memcpy(&node->altq, &pa.altq, sizeof(struct pf_altq));
				memcpy(&node->qstats_last, &node->qstats,
				    sizeof(struct queue_stats));
				memcpy(&node->qstats, &qstats,
				    sizeof(qstats));
				node->visited = 1;
			} else {
				pfctl_insert_altq_node(root, pa.altq, qstats);
				*inserts = 1;
			}
		}
		else
			--num_queues;
	}

	pfctl_set_next_flat(*root, NULL);

	return (ret);
}

void
pfctl_free_altq_node(struct pf_altq_node *node)
{
1642 while (node != NULL) { 1643 struct pf_altq_node *prev; 1644 1645 if (node->children != NULL) 1646 pfctl_free_altq_node(node->children); 1647 prev = node; 1648 node = node->next; 1649 free(prev); 1650 } 1651 } 1652 1653 void 1654 pfctl_mark_all_unvisited(struct pf_altq_node *root) 1655 { 1656 if (root != NULL) { 1657 struct pf_altq_node *node = root; 1658 while (node != NULL) { 1659 node->visited = 0; 1660 node = node->next_flat; 1661 } 1662 } 1663 } 1664 1665 int 1666 pfctl_have_unvisited(struct pf_altq_node *root) 1667 { 1668 if (root == NULL) 1669 return(0); 1670 else { 1671 struct pf_altq_node *node = root; 1672 while (node != NULL) { 1673 if (node->visited == 0) 1674 return(1); 1675 node = node->next_flat; 1676 } 1677 return(0); 1678 } 1679 } 1680 1681 struct pf_altq_node *altq_root = NULL; 1682 1683 int 1684 select_queues(void) 1685 { 1686 num_disp = num_queues; 1687 return (0); 1688 } 1689 1690 int 1691 read_queues(void) 1692 { 1693 static int first_read = 1; 1694 int inserts; 1695 num_disp = num_queues = 0; 1696 1697 pfctl_mark_all_unvisited(altq_root); 1698 if (pfctl_update_qstats(&altq_root, &inserts)) 1699 return (-1); 1700 1701 /* Allow inserts only on first read; 1702 * on subsequent reads clear and reload 1703 */ 1704 if (first_read == 0 && 1705 (inserts != 0 || pfctl_have_unvisited(altq_root) != 0)) { 1706 pfctl_free_altq_node(altq_root); 1707 altq_root = NULL; 1708 first_read = 1; 1709 if (pfctl_update_qstats(&altq_root, &inserts)) 1710 return (-1); 1711 } 1712 1713 first_read = 0; 1714 num_disp = num_queues; 1715 1716 return(0); 1717 } 1718 1719 double 1720 calc_interval(struct timeval *cur_time, struct timeval *last_time) 1721 { 1722 double sec; 1723 1724 sec = (double)(cur_time->tv_sec - last_time->tv_sec) + 1725 (double)(cur_time->tv_usec - last_time->tv_usec) / 1000000; 1726 1727 return (sec); 1728 } 1729 1730 double 1731 calc_rate(u_int64_t new_bytes, u_int64_t last_bytes, double interval) 1732 { 1733 double rate; 1734 1735 rate = 
(double)(new_bytes - last_bytes) / interval; 1736 return (rate); 1737 } 1738 1739 double 1740 calc_pps(u_int64_t new_pkts, u_int64_t last_pkts, double interval) 1741 { 1742 double pps; 1743 1744 pps = (double)(new_pkts - last_pkts) / interval; 1745 return (pps); 1746 } 1747 1748 #define DEFAULT_PRIORITY 1 1749 1750 void 1751 print_queue(struct pf_altq_node *node) 1752 { 1753 u_int8_t d; 1754 double interval, pps, bps; 1755 pps = bps = 0; 1756 1757 tb_start(); 1758 for (d = 0; d < node->depth; d++) 1759 tbprintf(" "); 1760 tbprintf(node->altq.qname); 1761 print_fld_tb(FLD_QUEUE); 1762 1763 if (node->altq.scheduler == ALTQT_CBQ || 1764 node->altq.scheduler == ALTQT_HFSC 1765 ) 1766 print_fld_bw(FLD_BANDW, (double)node->altq.bandwidth); 1767 1768 if (node->altq.priority != DEFAULT_PRIORITY) 1769 print_fld_uint(FLD_PRIO, 1770 node->altq.priority); 1771 1772 if (node->qstats.valid && node->qstats_last.valid) 1773 interval = calc_interval(&node->qstats.timestamp, 1774 &node->qstats_last.timestamp); 1775 else 1776 interval = 0; 1777 1778 switch (node->altq.scheduler) { 1779 case ALTQT_CBQ: 1780 print_fld_str(FLD_SCHED, "cbq"); 1781 print_fld_size(FLD_PKTS, 1782 node->qstats.data.cbq_stats.xmit_cnt.packets); 1783 print_fld_size(FLD_BYTES, 1784 node->qstats.data.cbq_stats.xmit_cnt.bytes); 1785 print_fld_size(FLD_DROPP, 1786 node->qstats.data.cbq_stats.drop_cnt.packets); 1787 print_fld_size(FLD_DROPB, 1788 node->qstats.data.cbq_stats.drop_cnt.bytes); 1789 print_fld_size(FLD_QLEN, node->qstats.data.cbq_stats.qcnt); 1790 print_fld_size(FLD_BORR, node->qstats.data.cbq_stats.borrows); 1791 print_fld_size(FLD_SUSP, node->qstats.data.cbq_stats.delays); 1792 if (interval > 0) { 1793 pps = calc_pps(node->qstats.data.cbq_stats.xmit_cnt.packets, 1794 node->qstats_last.data.cbq_stats.xmit_cnt.packets, interval); 1795 bps = calc_rate(node->qstats.data.cbq_stats.xmit_cnt.bytes, 1796 node->qstats_last.data.cbq_stats.xmit_cnt.bytes, interval); 1797 } 1798 break; 1799 case ALTQT_PRIQ: 1800 
		print_fld_str(FLD_SCHED, "priq");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.priq_stats.xmitcnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.priq_stats.xmitcnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.priq_stats.dropcnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.priq_stats.dropcnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.priq_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.priq_stats.xmitcnt.packets,
			    node->qstats_last.data.priq_stats.xmitcnt.packets, interval);
			bps = calc_rate(node->qstats.data.priq_stats.xmitcnt.bytes,
			    node->qstats_last.data.priq_stats.xmitcnt.bytes, interval);
		}
		break;
	case ALTQT_HFSC:
		print_fld_str(FLD_SCHED, "hfsc");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.hfsc_stats.xmit_cnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.hfsc_stats.xmit_cnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.hfsc_stats.drop_cnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.hfsc_stats.drop_cnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.hfsc_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.hfsc_stats.xmit_cnt.packets,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.packets, interval);
			bps = calc_rate(node->qstats.data.hfsc_stats.xmit_cnt.bytes,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.bytes, interval);
		}
		break;
	}

	/* if (node->altq.scheduler != ALTQT_HFSC && interval > 0) { */
	if (node->altq.scheduler && interval > 0) {
		tb_start();
		/* show one decimal place for sub-1 pps, integer otherwise */
		if (pps > 0 && pps < 1)
			tbprintf("%-3.1lf", pps);
		else
			tbprintf("%u", (unsigned int) pps);

		print_fld_tb(FLD_PKTSPS);
		print_fld_bw(FLD_BYTESPS, bps);
	}
}

/*
 * Print the currently visible window of queues by walking the
 * flattened (next_flat) list: skip the first dispstart nodes, then
 * print up to maxprint lines (0 means no limit).
 */
void
print_queues(void)
{
	u_int32_t n, count = 0;
	struct pf_altq_node *node = altq_root;

	for (n = 0; n < dispstart; n++)
		node = node->next_flat;

	for (; n < num_disp; n++) {
		print_queue(node);
		node = node->next_flat;
		end_line();
		count ++;
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* main program functions */

/*
 * Show or hide the state-rate columns (FLD_SI/FLD_SP) to match the
 * cachestates setting; no-op when the setting has not changed since
 * the last call.
 */
void
update_cache(void)
{
	static int pstate = -1;
	if (pstate == cachestates)
		return;

	pstate = cachestates;
	if (cachestates) {
		show_field(FLD_SI);
		show_field(FLD_SP);
		/* force an immediate refresh so rates appear right away */
		gotsig_alarm = 1;
	} else {
		hide_field(FLD_SI);
		hide_field(FLD_SP);
		need_update = 1;
	}
	field_setup();
}

/*
 * One-time pf view initialization: register the views, open /dev/pf
 * (falling back to an empty state buffer on failure), size the state
 * buffer from the kernel's current state count, and set up the state
 * rate cache.  Always returns 1 so the view set is registered even
 * without pf access.
 */
int
initpftop(void)
{
	struct pf_status status;
	field_view *v;
	int cachesize = DEFAULT_CACHE_SIZE;

	v = views;
	while(v->name != NULL)
		add_view(v++);

	pf_dev = open("/dev/pf", O_RDONLY);
	if (pf_dev == -1) {
		alloc_buf(0);
	} else if (ioctl(pf_dev, DIOCGETSTATUS, &status)) {
		warn("DIOCGETSTATUS");
		alloc_buf(0);
	} else
		alloc_buf(status.states);

	/* initialize cache with given size */
	if (cache_init(cachesize))
		warnx("Failed to initialize cache.");
	else if (interactive && cachesize > 0)
		cachestates = 1;

	update_cache();

	show_field(FLD_STMAX);
	show_field(FLD_ANCHOR);

	return (1);
}