/* $OpenBSD: pftop.c,v 1.19 2011/04/05 15:07:46 sthen Exp $ */
/*
 * Copyright (c) 2001, 2007 Can Erkin Acar
 * Copyright (c) 2001 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <net/pfvar.h>
#include <arpa/inet.h>

#include <altq/altq.h>
#include <altq/altq_cbq.h>
#include <altq/altq_priq.h>
#include <altq/altq_hfsc.h>

#include <ctype.h>
#include <curses.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include "systat.h"
#include "engine.h"
#include "cache.h"

extern const char *tcpstates[];

#define MIN_NUM_STATES 1024
#define NUM_STATE_INC  1024

#define DEFAULT_CACHE_SIZE 10000

/* XXX must also check type before use */
#define PT_ADDR(x) (&(x)->addr.v.a.addr)

/* XXX must also check type before use */
#define PT_MASK(x) (&(x)->addr.v.a.mask)

#define PT_NOROUTE(x) ((x)->addr.type == PF_ADDR_NOROUTE)

/* view management */
int select_states(void);
int read_states(void);
void sort_states(void);
void print_states(void);

int select_rules(void);
int read_rules(void);
void print_rules(void);

int select_queues(void);
int read_queues(void);
void print_queues(void);

void update_cache(void);

/* qsort callbacks */
int sort_size_callback(const void *s1, const void *s2);
int sort_exp_callback(const void *s1, const void *s2);
int sort_pkt_callback(const void *s1, const void *s2);
int sort_age_callback(const void *s1, const void *s2);
int sort_sa_callback(const void *s1, const void *s2);
int sort_sp_callback(const void *s1, const void *s2);
int sort_da_callback(const void *s1, const void *s2);
int sort_dp_callback(const void *s1, const void *s2);
int sort_rate_callback(const void *s1, const void *s2);
int sort_peak_callback(const void *s1, const void *s2);
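/*
 * Shared state for the pf views: state_buf holds the raw states fetched
 * with DIOCGETSTATES, state_ord is the index array that gets sorted for
 * display, and state_cache maps each state to its rate-cache entry.
 */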
int pf_dev = -1;

struct sc_ent **state_cache = NULL;
struct pfsync_state *state_buf = NULL;
int state_buf_len = 0;
u_int32_t *state_ord = NULL;
u_int32_t num_states = 0;
u_int32_t num_states_all = 0;
u_int32_t num_rules = 0;
u_int32_t num_queues = 0;
int cachestates = 0;

char *filter_string = NULL;
int dumpfilter = 0;

#define MIN_LABEL_SIZE 5
#define ANCHOR_FLD_SIZE 12

/* Define fields */
field_def fields[] = {
	{"SRC", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"DEST", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"GW", 20, 45, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"STATE", 5, 23, 18, FLD_ALIGN_COLUMN, -1, 0, 0, 0},
	{"AGE", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"EXP", 5, 9, 4, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"PR ", 4, 9, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"DIR", 1, 3, 2, FLD_ALIGN_CENTER, -1, 0, 0, 0},
	{"PKTS", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"BYTES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"RULE", 2, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"LABEL", MIN_LABEL_SIZE, MIN_LABEL_SIZE, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"STATES", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"EVAL", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"ACTION", 1, 8, 4, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"LOG", 1, 3, 2, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"QUICK", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"KS", 1, 1, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"IF", 4, 6, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"INFO", 40, 80, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"MAX", 3, 5, 2, FLD_ALIGN_RIGHT, -1, 0, 0},
	{"RATE", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"AVG", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"PEAK", 5, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"ANCHOR", 6, 16, 1, FLD_ALIGN_LEFT, -1, 0, 0},
	{"QUEUE", 15, 30, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"BW", 4, 5, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"SCH", 3, 4, 1, FLD_ALIGN_LEFT, -1, 0, 0, 0},
	{"PRIO", 1, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"DROP_P", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"DROP_B", 6, 8, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"QLEN", 4, 4, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"BORROW", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"SUSPENDS", 4, 6, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"P/S", 3, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0},
	{"B/S", 4, 7, 1, FLD_ALIGN_RIGHT, -1, 0, 0, 0}
};
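/*
 * The FLD_* macros below address fields[] by position, so their indices
 * must stay in sync with the order of the initializers above.
 */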
/* for states */
#define FLD_SRC		FIELD_ADDR(fields,0)
#define FLD_DEST	FIELD_ADDR(fields,1)
#define FLD_GW		FIELD_ADDR(fields,2)
#define FLD_STATE	FIELD_ADDR(fields,3)
#define FLD_AGE		FIELD_ADDR(fields,4)
#define FLD_EXP		FIELD_ADDR(fields,5)
/* common */
#define FLD_PROTO	FIELD_ADDR(fields,6)
#define FLD_DIR		FIELD_ADDR(fields,7)
#define FLD_PKTS	FIELD_ADDR(fields,8)
#define FLD_BYTES	FIELD_ADDR(fields,9)
#define FLD_RULE	FIELD_ADDR(fields,10)
/* for rules */
#define FLD_LABEL	FIELD_ADDR(fields,11)
#define FLD_STATS	FIELD_ADDR(fields,12)
#define FLD_EVAL	FIELD_ADDR(fields,13)
#define FLD_ACTION	FIELD_ADDR(fields,14)
#define FLD_LOG		FIELD_ADDR(fields,15)
#define FLD_QUICK	FIELD_ADDR(fields,16)
#define FLD_KST		FIELD_ADDR(fields,17)
#define FLD_IF		FIELD_ADDR(fields,18)
#define FLD_RINFO	FIELD_ADDR(fields,19)
#define FLD_STMAX	FIELD_ADDR(fields,20)
/* other */
#define FLD_SI		FIELD_ADDR(fields,21)	/* instantaneous speed */
#define FLD_SA		FIELD_ADDR(fields,22)	/* average speed */
#define FLD_SP		FIELD_ADDR(fields,23)	/* peak speed */
#define FLD_ANCHOR	FIELD_ADDR(fields,24)
/* for queues */
#define FLD_QUEUE	FIELD_ADDR(fields,25)
#define FLD_BANDW	FIELD_ADDR(fields,26)
#define FLD_SCHED	FIELD_ADDR(fields,27)
#define FLD_PRIO	FIELD_ADDR(fields,28)
#define FLD_DROPP	FIELD_ADDR(fields,29)
#define FLD_DROPB	FIELD_ADDR(fields,30)
#define FLD_QLEN	FIELD_ADDR(fields,31)
#define FLD_BORR	FIELD_ADDR(fields,32)
#define FLD_SUSP	FIELD_ADDR(fields,33)
#define FLD_PKTSPS	FIELD_ADDR(fields,34)
#define FLD_BYTESPS	FIELD_ADDR(fields,35)
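/*
 * Each view below is a NULL-terminated array listing the columns it
 * displays, in order.
 */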
/* Define views */
field_def *view0[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE,
	FLD_AGE, FLD_EXP, FLD_PKTS, FLD_BYTES, NULL
};

field_def *view1[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_GW, FLD_STATE, FLD_AGE,
	FLD_EXP, FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, NULL
};

field_def *view2[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_STATE, FLD_AGE, FLD_EXP,
	FLD_PKTS, FLD_BYTES, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view3[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_AGE, FLD_EXP, FLD_PKTS,
	FLD_BYTES, FLD_STATE, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view4[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_PKTS, FLD_BYTES, FLD_STATE,
	FLD_AGE, FLD_EXP, FLD_SI, FLD_SP, FLD_SA, FLD_RULE, FLD_GW, NULL
};

field_def *view5[] = {
	FLD_RULE, FLD_ANCHOR, FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF,
	FLD_PROTO, FLD_KST, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX,
	FLD_RINFO, NULL
};

field_def *view6[] = {
	FLD_RULE, FLD_LABEL, FLD_PKTS, FLD_BYTES, FLD_STATS, FLD_STMAX,
	FLD_ACTION, FLD_DIR, FLD_LOG, FLD_QUICK, FLD_IF, FLD_PROTO,
	FLD_ANCHOR, FLD_KST, NULL
};

field_def *view7[] = {
	FLD_PROTO, FLD_DIR, FLD_SRC, FLD_DEST, FLD_SI, FLD_SP, FLD_SA,
	FLD_BYTES, FLD_STATE, FLD_PKTS, FLD_AGE, FLD_EXP, FLD_RULE, FLD_GW, NULL
};

field_def *view8[] = {
	FLD_QUEUE, FLD_BANDW, FLD_SCHED, FLD_PRIO, FLD_PKTS, FLD_BYTES,
	FLD_DROPP, FLD_DROPB, FLD_QLEN, FLD_BORR, FLD_SUSP, FLD_PKTSPS,
	FLD_BYTESPS, NULL
};

/* Define orderings */
order_type order_list[] = {
	{"none", "none", 'N', NULL},
	{"bytes", "bytes", 'B', sort_size_callback},
	{"expiry", "exp", 'E', sort_exp_callback},
	{"packets", "pkt", 'P', sort_pkt_callback},
	{"age", "age", 'A', sort_age_callback},
	{"source addr", "src", 'F', sort_sa_callback},
	{"dest. addr", "dest", 'T', sort_da_callback},
	{"source port", "sport", 'S', sort_sp_callback},
	{"dest. port", "dport", 'D', sort_dp_callback},
	{"rate", "rate", 'R', sort_rate_callback},
	{"peak", "peak", 'K', sort_peak_callback},
	{NULL, NULL, 0, NULL}
};

/* Define view managers */
struct view_manager state_mgr = {
	"States", select_states, read_states, sort_states, print_header,
	print_states, keyboard_callback, order_list, NULL
};

struct view_manager rule_mgr = {
	"Rules", select_rules, read_rules, NULL, print_header,
	print_rules, keyboard_callback, NULL, NULL
};

struct view_manager queue_mgr = {
	"Queues", select_queues, read_queues, NULL, print_header,
	print_queues, keyboard_callback, NULL, NULL
};

field_view views[] = {
	{view2, "states", '8', &state_mgr},
	{view5, "rules", '9', &rule_mgr},
	{view8, "queues", 'Q', &queue_mgr},
	{NULL, NULL, 0, NULL}
};


/* altq structures from pfctl */

union class_stats {
	class_stats_t cbq_stats;
	struct priq_classstats priq_stats;
	struct hfsc_classstats hfsc_stats;
};

struct queue_stats {
	union class_stats data;
	struct timeval timestamp;
	u_int8_t valid;
};

struct pf_altq_node {
	struct pf_altq altq;
	struct pf_altq_node *next;
	struct pf_altq_node *children;
	struct pf_altq_node *next_flat;
	struct queue_stats qstats;
	struct queue_stats qstats_last;
	u_int8_t depth;
	u_int8_t visited;
};


/* ordering functions */

int
sort_size_callback(const void *s1, const void *s2)
{
	u_int64_t b1 = COUNTER(state_buf[* (u_int32_t *) s1].bytes[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s1].bytes[1]);
	u_int64_t b2 = COUNTER(state_buf[* (u_int32_t *) s2].bytes[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s2].bytes[1]);
	if (b2 > b1)
		return sortdir;
	if (b2 < b1)
		return -sortdir;
	return 0;
}

int
sort_pkt_callback(const void *s1, const void *s2)
{
	u_int64_t p1 = COUNTER(state_buf[* (u_int32_t *) s1].packets[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s1].packets[1]);
	u_int64_t p2 = COUNTER(state_buf[* (u_int32_t *) s2].packets[0]) +
	    COUNTER(state_buf[* (u_int32_t *) s2].packets[1]);
	if (p2 > p1)
		return sortdir;
	if (p2 < p1)
		return -sortdir;
	return 0;
}

int
sort_age_callback(const void *s1, const void *s2)
{
	if (ntohl(state_buf[* (u_int32_t *) s2].creation) >
	    ntohl(state_buf[* (u_int32_t *) s1].creation))
		return sortdir;
	if (ntohl(state_buf[* (u_int32_t *) s2].creation) <
	    ntohl(state_buf[* (u_int32_t *) s1].creation))
		return -sortdir;
	return 0;
}

int
sort_exp_callback(const void *s1, const void *s2)
{
	if (ntohl(state_buf[* (u_int32_t *) s2].expire) >
	    ntohl(state_buf[* (u_int32_t *) s1].expire))
		return sortdir;
	if (ntohl(state_buf[* (u_int32_t *) s2].expire) <
	    ntohl(state_buf[* (u_int32_t *) s1].expire))
		return -sortdir;
	return 0;
}

int
sort_rate_callback(const void *s1, const void *s2)
{
	struct sc_ent *e1 = state_cache[* (u_int32_t *) s1];
	struct sc_ent *e2 = state_cache[* (u_int32_t *) s2];

	if (e1 == NULL)
		return sortdir;
	if (e2 == NULL)
		return -sortdir;

	if (e2->rate > e1->rate)
		return sortdir;
	if (e2->rate < e1->rate)
		return -sortdir;
	return 0;
}
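/*
 * The rate/peak orderings compare state-cache entries.  A state that
 * has no cache entry yet has no measured rate, so it is ordered by the
 * sort direction alone.
 */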
int
sort_peak_callback(const void *s1, const void *s2)
{
	struct sc_ent *e1 = state_cache[* (u_int32_t *) s1];
	struct sc_ent *e2 = state_cache[* (u_int32_t *) s2];

	if (e2 == NULL)
		return -sortdir;
	if (e1 == NULL || e2 == NULL)
		return 0;

	if (e2->peak > e1->peak)
		return sortdir;
	if (e2->peak < e1->peak)
		return -sortdir;
	return 0;
}

int
compare_addr(int af, const struct pf_addr *a, const struct pf_addr *b)
{
	switch (af) {
	case AF_INET:
		if (ntohl(a->addr32[0]) > ntohl(b->addr32[0]))
			return 1;
		if (a->addr32[0] != b->addr32[0])
			return -1;
		break;
	case AF_INET6:
		if (ntohl(a->addr32[0]) > ntohl(b->addr32[0]))
			return 1;
		if (a->addr32[0] != b->addr32[0])
			return -1;
		if (ntohl(a->addr32[1]) > ntohl(b->addr32[1]))
			return 1;
		if (a->addr32[1] != b->addr32[1])
			return -1;
		if (ntohl(a->addr32[2]) > ntohl(b->addr32[2]))
			return 1;
		if (a->addr32[2] != b->addr32[2])
			return -1;
		if (ntohl(a->addr32[3]) > ntohl(b->addr32[3]))
			return 1;
		if (a->addr32[3] != b->addr32[3])
			return -1;
		break;
	}

	return 0;
}

static __inline int
sort_addr_callback(const struct pfsync_state *s1,
    const struct pfsync_state *s2, int dir)
{
	const struct pf_addr *aa, *ab;
	u_int16_t pa, pb;
	int af, ret, ii, io;

	af = s1->af;

	if (af > s2->af)
		return sortdir;
	if (af < s2->af)
		return -sortdir;

	ii = io = 0;

	if (dir == PF_OUT)	/* looking for source addr */
		io = 1;
	else			/* looking for dest addr */
		ii = 1;

	if (s1->direction == PF_IN) {
		aa = &s1->key[PF_SK_STACK].addr[ii];
		pa = s1->key[PF_SK_STACK].port[ii];
	} else {
		aa = &s1->key[PF_SK_WIRE].addr[io];
		pa = s1->key[PF_SK_WIRE].port[io];
	}

	if (s2->direction == PF_IN) {
		ab = &s2->key[PF_SK_STACK].addr[ii];
		pb = s2->key[PF_SK_STACK].port[ii];
	} else {
		ab = &s2->key[PF_SK_WIRE].addr[io];
		pb = s2->key[PF_SK_WIRE].port[io];
	}

	ret = compare_addr(af, aa, ab);
	if (ret)
		return ret * sortdir;

	if (ntohs(pa) > ntohs(pb))
		return sortdir;
	return -sortdir;
}

static __inline int
sort_port_callback(const struct pfsync_state *s1,
    const struct pfsync_state *s2, int dir)
{
	const struct pf_addr *aa, *ab;
	u_int16_t pa, pb;
	int af, ret, ii, io;

	af = s1->af;

	if (af > s2->af)
		return sortdir;
	if (af < s2->af)
		return -sortdir;

	ii = io = 0;

	if (dir == PF_OUT)	/* looking for source addr */
		io = 1;
	else			/* looking for dest addr */
		ii = 1;

	if (s1->direction == PF_IN) {
		aa = &s1->key[PF_SK_STACK].addr[ii];
		pa = s1->key[PF_SK_STACK].port[ii];
	} else {
		aa = &s1->key[PF_SK_WIRE].addr[io];
		pa = s1->key[PF_SK_WIRE].port[io];
	}

	if (s2->direction == PF_IN) {
		ab = &s2->key[PF_SK_STACK].addr[ii];
		pb = s2->key[PF_SK_STACK].port[ii];
	} else {
		ab = &s2->key[PF_SK_WIRE].addr[io];
		pb = s2->key[PF_SK_WIRE].port[io];
	}

	if (ntohs(pa) > ntohs(pb))
		return sortdir;
	if (ntohs(pa) < ntohs(pb))
		return -sortdir;

	ret = compare_addr(af, aa, ab);
	if (ret)
		return ret * sortdir;
	return -sortdir;
}
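/*
 * qsort/mergesort wrappers: turn the indices stored in state_ord into
 * pfsync_state pointers and dispatch to the address/port comparators.
 */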
int
sort_sa_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_addr_callback(s1, s2, PF_OUT);
}

int
sort_da_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_addr_callback(s1, s2, PF_IN);
}

int
sort_sp_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_port_callback(s1, s2, PF_OUT);
}

int
sort_dp_callback(const void *p1, const void *p2)
{
	struct pfsync_state *s1 = state_buf + (* (u_int32_t *) p1);
	struct pfsync_state *s2 = state_buf + (* (u_int32_t *) p2);
	return sort_port_callback(s1, s2, PF_IN);
}

void
sort_states(void)
{
	order_type *ordering;

	if (curr_mgr == NULL)
		return;

	ordering = curr_mgr->order_curr;

	if (ordering == NULL)
		return;
	if (ordering->func == NULL)
		return;
	if (state_buf == NULL)
		return;
	if (num_states <= 0)
		return;

	mergesort(state_ord, num_states, sizeof(u_int32_t), ordering->func);
}

/* state management functions */

void
alloc_buf(int ns)
{
	int len;

	if (ns < MIN_NUM_STATES)
		ns = MIN_NUM_STATES;

	len = ns;

	if (len >= state_buf_len) {
		len += NUM_STATE_INC;
		state_buf = realloc(state_buf, len * sizeof(struct pfsync_state));
		state_ord = realloc(state_ord, len * sizeof(u_int32_t));
		state_cache = realloc(state_cache,
		    len * sizeof(struct sc_ent *));
		if (state_buf == NULL || state_ord == NULL ||
		    state_cache == NULL)
			err(1, "realloc");
		state_buf_len = len;
	}
}

int
select_states(void)
{
	num_disp = num_states;
	return (0);
}

int
read_states(void)
{
	struct pfioc_states ps;
	int n;

	if (pf_dev == -1)
		return -1;

	for (;;) {
		int sbytes = state_buf_len * sizeof(struct pfsync_state);

		ps.ps_len = sbytes;
		ps.ps_buf = (char *) state_buf;

		if (ioctl(pf_dev, DIOCGETSTATES, &ps) < 0) {
			error("DIOCGETSTATES");
		}
		num_states_all = ps.ps_len / sizeof(struct pfsync_state);

		if (ps.ps_len < sbytes)
			break;

		alloc_buf(num_states_all);
	}

	if (dumpfilter) {
		int fd = open("state.dmp", O_WRONLY|O_CREAT|O_EXCL, 0);
		if (fd > 0) {
			write(fd, state_buf, ps.ps_len);
			close(fd);
		}
	}

	num_states = num_states_all;
	for (n = 0; n < num_states_all; n++)
		state_ord[n] = n;

	if (cachestates) {
		for (n = 0; n < num_states; n++)
			state_cache[n] = cache_state(state_buf + n);
		cache_endupdate();
	}

	num_disp = num_states;
	return 0;
}

int
unmask(struct pf_addr *m, u_int8_t af)
{
	int i = 31, j = 0, b = 0, msize;
	u_int32_t tmp;

	if (af == AF_INET)
		msize = 1;
	else
		msize = 4;
	while (j < msize && m->addr32[j] == 0xffffffff) {
		b += 32;
		j++;
	}
	if (j < msize) {
		tmp = ntohl(m->addr32[j]);
		for (i = 31; tmp & (1 << i); --i)
			b++;
	}
	return (b);
}

/* display functions */

void
tb_print_addr(struct pf_addr *addr, struct pf_addr *mask, int af)
{
	switch (af) {
	case AF_INET: {
		tbprintf("%s", inetname(addr->v4));
		break;
	}
	case AF_INET6: {
		tbprintf("%s", inet6name(&addr->v6));
		break;
	}
	}

	if (mask != NULL) {
		if (!PF_AZERO(mask, af))
			tbprintf("/%u", unmask(mask, af));
	}
}
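/*
 * Print one endpoint of a state: the address and port taken from key
 * 'ks'.  If the corresponding entry in 'kn' differs (the endpoint is
 * translated), the translated address:port is also printed into the
 * GW column.
 */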
void
print_fld_host2(field_def *fld, struct pfsync_state_key *ks,
    struct pfsync_state_key *kn, int idx, int af)
{
	struct pf_addr *as = &ks->addr[idx];
	struct pf_addr *an = &kn->addr[idx];

	u_int16_t ps = ntohs(ks->port[idx]);
	u_int16_t pn = ntohs(kn->port[idx]);

	if (fld == NULL)
		return;

	if (fld->width < 3) {
		print_fld_str(fld, "*");
		return;
	}

	tb_start();
	tb_print_addr(as, NULL, af);

	if (af == AF_INET)
		tbprintf(":%u", ps);
	else
		tbprintf("[%u]", ps);

	print_fld_tb(fld);

	if (PF_ANEQ(as, an, af) || ps != pn) {
		tb_start();
		tb_print_addr(an, NULL, af);

		if (af == AF_INET)
			tbprintf(":%u", pn);
		else
			tbprintf("[%u]", pn);
		print_fld_tb(FLD_GW);
	}
}

void
print_fld_state(field_def *fld, unsigned int proto,
    unsigned int s1, unsigned int s2)
{
	int len;

	if (fld == NULL)
		return;

	len = fld->width;
	if (len < 1)
		return;

	tb_start();

	if (proto == IPPROTO_TCP) {
		if (s1 <= TCPS_TIME_WAIT && s2 <= TCPS_TIME_WAIT)
			tbprintf("%s:%s", tcpstates[s1], tcpstates[s2]);
#ifdef PF_TCPS_PROXY_SRC
		else if (s1 == PF_TCPS_PROXY_SRC ||
		    s2 == PF_TCPS_PROXY_SRC)
			tbprintf("PROXY:SRC\n");
		else if (s1 == PF_TCPS_PROXY_DST ||
		    s2 == PF_TCPS_PROXY_DST)
			tbprintf("PROXY:DST\n");
#endif
		else
			tbprintf("<BAD STATE LEVELS>");
	} else if (proto == IPPROTO_UDP && s1 < PFUDPS_NSTATES &&
	    s2 < PFUDPS_NSTATES) {
		const char *states[] = PFUDPS_NAMES;
		tbprintf("%s:%s", states[s1], states[s2]);
	} else if (proto != IPPROTO_ICMP && s1 < PFOTHERS_NSTATES &&
	    s2 < PFOTHERS_NSTATES) {
		/* XXX ICMP doesn't really have state levels */
		const char *states[] = PFOTHERS_NAMES;
		tbprintf("%s:%s", states[s1], states[s2]);
	} else {
		tbprintf("%u:%u", s1, s2);
	}

	if (strlen(tmp_buf) > len) {
		tb_start();
		tbprintf("%u:%u", s1, s2);
	}

	print_fld_tb(fld);
}
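/* print a single state entry; ent is its rate-cache entry, or NULL */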
int
print_state(struct pfsync_state *s, struct sc_ent *ent)
{
	struct pfsync_state_peer *src, *dst;
	struct protoent *p;
	u_int64_t sz;

	if (s->direction == PF_OUT) {
		src = &s->src;
		dst = &s->dst;
	} else {
		src = &s->dst;
		dst = &s->src;
	}

	p = getprotobynumber(s->proto);

	if (p != NULL)
		print_fld_str(FLD_PROTO, p->p_name);
	else
		print_fld_uint(FLD_PROTO, s->proto);

	if (s->direction == PF_OUT) {
		print_fld_host2(FLD_SRC, &s->key[PF_SK_WIRE],
		    &s->key[PF_SK_STACK], 1, s->af);
		print_fld_host2(FLD_DEST, &s->key[PF_SK_WIRE],
		    &s->key[PF_SK_STACK], 0, s->af);
	} else {
		print_fld_host2(FLD_SRC, &s->key[PF_SK_STACK],
		    &s->key[PF_SK_WIRE], 0, s->af);
		print_fld_host2(FLD_DEST, &s->key[PF_SK_STACK],
		    &s->key[PF_SK_WIRE], 1, s->af);
	}

	if (s->direction == PF_OUT)
		print_fld_str(FLD_DIR, "Out");
	else
		print_fld_str(FLD_DIR, "In");

	print_fld_state(FLD_STATE, s->proto, src->state, dst->state);
	print_fld_age(FLD_AGE, ntohl(s->creation));
	print_fld_age(FLD_EXP, ntohl(s->expire));

	sz = COUNTER(s->bytes[0]) + COUNTER(s->bytes[1]);

	print_fld_size(FLD_PKTS, COUNTER(s->packets[0]) +
	    COUNTER(s->packets[1]));
	print_fld_size(FLD_BYTES, sz);
	print_fld_rate(FLD_SA, (s->creation) ?
	    ((double)sz / ntohl(s->creation)) : -1);

	print_fld_uint(FLD_RULE, ntohl(s->rule));
	if (cachestates && ent != NULL) {
		print_fld_rate(FLD_SI, ent->rate);
		print_fld_rate(FLD_SP, ent->peak);
	}

	end_line();
	return 1;
}

void
print_states(void)
{
	int n, count = 0;

	for (n = dispstart; n < num_disp; n++) {
		count += print_state(state_buf + state_ord[n],
		    state_cache[state_ord[n]]);
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* rule display */

struct pf_rule *rules = NULL;
u_int32_t alloc_rules = 0;

int
select_rules(void)
{
	num_disp = num_rules;
	return (0);
}


void
add_rule_alloc(u_int32_t nr)
{
	if (nr == 0)
		return;

	num_rules += nr;

	if (rules == NULL) {
		rules = malloc(num_rules * sizeof(struct pf_rule));
		if (rules == NULL)
			err(1, "malloc");
		alloc_rules = num_rules;
	} else if (num_rules > alloc_rules) {
		rules = realloc(rules, num_rules * sizeof(struct pf_rule));
		if (rules == NULL)
			err(1, "realloc");
		alloc_rules = num_rules;
	}
}

int label_length;

int
read_anchor_rules(char *anchor)
{
	struct pfioc_rule pr;
	u_int32_t nr, num, off;
	int len;

	if (pf_dev < 0)
		return (-1);

	memset(&pr, 0, sizeof(pr));
	strlcpy(pr.anchor, anchor, sizeof(pr.anchor));

	if (ioctl(pf_dev, DIOCGETRULES, &pr)) {
		error("anchor %s: %s", anchor, strerror(errno));
		return (-1);
	}

	off = num_rules;
	num = pr.nr;
	add_rule_alloc(num);

	for (nr = 0; nr < num; ++nr) {
		pr.nr = nr;
		if (ioctl(pf_dev, DIOCGETRULE, &pr)) {
			error("DIOCGETRULE: %s", strerror(errno));
			return (-1);
		}
		/* XXX overload pr.anchor, to store a pointer to
		 * anchor name */
		pr.rule.anchor = (struct pf_anchor *) anchor;
		len = strlen(pr.rule.label);
		if (len > label_length)
			label_length = len;
		rules[off + nr] = pr.rule;
	}

	return (num);
}

struct anchor_name {
	char name[MAXPATHLEN];
	struct anchor_name *next;
	u_int32_t ref;
};

struct anchor_name *anchor_root = NULL;
struct anchor_name *anchor_end = NULL;
struct anchor_name *anchor_free = NULL;

struct anchor_name *
alloc_anchor_name(const char *path)
{
	struct anchor_name *a;

	a = anchor_free;
	if (a == NULL) {
		a = (struct anchor_name *)malloc(sizeof(struct anchor_name));
		if (a == NULL)
			return (NULL);
	} else
		anchor_free = a->next;

	if (anchor_root == NULL)
		anchor_end = a;

	a->next = anchor_root;
	anchor_root = a;

	a->ref = 0;
	strlcpy(a->name, path, sizeof(a->name));
	return (a);
}

void
reset_anchor_names(void)
{
	if (anchor_end == NULL)
		return;

	anchor_end->next = anchor_free;
	anchor_free = anchor_root;
	anchor_root = anchor_end = NULL;
}

struct pfioc_ruleset ruleset;
char *rs_end = NULL;
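/*
 * Read the rules of the anchor named by 'path', then recurse into each
 * of its child rulesets.  The global 'ruleset' ioctl buffer is reused
 * as scratch space for building the child paths.
 */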
int
read_rulesets(const char *path)
{
	char *pre;
	struct anchor_name *a;
	u_int32_t nr, ns;
	int len;

	if (path == NULL)
		ruleset.path[0] = '\0';
	else if (strlcpy(ruleset.path, path, sizeof(ruleset.path)) >=
	    sizeof(ruleset.path))
		return (-1);

	/* a persistent storage for anchor names */
	a = alloc_anchor_name(ruleset.path);
	if (a == NULL)
		return (-1);

	len = read_anchor_rules(a->name);
	if (len < 0)
		return (-1);

	a->ref += len;

	if (ioctl(pf_dev, DIOCGETRULESETS, &ruleset)) {
		error("DIOCGETRULESETS: %s", strerror(errno));
		return (-1);
	}

	ns = ruleset.nr;

	if (rs_end == NULL)
		rs_end = ruleset.path + sizeof(ruleset.path);

	/* 'pre' tracks the previous level on the anchor */
	pre = strchr(ruleset.path, 0);
	len = rs_end - pre;
	if (len < 1)
		return (-1);
	--len;

	for (nr = 0; nr < ns; ++nr) {
		ruleset.nr = nr;
		if (ioctl(pf_dev, DIOCGETRULESET, &ruleset)) {
			error("DIOCGETRULESET: %s", strerror(errno));
			return (-1);
		}
		*pre = '/';
		if (strlcpy(pre + 1, ruleset.name, len) < len)
			read_rulesets(ruleset.path);
		*pre = '\0';
	}

	return (0);
}

void
compute_anchor_field(void)
{
	struct anchor_name *a;
	int sum, cnt, mx, nx;
	sum = cnt = mx = 0;

	for (a = anchor_root; a != NULL; a = a->next, cnt++) {
		int len;
		if (a->ref == 0)
			continue;
		len = strlen(a->name);
		sum += len;
		if (len > mx)
			mx = len;
	}

	nx = sum / cnt;
	if (nx < ANCHOR_FLD_SIZE)
		nx = (mx < ANCHOR_FLD_SIZE) ? mx : ANCHOR_FLD_SIZE;

	if (FLD_ANCHOR->max_width != mx ||
	    FLD_ANCHOR->norm_width != nx) {
		FLD_ANCHOR->max_width = mx;
		FLD_ANCHOR->norm_width = nx;
		field_setup();
		need_update = 1;
	}
}

int
read_rules(void)
{
	int ret, nw, mw;
	num_rules = 0;

	if (pf_dev == -1)
		return (-1);

	label_length = MIN_LABEL_SIZE;

	reset_anchor_names();
	ret = read_rulesets(NULL);
	compute_anchor_field();

	nw = mw = label_length;
	if (nw > 16)
		nw = 16;

	if (FLD_LABEL->norm_width != nw ||
	    FLD_LABEL->max_width != mw) {
		FLD_LABEL->norm_width = nw;
		FLD_LABEL->max_width = mw;
		field_setup();
		need_update = 1;
	}

	num_disp = num_rules;
	return (ret);
}

void
tb_print_addrw(struct pf_addr_wrap *addr, struct pf_addr *mask, u_int8_t af)
{
	switch (addr->type) {
	case PF_ADDR_ADDRMASK:
		tb_print_addr(&addr->v.a.addr, mask, af);
		break;
	case PF_ADDR_NOROUTE:
		tbprintf("noroute");
		break;
	case PF_ADDR_DYNIFTL:
		tbprintf("(%s)", addr->v.ifname);
		break;
	case PF_ADDR_TABLE:
		tbprintf("<%s>", addr->v.tblname);
		break;
	default:
		tbprintf("UNKNOWN");
		break;
	}
}

void
tb_print_op(u_int8_t op, const char *a1, const char *a2)
{
	if (op == PF_OP_IRG)
		tbprintf("%s >< %s ", a1, a2);
	else if (op == PF_OP_XRG)
		tbprintf("%s <> %s ", a1, a2);
	else if (op == PF_OP_RRG)
		tbprintf("%s:%s ", a1, a2);
	else if (op == PF_OP_EQ)
		tbprintf("= %s ", a1);
	else if (op == PF_OP_NE)
		tbprintf("!= %s ", a1);
	else if (op == PF_OP_LT)
		tbprintf("< %s ", a1);
	else if (op == PF_OP_LE)
		tbprintf("<= %s ", a1);
	else if (op == PF_OP_GT)
		tbprintf("> %s ", a1);
	else if (op == PF_OP_GE)
		tbprintf(">= %s ", a1);
}
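/*
 * Print a port match.  For plain equality/inequality the service name
 * from getservbyport() is used when one is known.
 */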
void
tb_print_port(u_int8_t op, u_int16_t p1, u_int16_t p2, char *proto)
{
	char a1[6], a2[6];
	struct servent *s = getservbyport(p1, proto);

	p1 = ntohs(p1);
	p2 = ntohs(p2);
	snprintf(a1, sizeof(a1), "%u", p1);
	snprintf(a2, sizeof(a2), "%u", p2);
	tbprintf("port ");
	if (s != NULL && (op == PF_OP_EQ || op == PF_OP_NE))
		tb_print_op(op, s->s_name, a2);
	else
		tb_print_op(op, a1, a2);
}

void
tb_print_fromto(struct pf_rule_addr *src, struct pf_rule_addr *dst,
    u_int8_t af, u_int8_t proto)
{
	if (
	    PF_AZERO(PT_ADDR(src), AF_INET6) &&
	    PF_AZERO(PT_ADDR(dst), AF_INET6) &&
	    ! PT_NOROUTE(src) && ! PT_NOROUTE(dst) &&
	    PF_AZERO(PT_MASK(src), AF_INET6) &&
	    PF_AZERO(PT_MASK(dst), AF_INET6) &&
	    !src->port_op && !dst->port_op)
		tbprintf("all ");
	else {
		tbprintf("from ");
		if (PT_NOROUTE(src))
			tbprintf("no-route ");
		else if (PF_AZERO(PT_ADDR(src), AF_INET6) &&
		    PF_AZERO(PT_MASK(src), AF_INET6))
			tbprintf("any ");
		else {
			if (src->neg)
				tbprintf("! ");
			tb_print_addrw(&src->addr, PT_MASK(src), af);
			tbprintf(" ");
		}
		if (src->port_op)
			tb_print_port(src->port_op, src->port[0],
			    src->port[1],
			    proto == IPPROTO_TCP ? "tcp" : "udp");

		tbprintf("to ");
		if (PT_NOROUTE(dst))
			tbprintf("no-route ");
		else if (PF_AZERO(PT_ADDR(dst), AF_INET6) &&
		    PF_AZERO(PT_MASK(dst), AF_INET6))
			tbprintf("any ");
		else {
			if (dst->neg)
				tbprintf("! ");
			tb_print_addrw(&dst->addr, PT_MASK(dst), af);
			tbprintf(" ");
		}
		if (dst->port_op)
			tb_print_port(dst->port_op, dst->port[0],
			    dst->port[1],
			    proto == IPPROTO_TCP ? "tcp" : "udp");
	}
}

void
tb_print_ugid(u_int8_t op, unsigned u1, unsigned u2,
    const char *t, unsigned umax)
{
	char a1[11], a2[11];

	snprintf(a1, sizeof(a1), "%u", u1);
	snprintf(a2, sizeof(a2), "%u", u2);

	tbprintf("%s ", t);
	if (u1 == umax && (op == PF_OP_EQ || op == PF_OP_NE))
		tb_print_op(op, "unknown", a2);
	else
		tb_print_op(op, a1, a2);
}

void
tb_print_flags(u_int8_t f)
{
	const char *tcpflags = "FSRPAUEW";
	int i;

	for (i = 0; tcpflags[i]; ++i)
		if (f & (1 << i))
			tbprintf("%c", tcpflags[i]);
}
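/*
 * Print one rule.  The INFO column carries a condensed rendering of the
 * rule body; it is not a complete reconstruction (see the XXX below).
 */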
void
print_rule(struct pf_rule *pr)
{
	static const char *actiontypes[] = { "Pass", "Block", "Scrub",
	    "no Scrub", "Nat", "no Nat", "Binat", "no Binat", "Rdr",
	    "no Rdr", "SynProxy Block", "Defer", "Match" };
	int numact = sizeof(actiontypes) / sizeof(char *);

	static const char *routetypes[] = { "", "fastroute", "route-to",
	    "dup-to", "reply-to" };

	int numroute = sizeof(routetypes) / sizeof(char *);

	if (pr == NULL)
		return;

	print_fld_str(FLD_LABEL, pr->label);
	print_fld_size(FLD_STATS, pr->states_tot);

	print_fld_size(FLD_PKTS, pr->packets[0] + pr->packets[1]);
	print_fld_size(FLD_BYTES, pr->bytes[0] + pr->bytes[1]);

	print_fld_uint(FLD_RULE, pr->nr);
	if (pr->direction == PF_OUT)
		print_fld_str(FLD_DIR, "Out");
	else if (pr->direction == PF_IN)
		print_fld_str(FLD_DIR, "In");
	else
		print_fld_str(FLD_DIR, "Any");

	if (pr->quick)
		print_fld_str(FLD_QUICK, "Quick");

	if (pr->keep_state == PF_STATE_NORMAL)
		print_fld_str(FLD_KST, "Keep");
	else if (pr->keep_state == PF_STATE_MODULATE)
		print_fld_str(FLD_KST, "Mod");
#ifdef PF_STATE_SYNPROXY
	else if (pr->keep_state == PF_STATE_SYNPROXY)
		print_fld_str(FLD_KST, "Syn");
#endif
	if (pr->log == 1)
		print_fld_str(FLD_LOG, "Log");
	else if (pr->log == 2)
		print_fld_str(FLD_LOG, "All");

	if (pr->action >= numact)
		print_fld_uint(FLD_ACTION, pr->action);
	else
		print_fld_str(FLD_ACTION, actiontypes[pr->action]);

	if (pr->proto) {
		struct protoent *p = getprotobynumber(pr->proto);

		if (p != NULL)
			print_fld_str(FLD_PROTO, p->p_name);
		else
			print_fld_uint(FLD_PROTO, pr->proto);
	}

	if (pr->ifname[0]) {
		tb_start();
		if (pr->ifnot)
			tbprintf("!");
		tbprintf("%s", pr->ifname);
		print_fld_tb(FLD_IF);
	}
	if (pr->max_states)
		print_fld_uint(FLD_STMAX, pr->max_states);

	/* print info field */

	tb_start();

	if (pr->action == PF_DROP) {
		if (pr->rule_flag & PFRULE_RETURNRST)
			tbprintf("return-rst ");
#ifdef PFRULE_RETURN
		else if (pr->rule_flag & PFRULE_RETURN)
			tbprintf("return ");
#endif
#ifdef PFRULE_RETURNICMP
		else if (pr->rule_flag & PFRULE_RETURNICMP)
			tbprintf("return-icmp ");
#endif
		else
			tbprintf("drop ");
	}

	if (pr->rt > 0 && pr->rt < numroute) {
		tbprintf("%s ", routetypes[pr->rt]);
	}

	if (pr->af) {
		if (pr->af == AF_INET)
			tbprintf("inet ");
		else
			tbprintf("inet6 ");
	}

	tb_print_fromto(&pr->src, &pr->dst, pr->af, pr->proto);

	if (pr->uid.op)
		tb_print_ugid(pr->uid.op, pr->uid.uid[0], pr->uid.uid[1],
		    "user", UID_MAX);
	if (pr->gid.op)
		tb_print_ugid(pr->gid.op, pr->gid.gid[0], pr->gid.gid[1],
		    "group", GID_MAX);

	if (pr->action == PF_PASS &&
	    (pr->proto == 0 || pr->proto == IPPROTO_TCP) &&
	    (pr->flags != TH_SYN || pr->flagset != (TH_SYN | TH_ACK))) {
		tbprintf("flags ");
		if (pr->flags || pr->flagset) {
			tb_print_flags(pr->flags);
			tbprintf("/");
			tb_print_flags(pr->flagset);
		} else
			tbprintf("any ");
	}

	tbprintf(" ");

	if (pr->tos)
		tbprintf("tos 0x%2.2x ", pr->tos);
#ifdef PFRULE_FRAGMENT
	if (pr->rule_flag & PFRULE_FRAGMENT)
		tbprintf("fragment ");
#endif
#ifdef PFRULE_NODF
	if (pr->rule_flag & PFRULE_NODF)
		tbprintf("no-df ");
#endif
#ifdef PFRULE_RANDOMID
	if (pr->rule_flag & PFRULE_RANDOMID)
		tbprintf("random-id ");
#endif
	if (pr->min_ttl)
		tbprintf("min-ttl %d ", pr->min_ttl);
	if (pr->max_mss)
		tbprintf("max-mss %d ", pr->max_mss);
	if (pr->allow_opts)
		tbprintf("allow-opts ");

	/* XXX more missing */

	if (pr->qname[0] && pr->pqname[0])
		tbprintf("queue(%s, %s) ", pr->qname, pr->pqname);
	else if (pr->qname[0])
		tbprintf("queue %s ", pr->qname);

	if (pr->tagname[0])
		tbprintf("tag %s ", pr->tagname);
	if (pr->match_tagname[0]) {
		if (pr->match_tag_not)
			tbprintf("! ");
		tbprintf("tagged %s ", pr->match_tagname);
	}

	print_fld_tb(FLD_RINFO);

	/* XXX anchor field overloaded with anchor name */
	print_fld_str(FLD_ANCHOR, (char *)pr->anchor);
	tb_end();

	end_line();
}

void
print_rules(void)
{
	u_int32_t n, count = 0;

	for (n = dispstart; n < num_rules; n++) {
		print_rule(rules + n);
		count++;
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* queue display */

struct pf_altq_node *
pfctl_find_altq_node(struct pf_altq_node *root, const char *qname,
    const char *ifname)
{
	struct pf_altq_node *node, *child;

	for (node = root; node != NULL; node = node->next) {
		if (!strcmp(node->altq.qname, qname)
		    && !(strcmp(node->altq.ifname, ifname)))
			return (node);
		if (node->children != NULL) {
			child = pfctl_find_altq_node(node->children, qname,
			    ifname);
			if (child != NULL)
				return (child);
		}
	}
	return (NULL);
}

void
pfctl_insert_altq_node(struct pf_altq_node **root,
    const struct pf_altq altq, const struct queue_stats qstats)
{
	struct pf_altq_node *node;

	node = calloc(1, sizeof(struct pf_altq_node));
	if (node == NULL)
		err(1, "pfctl_insert_altq_node: calloc");
	memcpy(&node->altq, &altq, sizeof(struct pf_altq));
	memcpy(&node->qstats, &qstats, sizeof(qstats));
	node->next = node->children = node->next_flat = NULL;
	node->depth = 0;
	node->visited = 1;

	if (*root == NULL)
		*root = node;
	else if (!altq.parent[0]) {
		struct pf_altq_node *prev = *root;

		while (prev->next != NULL)
			prev = prev->next;
		prev->next = node;
	} else {
		struct pf_altq_node *parent;

		parent = pfctl_find_altq_node(*root, altq.parent, altq.ifname);
		if (parent == NULL)
			errx(1, "parent %s not found", altq.parent);
		node->depth = parent->depth + 1;
		if (parent->children == NULL)
			parent->children = node;
		else {
			struct pf_altq_node *prev = parent->children;

			while (prev->next != NULL)
				prev = prev->next;
			prev->next = node;
		}
	}
}
void
pfctl_set_next_flat(struct pf_altq_node *node, struct pf_altq_node *up)
{
	while (node) {
		struct pf_altq_node *next = node->next ? node->next : up;
		if (node->children) {
			node->next_flat = node->children;
			pfctl_set_next_flat(node->children, next);
		} else
			node->next_flat = next;
		node = node->next;
	}
}

int
pfctl_update_qstats(struct pf_altq_node **root, int *inserts)
{
	struct pf_altq_node *node;
	struct pfioc_altq pa;
	struct pfioc_qstats pq;
	u_int32_t nr;
	struct queue_stats qstats;
	u_int32_t nr_queues;
	int ret = 0;

	*inserts = 0;
	memset(&pa, 0, sizeof(pa));
	memset(&pq, 0, sizeof(pq));
	memset(&qstats, 0, sizeof(qstats));

	if (pf_dev < 0)
		return (-1);

	if (ioctl(pf_dev, DIOCGETALTQS, &pa)) {
		error("DIOCGETALTQS: %s", strerror(errno));
		return (-1);
	}

	num_queues = nr_queues = pa.nr;
	for (nr = 0; nr < nr_queues; ++nr) {
		pa.nr = nr;
		if (ioctl(pf_dev, DIOCGETALTQ, &pa)) {
			error("DIOCGETALTQ: %s", strerror(errno));
			ret = -1;
			break;
		}
		if (pa.altq.qid > 0) {
			pq.nr = nr;
			pq.ticket = pa.ticket;
			pq.buf = &qstats;
			pq.nbytes = sizeof(qstats);
			if (ioctl(pf_dev, DIOCGETQSTATS, &pq)) {
				error("DIOCGETQSTATS: %s", strerror(errno));
				ret = -1;
				break;
			}
			qstats.valid = 1;
			gettimeofday(&qstats.timestamp, NULL);
			if ((node = pfctl_find_altq_node(*root, pa.altq.qname,
			    pa.altq.ifname)) != NULL) {
				/* update altq data too as bandwidth may have changed */
				memcpy(&node->altq, &pa.altq, sizeof(struct pf_altq));
				memcpy(&node->qstats_last, &node->qstats,
				    sizeof(struct queue_stats));
				memcpy(&node->qstats, &qstats,
				    sizeof(qstats));
				node->visited = 1;
			} else {
				pfctl_insert_altq_node(root, pa.altq, qstats);
				*inserts = 1;
			}
		} else
			--num_queues;
	}

	pfctl_set_next_flat(*root, NULL);

	return (ret);
}

void
pfctl_free_altq_node(struct pf_altq_node *node)
{
	while (node != NULL) {
		struct pf_altq_node *prev;

		if (node->children != NULL)
			pfctl_free_altq_node(node->children);
		prev = node;
		node = node->next;
		free(prev);
	}
}

void
pfctl_mark_all_unvisited(struct pf_altq_node *root)
{
	if (root != NULL) {
		struct pf_altq_node *node = root;
		while (node != NULL) {
			node->visited = 0;
			node = node->next_flat;
		}
	}
}

int
pfctl_have_unvisited(struct pf_altq_node *root)
{
	if (root == NULL)
		return (0);
	else {
		struct pf_altq_node *node = root;
		while (node != NULL) {
			if (node->visited == 0)
				return (1);
			node = node->next_flat;
		}
		return (0);
	}
}

struct pf_altq_node *altq_root = NULL;

int
select_queues(void)
{
	num_disp = num_queues;
	return (0);
}

int
read_queues(void)
{
	static int first_read = 1;
	int inserts;
	num_disp = num_queues = 0;

	pfctl_mark_all_unvisited(altq_root);
	if (pfctl_update_qstats(&altq_root, &inserts))
		return (-1);

	/*
	 * Allow inserts only on first read;
	 * on subsequent reads clear and reload.
	 */
	if (first_read == 0 &&
	    (inserts != 0 || pfctl_have_unvisited(altq_root) != 0)) {
		pfctl_free_altq_node(altq_root);
		altq_root = NULL;
		first_read = 1;
		if (pfctl_update_qstats(&altq_root, &inserts))
			return (-1);
	}

	first_read = 0;
	num_disp = num_queues;

	return (0);
}
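/*
 * Rate helpers: compute the time between two qstats snapshots and turn
 * byte/packet deltas into per-second figures.
 */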
double
calc_interval(struct timeval *cur_time, struct timeval *last_time)
{
	double sec;

	sec = (double)(cur_time->tv_sec - last_time->tv_sec) +
	    (double)(cur_time->tv_usec - last_time->tv_usec) / 1000000;

	return (sec);
}

double
calc_rate(u_int64_t new_bytes, u_int64_t last_bytes, double interval)
{
	double rate;

	rate = (double)(new_bytes - last_bytes) / interval;
	return (rate);
}

double
calc_pps(u_int64_t new_pkts, u_int64_t last_pkts, double interval)
{
	double pps;

	pps = (double)(new_pkts - last_pkts) / interval;
	return (pps);
}

#define DEFAULT_PRIORITY 1
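/*
 * Print one queue: per-scheduler counters, plus packet and byte rates
 * derived from the previous snapshot when one is available.
 */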
void
print_queue(struct pf_altq_node *node)
{
	u_int8_t d;
	double interval, pps, bps;
	pps = bps = 0;

	tb_start();
	for (d = 0; d < node->depth; d++)
		tbprintf(" ");
	tbprintf("%s", node->altq.qname);
	print_fld_tb(FLD_QUEUE);

	if (node->altq.scheduler == ALTQT_CBQ ||
	    node->altq.scheduler == ALTQT_HFSC)
		print_fld_bw(FLD_BANDW, (double)node->altq.bandwidth);

	if (node->altq.priority != DEFAULT_PRIORITY)
		print_fld_uint(FLD_PRIO,
		    node->altq.priority);

	if (node->qstats.valid && node->qstats_last.valid)
		interval = calc_interval(&node->qstats.timestamp,
		    &node->qstats_last.timestamp);
	else
		interval = 0;

	switch (node->altq.scheduler) {
	case ALTQT_CBQ:
		print_fld_str(FLD_SCHED, "cbq");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.cbq_stats.xmit_cnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.cbq_stats.xmit_cnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.cbq_stats.drop_cnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.cbq_stats.drop_cnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.cbq_stats.qcnt);
		print_fld_size(FLD_BORR, node->qstats.data.cbq_stats.borrows);
		print_fld_size(FLD_SUSP, node->qstats.data.cbq_stats.delays);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.cbq_stats.xmit_cnt.packets,
			    node->qstats_last.data.cbq_stats.xmit_cnt.packets, interval);
			bps = calc_rate(node->qstats.data.cbq_stats.xmit_cnt.bytes,
			    node->qstats_last.data.cbq_stats.xmit_cnt.bytes, interval);
		}
		break;
	case ALTQT_PRIQ:
		print_fld_str(FLD_SCHED, "priq");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.priq_stats.xmitcnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.priq_stats.xmitcnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.priq_stats.dropcnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.priq_stats.dropcnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.priq_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.priq_stats.xmitcnt.packets,
			    node->qstats_last.data.priq_stats.xmitcnt.packets, interval);
			bps = calc_rate(node->qstats.data.priq_stats.xmitcnt.bytes,
			    node->qstats_last.data.priq_stats.xmitcnt.bytes, interval);
		}
		break;
	case ALTQT_HFSC:
		print_fld_str(FLD_SCHED, "hfsc");
		print_fld_size(FLD_PKTS,
		    node->qstats.data.hfsc_stats.xmit_cnt.packets);
		print_fld_size(FLD_BYTES,
		    node->qstats.data.hfsc_stats.xmit_cnt.bytes);
		print_fld_size(FLD_DROPP,
		    node->qstats.data.hfsc_stats.drop_cnt.packets);
		print_fld_size(FLD_DROPB,
		    node->qstats.data.hfsc_stats.drop_cnt.bytes);
		print_fld_size(FLD_QLEN, node->qstats.data.hfsc_stats.qlength);
		if (interval > 0) {
			pps = calc_pps(node->qstats.data.hfsc_stats.xmit_cnt.packets,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.packets, interval);
			bps = calc_rate(node->qstats.data.hfsc_stats.xmit_cnt.bytes,
			    node->qstats_last.data.hfsc_stats.xmit_cnt.bytes, interval);
		}
		break;
	}

	/* if (node->altq.scheduler != ALTQT_HFSC && interval > 0) { */
	if (node->altq.scheduler && interval > 0) {
		tb_start();
		if (pps > 0 && pps < 1)
			tbprintf("%-3.1lf", pps);
		else
			tbprintf("%u", (unsigned int) pps);

		print_fld_tb(FLD_PKTSPS);
		print_fld_bw(FLD_BYTESPS, bps);
	}
}

void
print_queues(void)
{
	u_int32_t n, count = 0;
	struct pf_altq_node *node = altq_root;

	for (n = 0; n < dispstart; n++)
		node = node->next_flat;

	for (; n < num_disp; n++) {
		print_queue(node);
		node = node->next_flat;
		end_line();
		count++;
		if (maxprint > 0 && count >= maxprint)
			break;
	}
}

/* main program functions */

void
update_cache(void)
{
	static int pstate = -1;
	if (pstate == cachestates)
		return;

	pstate = cachestates;
	if (cachestates) {
		show_field(FLD_SI);
		show_field(FLD_SP);
		gotsig_alarm = 1;
	} else {
		hide_field(FLD_SI);
		hide_field(FLD_SP);
		need_update = 1;
	}
	field_setup();
}

int
initpftop(void)
{
	struct pf_status status;
	field_view *v;
	int cachesize = DEFAULT_CACHE_SIZE;

	v = views;
	while (v->name != NULL)
		add_view(v++);

	pf_dev = open("/dev/pf", O_RDONLY);
	if (pf_dev == -1) {
		alloc_buf(0);
	} else if (ioctl(pf_dev, DIOCGETSTATUS, &status)) {
		warn("DIOCGETSTATUS");
		alloc_buf(0);
	} else
		alloc_buf(status.states);

	/* initialize cache with given size */
	if (cache_init(cachesize))
		warnx("Failed to initialize cache.");
	else if (interactive && cachesize > 0)
		cachestates = 1;

	update_cache();

	show_field(FLD_STMAX);
	show_field(FLD_ANCHOR);

	return (1);
}