/*	$NetBSD: optimize.c,v 1.3 1995/04/29 05:42:28 cgd Exp $	*/

/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *  Optimization module for tcpdump intermediate representation.
 */
#ifndef lint
static char rcsid[] =
    "@(#) Header: optimize.c,v 1.45 94/06/20 19:07:55 leres Exp (LBL)";
#endif

#include <sys/types.h>
#include <sys/time.h>

#include <net/bpf.h>

#include <stdio.h>
#ifdef __osf__
#include <stdlib.h>
#include <malloc.h>
#endif
#ifdef __NetBSD__
#include <stdlib.h>
#endif
#include <memory.h>

#include "gencode.h"

#ifndef __GNUC__
#define inline
#endif

#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

#define NOP -1

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS
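/*
 * The atoms tracked by the use-def analysis are the scratch memory
 * cells M[0..BPF_MEMWORDS-1] (atom k is cell k), plus A_ATOM and X_ATOM
 * for the accumulator and index register.  AX_ATOM (== N_ATOMS)
 * deliberately lies outside that range; it is only a marker meaning
 * "both A and X", e.g. for ALU or JMP instructions with a BPF_X source,
 * and compute_local_ud() below expands it into the two real atoms.
 */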
/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;

/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark' is
 * incremented.  This automatically makes each element unmarked.
 */
static int cur_mark;
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)

static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static long F(int, long, long);
static inline void vstore(struct stmt *, long *, long, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, long, long);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, long[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static void convert_code_r(struct block *);

static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;

/*
 * A bit vector set representation of the dominators.
 * We round the set size up to the next whole word.
 */
static int nodewords;
static int edgewords;
struct block **levels;
u_long *space;
#define BITS_PER_WORD (8*sizeof(u_long))

/*
 * True if 'a' is in uset p.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register u_long *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register u_long *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register u_long *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
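/*
 * For example, on a machine with 32-bit u_longs, block id 40 lives in
 * bit 8 of word 1 of a uset, so SET_INSERT(b->dom, 40) sets that bit
 * and SET_MEMBER(b->dom, 40) tests it.  (Illustration only; the ids
 * actually used are assigned by number_blks_r() below.)
 */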
static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;

	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	u_long *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
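/*
 * A small worked example of the sweep above: for a diamond in which root
 * r branches to b1 and b2, both of which jump to j, the sweep visits r
 * first (it has the highest level).  r's set becomes {r}, so b1 and b2
 * end up with {r,b1} and {r,b2}; intersecting both into j leaves j with
 * {r,j}.  (Sketch only; the block names are illustrative.)
 */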
static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	      n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}

/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
		use |= ATOMMASK(A_ATOM);

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}
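/*
 * For instance, a block whose statements are "ld M[1]; st M[2]" uses
 * atom 1 (it is read before any local definition) and both defines and
 * kills A and atom 2 (each is written before being read).  A block that
 * ends in a jump without defining A also gets A added to in_use, since
 * the comparison reads the accumulator.  (Illustration only.)
 */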
/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	long v0, v1;
	long val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)

struct vmapinfo {
	int is_const;
	long const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}

/* Because we really don't have an IR, this stuff is a little messy. */
static long
F(code, v0, v1)
	int code;
	long v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
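/*
 * F() is the heart of the value numbering: it hashes the triple
 * (opcode, value number of operand 0, value number of operand 1) and
 * returns the same number whenever the same triple recurs, so, for
 * example, two "ld #20" statements both map to K(20).  Immediate loads
 * are additionally recorded in vmap[] as known constants, which is what
 * fold_op() and opt_peep() consult later.
 */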
static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	long *valp;
	long newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

static void
fold_op(s, v0, v1)
	struct stmt *s;
	long v0, v1;
{
	long a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_LSH:
		a <<= b;
		break;

	case BPF_RSH:
		a >>= b;
		break;

	case BPF_NEG:
		a = -a;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}

static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;
	long v;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	while (1) {
		s = this_op(s);
		if (s == 0)
			break;
		next = this_op(s->next);
		if (next == 0)
			break;
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				break;

			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				break;

			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				break;

			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				break;
			/*
			 * XXX We need to check that X is not
			 * subsequently used.  We know we can eliminate the
			 * accumulator modifications since it is defined
			 * by the last stmt of this sequence.
			 *
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2	{s}
			 * (005) ldxms   [14]	{next}  -- optional
			 * (006) addx		{add}
			 * (007) tax		{tax}
			 * (008) ild     [x+0]	{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
		s = next;
	}
	/*
	 * If we have a subtract to do a comparison, and the X register
	 * is a known constant, we can merge this value into the
	 * comparison.
	 */
	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		val = b->val[X_ATOM];
		if (vmap[val].is_const) {
			b->s.k += vmap[val].const_val;
			last->s.code = NOP;
			done = 0;
		} else if (b->s.k == 0) {
			/*
			 * sub x	->	nop
			 * j  #0		j  x
			 */
			last->s.code = NOP;
			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
				BPF_X;
			done = 0;
		}
	}
	/*
	 * Likewise, a constant subtract can be simplified.
	 */
	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
		 !ATOMELEM(b->out_use, A_ATOM)) {
		b->s.k += last->s.k;
		last->s.code = NOP;
		done = 0;
	}
	/*
	 * and #k	nop
	 * jeq #0  ->	jset #k
	 */
	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
		b->s.k = last->s.k;
		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
		last->s.code = NOP;
		done = 0;
		opt_not(b);
	}
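	/*
	 * Note that the rewrite above inverts the sense of the branch:
	 * "and #k; jeq #0" is true when (A & k) == 0, while "jset #k"
	 * is true when (A & k) != 0, which is why opt_not() swaps the
	 * two successors.
	 */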
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = v > b->s.k;
			break;

		case BPF_JGE:
			v = v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of expression 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	long val[];
	int alter;
{
	int op;
	long v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				if (op == BPF_ADD || op == BPF_SUB ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, and -1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR ||
			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
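/*
 * Taken together, the constant cases above amount to local constant
 * folding during the 'alter' pass: e.g. "ld #2; add #3" collapses via
 * fold_op() into "ld #5", "or #0" becomes a NOP, and an ALU operation
 * whose X operand has a known constant value is rewritten into its
 * immediate (BPF_K) form.  fold_op() and the X-to-K rewrite also clear
 * 'done', so opt_loop() makes another pass over the graph.
 */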
static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}
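/*
 * In other words, a definition that is overwritten before being read
 * ("st M[2]; ...; st M[2]" with no intervening "ld M[2]") loses its
 * first store, and a definition still pending at the end of the block
 * is also dead if its atom is not in out_use.  (Illustrative example;
 * the exact statements depend on the generated code.)
 */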
static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	long aval;

	/*
	 * Initialize the atom values.
	 * If we have no predecessors, everything is undefined.
	 * Otherwise, we inherit our values from our predecessors.
	 * If any register has an ambiguous value (i.e. control paths are
	 * merging) give it the undefined value of 0.
	 */
	p = b->in_edges;
	if (p == 0)
		memset((char *)b->val, 0, sizeof(b->val));
	else {
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator with a value that is
	 * already there, eliminate all the statements.
	 */
	if (do_stmts && b->out_use == 0 && aval != 0 &&
	    b->val[A_ATOM] == aval)
		b->stmts = 0;
	else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands are identical, so the
		 * result is true if a true branch was
		 * taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.  We rely on the fact that
		 * distinct constants have distinct value numbers.
		 */
		return JF(child);

	return 0;
}
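/*
 * fold_edge() is what allows opt_j() below to skip tests whose outcome
 * is already known: if some dominating edge was taken because its
 * predecessor's test (same code, same value number for A, same operand
 * value number) succeeded or failed, a block repeating that identical
 * test must go the same way, so the incoming edge can be retargeted
 * straight at the appropriate successor.
 */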
static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register u_long x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}


static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between *diffp and *samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}
static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}
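/*
 * Note the division of labor: during the first optimization pass
 * (do_stmts == 0, see bpf_optimize() below) opt_blks() only evaluates
 * blocks and moves branches via opt_j() and the pullups; during the
 * second pass (do_stmts == 1) opt_blk() may also rewrite and delete
 * statements, and branch movement is skipped since it can no longer
 * make a difference.
 */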
static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}

static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}

static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;
}

static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{

#ifdef BDEBUG
	if (dflag > 1)
		opt_dump(root);
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_inedges(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		if (dflag > 1)
			opt_dump(root);
#endif
	} while (!done);
}

/*
 * Optimize the filter code in its dag representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
	opt_root(rootp);
	opt_cleanup();
}

static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}

/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}

static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}
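/*
 * Merge ("intern") equivalent blocks: if two live blocks have equal
 * statement lists, the same test, and the same successors (eq_blk),
 * every jump to one of them can be redirected to the other.  This is
 * repeated until no more merges happen, since redirecting jumps can
 * make further blocks become equivalent.
 */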
static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done;
 top:
	done = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done)
		goto top;
}

static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}

/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}

/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}

/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}

/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1;
}
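/*
 * Sizing note for the allocator below: every block contributes exactly
 * one true edge and one false edge, so n_edges is 2 * n_blocks, and the
 * dominator, closure and edge-dominator bit sets are sized in whole
 * u_longs (nodewords and edgewords).  With, say, 10 blocks and 32-bit
 * u_longs that is 20 edges, nodewords == 1 and edgewords == 1.
 */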
/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	u_long *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)malloc(n * sizeof(*blocks));
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)malloc(n_edges * sizeof(*edges));

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)malloc(n_blocks * sizeof(*levels));

	edgewords = n_edges / (8 * sizeof(u_long)) + 1;
	nodewords = n_blocks / (8 * sizeof(u_long)) + 1;

	/* XXX */
	space = (u_long *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
}

/*
 * Some pointers used to convert the basic block form of the code,
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;

#ifdef BDEBUG
int bids[1000];
#endif

static void
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;

	if (p == 0 || isMarked(p))
		return;
	Mark(p);

	convert_code_r(JF(p));
	convert_code_r(JT(p));

	slen = slength(p->stmts);
	dst = ftail -= slen + 1;

	p->offset = dst - fstart;

	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;
		++dst;
	}
#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256)
			bpf_error("long jumps not supported");
		dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256)
			bpf_error("long jumps not supported");
		dst->jf = off;
	}
}
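/*
 * The jump offsets computed above follow the BPF convention that a
 * conditional branch is relative to the instruction following it, so
 * what is stored is the target's offset minus (this block's offset +
 * slen + 1).  Since the jt and jf fields of a bpf_insn are single
 * bytes, anything that does not fit in 0..255 is rejected as a
 * "long jump".
 */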
/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	unMarkAll();
	n = *lenp = count_stmts(root);

	fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
	memset((char *)fp, 0, sizeof(*fp) * n);
	fstart = fp;
	ftail = fp + n;

	unMarkAll();
	convert_code_r(root);

	return fp;
}

#ifdef BDEBUG
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif