1 /*- 2 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the 13 * distribution. 14 * 3. Neither the name of The DragonFly Project nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific, prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #ifndef _NET_IFQ_VAR_H_ 33 #define _NET_IFQ_VAR_H_ 34 35 #ifndef _KERNEL 36 #error "This file should not be included by userland programs." 
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

/*
 * Assert that the given subqueue is the default subqueue of the
 * given interface.
 */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));

struct ifaltq;
struct ifaltq_subque;

/*
 * Subqueue watchdog
 */
typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout		wd_callout;	/* drives the watchdog */
	int			wd_timer;	/* countdown; semantics set by
						 * ifsq_watchdog_* (if_subq.c) */
	struct ifaltq_subque	*wd_subq;	/* subqueue being watched */
	ifsq_watchdog_t		wd_watchdog;	/* invoked on timeout */
};

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_mask(struct ifaltq *, int);
int		ifq_mapsubq_modulo(struct ifaltq *, int);

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);

#ifdef ALTQ

/*
 * Return non-zero if an ALTQ packet scheduler is enabled on this ifq.
 */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

/*
 * Return non-zero if an ALTQ discipline is attached to this ifq.
 */
static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

/* Without ALTQ compiled in, a scheduler can never be enabled. */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

/* Without ALTQ compiled in, a discipline can never be attached. */
static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

/*
 * Return non-zero once the ifq has been marked ready for use.
 */
static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

/*
 * Mark the ifq ready for use.
 */
static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Enqueue a packet on the subqueue, dispatching to either the classic
 * enqueue path or the installed ALTQ discipline's enqueue method.
 *
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	/* Classic path unless a scheduler is actually enabled. */
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

/*
 * Locked wrapper around ifsq_enqueue_locked().
 */
static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

/*
 * Dequeue the next packet from the subqueue.  A previously prepended
 * mbuf (see ifsq_prepend()) takes priority; otherwise dequeue via the
 * token bucket regulator, the classic path, or the ALTQ discipline.
 */
static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Return (without removing) the next packet on the subqueue, honoring
 * the same priority order as ifsq_dequeue().
 *
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}

/*
 * Locked wrapper around ifsq_poll_locked().
 */
static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Return the packet-header length of the next packet on the subqueue,
 * or 0 if the subqueue is empty.
 */
static __inline int
ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;
	int _len = 0;

	ALTQ_SQ_LOCK(_ifsq);

	_m = ifsq_poll_locked(_ifsq);
	if (_m != NULL) {
		M_ASSERTPKTHDR(_m);
		_len = _m->m_pkthdr.len;
	}

	ALTQ_SQ_UNLOCK(_ifsq);

	return _len;
}

/*
 * Free all packets held by the subqueue, including any prepended mbuf.
 *
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

/*
 * Locked wrapper around ifsq_purge_locked().
 */
static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Lock all subqueues of the ifq, in ascending index order to keep the
 * lock ordering consistent (ifq_unlock_all() releases in reverse).
 */
static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

/*
 * Unlock all subqueues of the ifq, in descending index order (the
 * reverse of the order ifq_lock_all() acquired them in).
 */
static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * Purge every subqueue of the ifq.
 *
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

/*
 * Locked wrapper around ifq_purge_all_locked().
 */
static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

/*
 * Classify the packet for the enabled ALTQ discipline.  A no-op unless
 * ALTQ is compiled in and enabled.  The enabled/classify flags are
 * re-tested after taking the subqueue lock because they may change
 * between the unlocked pre-check and the locked call.
 */
static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			/* Recheck under the lock before classifying. */
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

/*
 * Push a single mbuf back onto the front of the subqueue; it will be
 * returned by the next dequeue/poll.  Only one mbuf may be prepended
 * at a time (asserted below).
 */
static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Mark the subqueue's hardware as busy (output active).
 *
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Clear the subqueue's hardware busy (output active) flag.
 *
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Return the subqueue's hardware busy (output active) flag.
 *
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		/*
		 * NOTE(review): _m is dereferenced after a successful
		 * enqueue; presumably safe here because the held hardware
		 * serializer keeps the driver from consuming the queue
		 * concurrently -- confirm against the dequeue path.
		 */
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		/* Kick the driver unless its hardware is already busy. */
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	} else {
		IFNET_STAT_INC(_ifp, oqdrops, 1);
	}
	return(_error);
}

/*
 * Return non-zero if the subqueue holds no packets.
 */
static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifsq_len == 0);
}

/*
 * Return non-zero if a packet can actually be dequeued right now.
 * With a token bucket regulator installed a non-empty queue may still
 * have nothing dispatchable, so poll instead of testing the length.
 *
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Return non-zero if transmission has been started on the subqueue.
 *
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Mark transmission started on the subqueue.
 *
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Clear the subqueue's transmission-started flag.
 *
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

/*
 * Return the per-cpu staging area for this subqueue.  No bounds check
 * is done; the caller must pass a valid cpuid.
 */
static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

/*
 * Return the cpu on which this subqueue is serviced.
 */
static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

/*
 * Bind the subqueue to the given cpu.
 */
static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

/*
 * Return the lwkt message used to schedule if_start on the given cpu.
 */
static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

/*
 * Return the subqueue's index within its ifq.
 */
static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

/*
 * Attach driver-private data to the subqueue.
 */
static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

/*
 * Return the driver-private data attached to the subqueue.
 */
static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

/*
 * Return the interface this subqueue belongs to.
 */
static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

/*
 * Install the hardware serializer for the subqueue.  May only be done
 * once (asserted below).
 */
static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

/*
 * Enter the subqueue's hardware serializer.
 */
static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

/*
 * Exit the subqueue's hardware serializer.
 */
static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

/*
 * Try to enter the subqueue's hardware serializer without blocking;
 * returns non-zero on success.
 */
static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}

/*
 * Return the ifq's default subqueue.
 */
static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

/*
 * Return the subqueue at the given index (bounds asserted).
 */
static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

/*
 * Map a cpuid to a subqueue using the ifq's installed mapping method.
 */
static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
	return ifq_get_subq(_ifq, _idx);
}

/*
 * Set the number of subqueues on the ifq.
 */
static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

/*
 * Set the mapping mask for ifq_mapsubq_mask(); must be of the form
 * 2^n - 1 (asserted below).
 */
static __inline void
ifq_set_subq_mask(struct ifaltq *_ifq, uint32_t _mask)
{

	KASSERT(((_mask + 1) & _mask) == 0, ("invalid mask %08x", _mask));
	_ifq->altq_subq_mappriv = _mask;
}

/*
 * Set the divisor for ifq_mapsubq_modulo(); must be non-zero and no
 * larger than the subqueue count (asserted below).
 */
static __inline void
ifq_set_subq_divisor(struct ifaltq *_ifq, uint32_t _divisor)
{

	KASSERT(_divisor > 0, ("invalid divisor %u", _divisor));
	KASSERT(_divisor <= _ifq->altq_subq_cnt,
	    ("invalid divisor %u, max %d", _divisor, _ifq->altq_subq_cnt));
	_ifq->altq_subq_mappriv = _divisor;
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

/* COMPAT: single-queue wrapper operating on the default subqueue. */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT: only valid for single-subqueue ifqs (asserted below). */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

/* COMPAT: only valid for single-subqueue ifqs (asserted below). */
static __inline void
ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
}

#endif	/* _NET_IFQ_VAR_H_ */