/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/smp.h>

#include <net/if.h>
#include <net/if_poll.h>
#include <net/netmsg2.h>

/*
 * Polling support for network device drivers.
 *
 * Drivers which support this feature try to register one status polling
 * handler and several TX/RX polling handlers with the polling code.
 * If the interface's if_npoll is called with a non-NULL second argument,
 * then a register operation is requested, else a deregister operation is
 * requested.  If the requested operation is "register", the driver should
 * set up the ifpoll_info passed in according to its own needs:
 *   ifpoll_info.ifpi_status.status_func == NULL
 *     No status polling handler will be installed on CPU(0)
 *   ifpoll_info.ifpi_rx[n].poll_func == NULL
 *     No RX polling handler will be installed on CPU(n)
 *   ifpoll_info.ifpi_tx[n].poll_func == NULL
 *     No TX polling handler will be installed on CPU(n)
 *
 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz).
 * TX and status polling could be done at a lower frequency than RX
 * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac).  To avoid systimer
 * staggering at high frequency, the RX systimer gives TX and status polling
 * a piggyback (XXX).
 *
 * All of the registered polling handlers are called only if the interface
 * is marked as 'IFF_RUNNING and IFF_NPOLLING'.  However, the interface's
 * register and deregister function (ifnet.if_npoll) will be called even
 * if the interface is not marked with 'IFF_RUNNING'.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the TX/RX polling handlers, which
 * are invoked (at least once per clock tick) with 3 arguments: the "arg"
 * passed at register time, a struct ifnet pointer, and a "count" limit.
 * The registered serializer will be held before calling the related
 * polling handler.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the
 * share of CPU allocated to user tasks.  CPU is allocated proportionally
 * to the shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left to their default values.
 * The following constraints hold:
 *
 *	1 <= poll_burst <= poll_burst_max
 *	1 <= poll_each_burst <= poll_burst_max
 *	MIN_IOPOLL_BURST_MAX <= poll_burst_max <= MAX_IOPOLL_BURST_MAX
 */
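/*
 * Illustrative sketch (never compiled): roughly how a hypothetical driver's
 * if_npoll method might fill in the ifpoll_info described above.  All foo_*
 * names, the softc layout and the single-CPU setup are assumptions for the
 * example only; consult an in-tree driver for authoritative usage.  Note
 * that ifpi_ifp is filled in by ifpoll_register() itself, and that the
 * driver is expected to set if_npoll_cpuid (>= 0 on register, < 0 on
 * deregister), as asserted by ifpoll_register()/ifpoll_deregister().
 */
#if 0	/* example only */
static void
foo_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct foo_softc *sc = ifp->if_softc;	/* hypothetical softc */

	if (info != NULL) {
		/* Register: install status/RX/TX handlers on CPU 0 */
		info->ifpi_status.status_func = foo_npoll_status;
		info->ifpi_status.serializer = &sc->foo_serializer;

		info->ifpi_rx[0].poll_func = foo_npoll_rx;
		info->ifpi_rx[0].arg = sc;
		info->ifpi_rx[0].serializer = &sc->foo_serializer;

		info->ifpi_tx[0].poll_func = foo_npoll_tx;
		info->ifpi_tx[0].arg = sc;
		info->ifpi_tx[0].serializer = &sc->foo_serializer;

		/* Interrupts off; I/O now happens in the polling handlers */
		foo_disable_intr(sc);
		ifp->if_npoll_cpuid = 0;
	} else {
		/* Deregister: switch back to interrupt mode */
		foo_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}
#endif
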
#define IFPOLL_LIST_LEN		128
#define IFPOLL_FREQ_MAX		30000

#define MIN_IOPOLL_BURST_MAX	10
#define MAX_IOPOLL_BURST_MAX	1000
#define IOPOLL_BURST_MAX	150	/* good for 100Mbit net and HZ=1000 */

#define IOPOLL_EACH_BURST	5

#define IFPOLL_FREQ_DEFAULT	2000

#define IFPOLL_TXFRAC_DEFAULT	1	/* 1/2 of the pollhz */
#define IFPOLL_STFRAC_DEFAULT	19	/* 1/20 of the pollhz */

#define IFPOLL_RX		0x1
#define IFPOLL_TX		0x2

union ifpoll_time {
	struct timeval		tv;
	uint64_t		tsc;
};

struct iopoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	void			*arg;
	ifpoll_iofn_t		poll_func;
};

struct iopoll_ctx {
	union ifpoll_time	prev_t;
	u_long			short_ticks;		/* statistics */
	u_long			lost_polls;		/* statistics */
	u_long			suspect;		/* statistics */
	u_long			stalled;		/* statistics */
	uint32_t		pending_polls;		/* state */

	struct netmsg_base	poll_netmsg;
	struct netmsg_base	poll_more_netmsg;

	int			poll_cpuid;
	int			pollhz;
	uint32_t		phase;			/* state */
	int			residual_burst;		/* state */
	uint32_t		poll_each_burst;	/* tunable */
	union ifpoll_time	poll_start_t;		/* state */

	uint32_t		poll_burst;		/* state */
	uint32_t		poll_burst_max;		/* tunable */
	uint32_t		user_frac;		/* tunable */
	uint32_t		kern_frac;		/* state */

	uint32_t		poll_handlers;	/* next free entry in pr[] */
	struct iopoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct poll_comm {
	struct systimer		pollclock;
	int			poll_cpuid;

	int			stfrac_count;		/* state */
	int			poll_stfrac;		/* tunable */

	int			txfrac_count;		/* state */
	int			poll_txfrac;		/* tunable */

	int			pollhz;			/* tunable */

	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
} __cachealign;

struct stpoll_rec {
	struct lwkt_serialize	*serializer;
	struct ifnet		*ifp;
	ifpoll_stfn_t		status_func;
};

struct stpoll_ctx {
	struct netmsg_base	poll_netmsg;

	int			pollhz;

	uint32_t		poll_handlers;	/* next free entry in pr[] */
	struct stpoll_rec	pr[IFPOLL_LIST_LEN];

	struct sysctl_ctx_list	poll_sysctl_ctx;
	struct sysctl_oid	*poll_sysctl_tree;
} __cachealign;

struct iopoll_sysctl_netmsg {
	struct netmsg_base	base;
	struct iopoll_ctx	*ctx;
};

void		ifpoll_init_pcpu(int);
static void	ifpoll_register_handler(netmsg_t);
static void	ifpoll_deregister_handler(netmsg_t);

/*
 * Status polling
 */
static void	stpoll_init(void);
static void	stpoll_handler(netmsg_t);
static void	stpoll_clock(struct stpoll_ctx *);
static int	stpoll_register(struct ifnet *, const struct ifpoll_status *);
static int	stpoll_deregister(struct ifnet *);

/*
 * RX/TX polling
 */
static struct iopoll_ctx *iopoll_ctx_create(int, int);
static void	iopoll_init(int);
static void	rxpoll_handler(netmsg_t);
static void	txpoll_handler(netmsg_t);
static void	rxpollmore_handler(netmsg_t);
static void	txpollmore_handler(netmsg_t);
static void	iopoll_clock(struct iopoll_ctx *);
static int	iopoll_register(struct ifnet *, struct iopoll_ctx *,
		    const struct ifpoll_io *);
static int	iopoll_deregister(struct ifnet *, struct iopoll_ctx *);

static void	iopoll_add_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct iopoll_ctx *, int);
static void	sysctl_burstmax_handler(netmsg_t);
static int	sysctl_burstmax(SYSCTL_HANDLER_ARGS);
static void	sysctl_eachburst_handler(netmsg_t);
static int	sysctl_eachburst(SYSCTL_HANDLER_ARGS);

/*
 * Common functions
 */
static void	poll_comm_init(int);
static void	poll_comm_start(int);
static void	poll_comm_adjust_pollhz(struct poll_comm *);
static void	poll_comm_systimer0(systimer_t, int, struct intrframe *);
static void	poll_comm_systimer(systimer_t, int, struct intrframe *);
static void	sysctl_pollhz_handler(netmsg_t);
static void	sysctl_stfrac_handler(netmsg_t);
static void	sysctl_txfrac_handler(netmsg_t);
static int	sysctl_pollhz(SYSCTL_HANDLER_ARGS);
static int	sysctl_stfrac(SYSCTL_HANDLER_ARGS);
static int	sysctl_txfrac(SYSCTL_HANDLER_ARGS);

static struct stpoll_ctx	stpoll_context;
static struct poll_comm		*poll_common[MAXCPU];
static struct iopoll_ctx	*rxpoll_context[MAXCPU];
static struct iopoll_ctx	*txpoll_context[MAXCPU];

SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0,
	    "Network device polling parameters");

static int	iopoll_burst_max = IOPOLL_BURST_MAX;
static int	iopoll_each_burst = IOPOLL_EACH_BURST;

static int	ifpoll_pollhz = IFPOLL_FREQ_DEFAULT;
static int	ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
static int	ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max);
TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst);
TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz);
TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac);
TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac);

static __inline void
ifpoll_sendmsg_oncpu(netmsg_t msg)
{
	if (msg->lmsg.ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg);
}

static __inline void
sched_stpoll(struct stpoll_ctx *st_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg);
}

static __inline void
sched_iopoll(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg);
}

static __inline void
sched_iopollmore(struct iopoll_ctx *io_ctx)
{
	ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg);
}

static __inline void
ifpoll_time_get(union ifpoll_time *t)
{
	if (__predict_true(tsc_present))
		t->tsc = rdtsc();
	else
		microuptime(&t->tv);
}

/* Return time diff in us */
static __inline int
ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e)
{
	if (__predict_true(tsc_present)) {
		return (((e->tsc - s->tsc) * 1000000) / tsc_frequency);
	} else {
		return ((e->tv.tv_usec - s->tv.tv_usec) +
			(e->tv.tv_sec - s->tv.tv_sec) * 1000000);
	}
}

/*
 * Initialize per-cpu polling(4) context.  Called from kern_clock.c:
 */
void
ifpoll_init_pcpu(int cpuid)
{
	if (cpuid >= ncpus2)
		return;

	poll_comm_init(cpuid);

	if (cpuid == 0)
		stpoll_init();
	iopoll_init(cpuid);

	poll_comm_start(cpuid);
}

int
ifpoll_register(struct ifnet *ifp)
{
	struct ifpoll_info *info;
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL) {
		/* Device does not support polling */
		return EOPNOTSUPP;
	}

	info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO);

	/*
	 * Attempt to register.  Interlock with IFF_NPOLLING.
	 */

	ifnet_serialize_all(ifp);

	if (ifp->if_flags & IFF_NPOLLING) {
		/* Already polling */
		ifnet_deserialize_all(ifp);
		kfree(info, M_TEMP);
		return EBUSY;
	}

	info->ifpi_ifp = ifp;

	ifp->if_flags |= IFF_NPOLLING;
	ifp->if_npoll(ifp, info);
	KASSERT(ifp->if_npoll_cpuid >= 0, ("invalid npoll cpuid"));

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_register_handler);
	nmsg.lmsg.u.ms_resultp = info;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (error) {
		if (!ifpoll_deregister(ifp)) {
			if_printf(ifp, "ifpoll_register: "
				  "ifpoll_deregister failed!\n");
		}
	}

	kfree(info, M_TEMP);
	return error;
}

int
ifpoll_deregister(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	int error;

	if (ifp->if_npoll == NULL)
		return EOPNOTSUPP;

	ifnet_serialize_all(ifp);

	if ((ifp->if_flags & IFF_NPOLLING) == 0) {
		ifnet_deserialize_all(ifp);
		return EINVAL;
	}
	ifp->if_flags &= ~IFF_NPOLLING;

	ifnet_deserialize_all(ifp);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, ifpoll_deregister_handler);
	nmsg.lmsg.u.ms_resultp = ifp;

	error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0);
	if (!error) {
		ifnet_serialize_all(ifp);
		ifp->if_npoll(ifp, NULL);
		KASSERT(ifp->if_npoll_cpuid < 0, ("invalid npoll cpuid"));
		ifnet_deserialize_all(ifp);
	}
	return error;
}

static void
ifpoll_register_handler(netmsg_t nmsg)
{
	const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;
	int error;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	if (cpuid == 0) {
		error = stpoll_register(info->ifpi_ifp, &info->ifpi_status);
		if (error)
			goto failed;
	}

	error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid],
				&info->ifpi_rx[cpuid]);
	if (error)
		goto failed;

	error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid],
				&info->ifpi_tx[cpuid]);
	if (error)
		goto failed;

	/* Adjust polling frequency, after all registration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
	return;
failed:
	lwkt_replymsg(&nmsg->lmsg, error);
}

static void
ifpoll_deregister_handler(netmsg_t nmsg)
{
	struct ifnet *ifp = nmsg->lmsg.u.ms_resultp;
	int cpuid = mycpuid, nextcpu;

	KKASSERT(cpuid < ncpus2);
	KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid));

	/* Ignore errors */
	if (cpuid == 0)
		stpoll_deregister(ifp);
	iopoll_deregister(ifp, rxpoll_context[cpuid]);
	iopoll_deregister(ifp, txpoll_context[cpuid]);

	/* Adjust polling frequency, after all deregistration is done */
	poll_comm_adjust_pollhz(poll_common[cpuid]);

	nextcpu = cpuid + 1;
	if (nextcpu < ncpus2)
		lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg);
	else
		lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
stpoll_init(void)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	const struct poll_comm *comm = poll_common[0];

	st_ctx->pollhz = comm->pollhz / (comm->poll_stfrac + 1);

	sysctl_ctx_init(&st_ctx->poll_sysctl_ctx);
	st_ctx->poll_sysctl_tree =
	    SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx,
			    SYSCTL_CHILDREN(comm->sysctl_tree),
			    OID_AUTO, "status", CTLFLAG_RD, 0, "");

	SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx,
			SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree),
			OID_AUTO, "handlers", CTLFLAG_RD,
			&st_ctx->poll_handlers, 0,
			"Number of registered status poll handlers");

	netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport,
		    0, stpoll_handler);
}

/*
 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically
 * once per polling systimer tick.
 */
static void
stpoll_handler(netmsg_t msg)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	struct thread *td = curthread;
	int i;

	KKASSERT(&td->td_msgport == netisr_portfn(0));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (st_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		const struct stpoll_rec *rec = &st_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->status_func(ifp, st_ctx->pollhz);

		lwkt_serialize_exit(rec->serializer);
	}

	crit_exit_quick(td);
}

/*
 * Hook from the status poll systimer.  Tries to schedule a status poll.
 * NOTE: Caller should hold the critical section.
 */
static void
stpoll_clock(struct stpoll_ctx *st_ctx)
{
	KKASSERT(mycpuid == 0);

	if (st_ctx->poll_handlers == 0)
		return;
	sched_stpoll(st_ctx);
}

static int
stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	if (st_rec->status_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("status poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = st_rec->serializer;
		rec->status_func = st_rec->status_func;

		st_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
stpoll_deregister(struct ifnet *ifp)
{
	struct stpoll_ctx *st_ctx = &stpoll_context;
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(0));

	for (i = 0; i < st_ctx->poll_handlers; ++i) {
		if (st_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == st_ctx->poll_handlers) {
		kprintf("stpoll_deregister: ifp not found!!!\n");
		error = ENOENT;
	} else {
		st_ctx->poll_handlers--;
		if (i < st_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers];
		}
		error = 0;
	}
	return error;
}

static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	crit_enter();
	io_ctx->poll_burst = 5;
	io_ctx->pending_polls = 0;
	io_ctx->residual_burst = 0;
	io_ctx->phase = 0;
	io_ctx->kern_frac = 0;
	bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t));
	bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t));
	crit_exit();
}

static void
iopoll_init(int cpuid)
{
	KKASSERT(cpuid < ncpus2);

	rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX);
	txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX);
}

static struct iopoll_ctx *
iopoll_ctx_create(int cpuid, int poll_type)
{
	struct poll_comm *comm;
	struct iopoll_ctx *io_ctx;
	const char *poll_type_str;
	netisr_fn_t handler, more_handler;

	KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX);

	/*
	 * Make sure that tunables are in sane state
	 */
	if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX)
		iopoll_burst_max = MIN_IOPOLL_BURST_MAX;
	else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX)
		iopoll_burst_max = MAX_IOPOLL_BURST_MAX;

	if (iopoll_each_burst > iopoll_burst_max)
		iopoll_each_burst = iopoll_burst_max;

	comm = poll_common[cpuid];

	/*
	 * Create the per-cpu polling context
	 */
	io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF,
	    M_WAITOK | M_ZERO);

	io_ctx->poll_each_burst = iopoll_each_burst;
	io_ctx->poll_burst_max = iopoll_burst_max;
	io_ctx->user_frac = 50;
	if (poll_type == IFPOLL_RX)
		io_ctx->pollhz = comm->pollhz;
	else
		io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1);
	io_ctx->poll_cpuid = cpuid;
	iopoll_reset_state(io_ctx);

	if (poll_type == IFPOLL_RX) {
		handler = rxpoll_handler;
		more_handler = rxpollmore_handler;
	} else {
		handler = txpoll_handler;
		more_handler = txpollmore_handler;
	}

	netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport,
	    0, handler);
	io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx;

	netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport,
	    0, more_handler);
	io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx;

	/*
	 * Initialize per-cpu sysctl nodes
	 */
	if (poll_type == IFPOLL_RX)
		poll_type_str = "rx";
	else
		poll_type_str = "tx";

	sysctl_ctx_init(&io_ctx->poll_sysctl_ctx);
	io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx,
				   SYSCTL_CHILDREN(comm->sysctl_tree),
				   OID_AUTO, poll_type_str, CTLFLAG_RD, 0, "");
	iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx,
	    SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type);

	return io_ctx;
}

/*
 * Hook from the iopoll systimer.  Tries to schedule an iopoll, but keeps
 * track of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time.  However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should,
 * meaning either stray interrupts or delayed events.
 *
 * WARNING! called from fastint or IPI, the MP lock might not be held.
 * NOTE: Caller should hold the critical section.
 */
static void
iopoll_clock(struct iopoll_ctx *io_ctx)
{
	union ifpoll_time t;
	int delta;

	KKASSERT(mycpuid == io_ctx->poll_cpuid);

	if (io_ctx->poll_handlers == 0)
		return;

	ifpoll_time_get(&t);
	delta = ifpoll_time_diff(&io_ctx->prev_t, &t);
	if (delta * io_ctx->pollhz < 500000)
		io_ctx->short_ticks++;
	else
		io_ctx->prev_t = t;

	if (io_ctx->pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		io_ctx->stalled++;
		io_ctx->pending_polls = 0;
		io_ctx->phase = 0;
	}

	if (io_ctx->phase <= 2) {
		if (io_ctx->phase != 0)
			io_ctx->suspect++;
		io_ctx->phase = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 2;
	}
	if (io_ctx->pending_polls++ > 0)
		io_ctx->lost_polls++;
}

/*
 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when
 * appropriate, typically once per polling systimer tick.
 *
 * Note that the message is replied immediately in order to allow a new
 * ISR to be scheduled in the handler.
 */
static void
rxpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i, cycles;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;
	if (io_ctx->residual_burst == 0) {
		/* First call in this tick */
		ifpoll_time_get(&io_ctx->poll_start_t);
		io_ctx->residual_burst = io_ctx->poll_burst;
	}
	cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ?
		 io_ctx->residual_burst : io_ctx->poll_each_burst;
	io_ctx->residual_burst -= cycles;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, cycles);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

static void
txpoll_handler(netmsg_t msg)
{
	struct iopoll_ctx *io_ctx;
	struct thread *td = curthread;
	int i;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 3;

	for (i = 0; i < io_ctx->poll_handlers; i++) {
		const struct iopoll_rec *rec = &io_ctx->pr[i];
		struct ifnet *ifp = rec->ifp;

		if (!lwkt_serialize_try(rec->serializer))
			continue;

		if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
		    (IFF_RUNNING | IFF_NPOLLING))
			rec->poll_func(ifp, rec->arg, -1);

		lwkt_serialize_exit(rec->serializer);
	}

	/*
	 * Do a quick exit/enter to catch any higher-priority
	 * interrupt sources.
	 */
	crit_exit_quick(td);
	crit_enter_quick(td);

	sched_iopollmore(io_ctx);
	io_ctx->phase = 4;

	crit_exit_quick(td);
}

/*
 * rxpollmore_handler and txpollmore_handler are called after other netisrs,
 * possibly scheduling another rxpoll_handler or txpoll_handler call, or
 * adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed leading
 * to unfairness.  To reduce the problem, and also to account better for time
 * spent in network-related processing, we split the burst in smaller chunks
 * of fixed size, giving control to the other netisrs between chunks.  This
 * helps in improving the fairness, reducing livelock and accounting for the
 * work performed in low-level handling.
 */
static void
rxpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	union ifpoll_time t;
	int kern_load;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;
	if (io_ctx->residual_burst > 0) {
		sched_iopoll(io_ctx);
		crit_exit_quick(td);
		/* Will run immediately on return, followed by netisrs */
		return;
	}

	/* Here we can account time spent in iopoll's in this tick */
	ifpoll_time_get(&t);
	kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t);
	kern_load = (kern_load * io_ctx->pollhz) / 10000;	/* 0..100 */
	io_ctx->kern_frac = kern_load;

	if (kern_load > (100 - io_ctx->user_frac)) {
		/* Try decrease ticks */
		if (io_ctx->poll_burst > 1)
			io_ctx->poll_burst--;
	} else {
		if (io_ctx->poll_burst < io_ctx->poll_burst_max)
			io_ctx->poll_burst++;
	}

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		io_ctx->poll_burst -= (io_ctx->poll_burst / 8);
		if (io_ctx->poll_burst < 1)
			io_ctx->poll_burst = 1;
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
txpollmore_handler(netmsg_t msg)
{
	struct thread *td = curthread;
	struct iopoll_ctx *io_ctx;
	uint32_t pending_polls;

	io_ctx = msg->lmsg.u.ms_resultp;
	KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	crit_enter_quick(td);

	/* Reply ASAP */
	lwkt_replymsg(&msg->lmsg, 0);

	if (io_ctx->poll_handlers == 0) {
		crit_exit_quick(td);
		return;
	}

	io_ctx->phase = 5;

	io_ctx->pending_polls--;
	pending_polls = io_ctx->pending_polls;

	if (pending_polls == 0) {
		/* We are done */
		io_ctx->phase = 0;
	} else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks.  Restart processing again.
		 */
		sched_iopoll(io_ctx);
		io_ctx->phase = 6;
	}

	crit_exit_quick(td);
}

static void
iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent,
    struct iopoll_ctx *io_ctx, int poll_type)
{
	if (poll_type == IFPOLL_RX) {
		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax,
		    "IU", "Max Polling burst size");

		SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst",
		    CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst,
		    "IU", "Max size of each burst");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD,
		    &io_ctx->poll_burst, 0, "Current polling burst size");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW,
		    &io_ctx->user_frac, 0, "Desired user fraction of cpu time");

		SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD,
		    &io_ctx->kern_frac, 0, "Kernel fraction of cpu time");

		SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst",
		    CTLFLAG_RD, &io_ctx->residual_burst, 0,
		    "# of residual cycles in burst");
	}

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD,
	    &io_ctx->phase, 0, "Polling phase");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW,
	    &io_ctx->suspect, "Suspected events");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW,
	    &io_ctx->stalled, "Potential stalls");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW,
	    &io_ctx->short_ticks,
	    "Hardclock ticks shorter than they should be");

	SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW,
	    &io_ctx->lost_polls,
	    "How many times we would have lost a poll tick");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD,
	    &io_ctx->pending_polls, 0, "Do we need to poll again");

	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD,
	    &io_ctx->poll_handlers, 0, "Number of registered poll handlers");
}

static void
sysctl_burstmax_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result;
	if (io_ctx->poll_each_burst > io_ctx->poll_burst_max)
		io_ctx->poll_each_burst = io_ctx->poll_burst_max;
	if (io_ctx->poll_burst > io_ctx->poll_burst_max)
		io_ctx->poll_burst = io_ctx->poll_burst_max;
	if (io_ctx->residual_burst > io_ctx->poll_burst_max)
		io_ctx->residual_burst = io_ctx->poll_burst_max;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_burstmax(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t burst_max;
	int error;

	burst_max = io_ctx->poll_burst_max;
	error = sysctl_handle_int(oidp, &burst_max, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (burst_max < MIN_IOPOLL_BURST_MAX)
		burst_max = MIN_IOPOLL_BURST_MAX;
	else if (burst_max > MAX_IOPOLL_BURST_MAX)
		burst_max = MAX_IOPOLL_BURST_MAX;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_burstmax_handler);
	msg.base.lmsg.u.ms_result = burst_max;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static void
sysctl_eachburst_handler(netmsg_t nmsg)
{
	struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg;
	struct iopoll_ctx *io_ctx;
	uint32_t each_burst;

	io_ctx = msg->ctx;
	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	each_burst = nmsg->lmsg.u.ms_result;
	if (each_burst > io_ctx->poll_burst_max)
		each_burst = io_ctx->poll_burst_max;
	else if (each_burst < 1)
		each_burst = 1;
	io_ctx->poll_each_burst = each_burst;

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_eachburst(SYSCTL_HANDLER_ARGS)
{
	struct iopoll_ctx *io_ctx = arg1;
	struct iopoll_sysctl_netmsg msg;
	uint32_t each_burst;
	int error;

	each_burst = io_ctx->poll_each_burst;
	error = sysctl_handle_int(oidp, &each_burst, 0, req);
	if (error || req->newptr == NULL)
		return error;

	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, sysctl_eachburst_handler);
	msg.base.lmsg.u.ms_result = each_burst;
	msg.ctx = io_ctx;

	return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0);
}

static int
iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx,
    const struct ifpoll_io *io_rec)
{
	int error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	if (io_rec->poll_func == NULL)
		return 0;

	/*
	 * Check if there is room.
	 */
	if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times.  Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;	/* XXX */

		if (verbose > 0) {
			kprintf("io poll handlers list full, "
				"maybe a broken driver ?\n");
			verbose--;
		}
		error = ENOENT;
	} else {
		struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers];

		rec->ifp = ifp;
		rec->serializer = io_rec->serializer;
		rec->arg = io_rec->arg;
		rec->poll_func = io_rec->poll_func;

		io_ctx->poll_handlers++;
		error = 0;
	}
	return error;
}

static int
iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx)
{
	int i, error;

	KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid));

	for (i = 0; i < io_ctx->poll_handlers; ++i) {
		if (io_ctx->pr[i].ifp == ifp)	/* Found it */
			break;
	}
	if (i == io_ctx->poll_handlers) {
		error = ENOENT;
	} else {
		io_ctx->poll_handlers--;
		if (i < io_ctx->poll_handlers) {
			/* Last entry replaces this one. */
			io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers];
		}

		if (io_ctx->poll_handlers == 0)
			iopoll_reset_state(io_ctx);
		error = 0;
	}
	return error;
}

static void
poll_comm_init(int cpuid)
{
	struct poll_comm *comm;
	char cpuid_str[16];

	comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifpoll_stfrac < 0)
		ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT;
	if (ifpoll_txfrac < 0)
		ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT;

	comm->pollhz = ifpoll_pollhz;
	comm->poll_cpuid = cpuid;
	comm->poll_stfrac = ifpoll_stfrac;
	comm->poll_txfrac = ifpoll_txfrac;

	ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid);

	sysctl_ctx_init(&comm->sysctl_ctx);
	comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx,
			    SYSCTL_STATIC_CHILDREN(_net_ifpoll),
			    OID_AUTO, cpuid_str, CTLFLAG_RD, 0, "");

	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_pollhz,
			"I", "Device polling frequency");

	if (cpuid == 0) {
		SYSCTL_ADD_PROC(&comm->sysctl_ctx,
				SYSCTL_CHILDREN(comm->sysctl_tree),
				OID_AUTO, "status_frac",
				CTLTYPE_INT | CTLFLAG_RW,
				comm, 0, sysctl_stfrac,
				"I", "# of cycles before status is polled");
	}
	SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree),
			OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW,
			comm, 0, sysctl_txfrac,
			"I", "# of cycles before TX is polled");

	poll_common[cpuid] = comm;
}

static void
poll_comm_start(int cpuid)
{
	struct poll_comm *comm = poll_common[cpuid];
	systimer_func_t func;

	/*
	 * Initialize systimer
	 */
	if (cpuid == 0)
		func = poll_comm_systimer0;
	else
		func = poll_comm_systimer;
	systimer_init_periodic_nq(&comm->pollclock, func, comm, 1);
}

static void
_poll_comm_systimer(struct poll_comm *comm)
{
	if (comm->txfrac_count-- == 0) {
		comm->txfrac_count = comm->poll_txfrac;
		iopoll_clock(txpoll_context[comm->poll_cpuid]);
	}
	iopoll_clock(rxpoll_context[comm->poll_cpuid]);
}

static void
poll_comm_systimer0(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0);

	crit_enter_gd(gd);

	if (comm->stfrac_count-- == 0) {
		comm->stfrac_count = comm->poll_stfrac;
		stpoll_clock(&stpoll_context);
	}
	_poll_comm_systimer(comm);

	crit_exit_gd(gd);
}

static void
poll_comm_systimer(systimer_t info, int in_ipi __unused,
    struct intrframe *frame __unused)
{
	struct poll_comm *comm = info->data;
	globaldata_t gd = mycpu;

	KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0);

	crit_enter_gd(gd);
	_poll_comm_systimer(comm);
	crit_exit_gd(gd);
}

static void
poll_comm_adjust_pollhz(struct poll_comm *comm)
{
	uint32_t handlers;
	int pollhz = 1;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/*
	 * If there is no polling handler registered, set systimer
	 * frequency to the lowest value.  Polling systimer frequency
	 * will be adjusted to the requested value, once there are
	 * registered handlers.
	 */
	handlers = rxpoll_context[mycpuid]->poll_handlers +
		   txpoll_context[mycpuid]->poll_handlers;
	if (comm->poll_cpuid == 0)
		handlers += stpoll_context.poll_handlers;
	if (handlers)
		pollhz = comm->pollhz;
	systimer_adjust_periodic(&comm->pollclock, pollhz);
}

static int
sysctl_pollhz(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, phz;

	phz = comm->pollhz;
	error = sysctl_handle_int(oidp, &phz, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (phz <= 0)
		return EINVAL;
	else if (phz > IFPOLL_FREQ_MAX)
		phz = IFPOLL_FREQ_MAX;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_pollhz_handler);
	nmsg.lmsg.u.ms_result = phz;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_pollhz_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	/* Save polling frequency */
	comm->pollhz = nmsg->lmsg.u.ms_result;

	/*
	 * Adjust cached pollhz
	 */
	rxpoll_context[mycpuid]->pollhz = comm->pollhz;
	txpoll_context[mycpuid]->pollhz =
	    comm->pollhz / (comm->poll_txfrac + 1);
	if (mycpuid == 0)
		stpoll_context.pollhz = comm->pollhz / (comm->poll_stfrac + 1);

	/*
	 * Adjust polling frequency
	 */
	poll_comm_adjust_pollhz(comm);

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_stfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, stfrac;

	KKASSERT(comm->poll_cpuid == 0);

	stfrac = comm->poll_stfrac;
	error = sysctl_handle_int(oidp, &stfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (stfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_stfrac_handler);
	nmsg.lmsg.u.ms_result = stfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_stfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int stfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_stfrac = stfrac;
	if (comm->stfrac_count > comm->poll_stfrac)
		comm->stfrac_count = comm->poll_stfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}

static int
sysctl_txfrac(SYSCTL_HANDLER_ARGS)
{
	struct poll_comm *comm = arg1;
	struct netmsg_base nmsg;
	int error, txfrac;

	txfrac = comm->poll_txfrac;
	error = sysctl_handle_int(oidp, &txfrac, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (txfrac < 0)
		return EINVAL;

	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
		    0, sysctl_txfrac_handler);
	nmsg.lmsg.u.ms_result = txfrac;

	return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0);
}

static void
sysctl_txfrac_handler(netmsg_t nmsg)
{
	struct poll_comm *comm = poll_common[mycpuid];
	int txfrac = nmsg->lmsg.u.ms_result;

	KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid));

	crit_enter();
	comm->poll_txfrac = txfrac;
	if (comm->txfrac_count > comm->poll_txfrac)
		comm->txfrac_count = comm->poll_txfrac;
	crit_exit();

	lwkt_replymsg(&nmsg->lmsg, 0);
}