1 /*- 2 * Copyright (c) 2001-2002 Luigi Rizzo 3 * 4 * Supported by: the Xorp Project (www.xorp.org) 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/kern/kern_poll.c,v 1.2.2.4 2002/06/27 23:26:33 luigi Exp $ 28 */ 29 30 #include "opt_ifpoll.h" 31 32 #include <sys/param.h> 33 #include <sys/kernel.h> 34 #include <sys/ktr.h> 35 #include <sys/malloc.h> 36 #include <sys/serialize.h> 37 #include <sys/socket.h> 38 #include <sys/sysctl.h> 39 40 #include <sys/thread2.h> 41 #include <sys/msgport2.h> 42 43 #include <machine/atomic.h> 44 #include <machine/clock.h> 45 #include <machine/smp.h> 46 47 #include <net/if.h> 48 #include <net/if_poll.h> 49 #include <net/netmsg2.h> 50 51 /* 52 * Polling support for network device drivers. 53 * 54 * Drivers which support this feature try to register one status polling 55 * handler and several TX/RX polling handlers with the polling code. 56 * If interface's if_npoll is called with non-NULL second argument, then 57 * a register operation is requested, else a deregister operation is 58 * requested. If the requested operation is "register", driver should 59 * setup the ifpoll_info passed in accoding its own needs: 60 * ifpoll_info.ifpi_status.status_func == NULL 61 * No status polling handler will be installed on CPU(0) 62 * ifpoll_info.ifpi_rx[n].poll_func == NULL 63 * No RX polling handler will be installed on CPU(n) 64 * ifpoll_info.ifpi_tx[n].poll_func == NULL 65 * No TX polling handler will be installed on CPU(n) 66 * 67 * RX is polled at the specified polling frequency (net.ifpoll.X.pollhz). 68 * TX and status polling could be done at lower frequency than RX frequency 69 * (net.ifpoll.0.status_frac and net.ifpoll.X.tx_frac). To avoid systimer 70 * staggering at high frequency, RX systimer gives TX and status polling a 71 * piggyback (XXX). 72 * 73 * All of the registered polling handlers are called only if the interface 74 * is marked as 'IFF_RUNNING and IFF_NPOLLING'. However, the interface's 75 * register and deregister function (ifnet.if_npoll) will be called even 76 * if interface is not marked with 'IFF_RUNNING'. 77 * 78 * If registration is successful, the driver must disable interrupts, 79 * and further I/O is performed through the TX/RX polling handler, which 80 * are invoked (at least once per clock tick) with 3 arguments: the "arg" 81 * passed at register time, a struct ifnet pointer, and a "count" limit. 82 * The registered serializer will be held before calling the related 83 * polling handler. 84 * 85 * The count limit specifies how much work the handler can do during the 86 * call -- typically this is the number of packets to be received, or 87 * transmitted, etc. (drivers are free to interpret this number, as long 88 * as the max time spent in the function grows roughly linearly with the 89 * count). 90 * 91 * A second variable controls the sharing of CPU between polling/kernel 92 * network processing, and other activities (typically userlevel tasks): 93 * net.ifpoll.X.{rx,tx}.user_frac (between 0 and 100, default 50) sets the 94 * share of CPU allocated to user tasks. CPU is allocated proportionally 95 * to the shares, by dynamically adjusting the "count" (poll_burst). 96 * 97 * Other parameters can should be left to their default values. 98 * The following constraints hold 99 * 100 * 1 <= poll_burst <= poll_burst_max 101 * 1 <= poll_each_burst <= poll_burst_max 102 * MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX 103 */ 104 105 #define IFPOLL_LIST_LEN 128 106 #define IFPOLL_FREQ_MAX 30000 107 108 #define MIN_IOPOLL_BURST_MAX 10 109 #define MAX_IOPOLL_BURST_MAX 5000 110 #define IOPOLL_BURST_MAX 250 /* good for 1000Mbit net and HZ=6000 */ 111 112 #define IOPOLL_EACH_BURST 50 113 #define IOPOLL_USER_FRAC 50 114 115 #define IFPOLL_FREQ_DEFAULT 6000 116 117 #define IFPOLL_TXFRAC_DEFAULT 1 /* 1/1 of the pollhz */ 118 #define IFPOLL_STFRAC_DEFAULT 120 /* 1/120 of the pollhz */ 119 120 #define IFPOLL_RX 0x1 121 #define IFPOLL_TX 0x2 122 123 union ifpoll_time { 124 struct timeval tv; 125 uint64_t tsc; 126 }; 127 128 struct iopoll_rec { 129 struct lwkt_serialize *serializer; 130 struct ifnet *ifp; 131 void *arg; 132 ifpoll_iofn_t poll_func; 133 }; 134 135 struct iopoll_ctx { 136 union ifpoll_time prev_t; 137 u_long short_ticks; /* statistics */ 138 u_long lost_polls; /* statistics */ 139 u_long suspect; /* statistics */ 140 u_long stalled; /* statistics */ 141 uint32_t pending_polls; /* state */ 142 143 struct netmsg_base poll_netmsg; 144 struct netmsg_base poll_more_netmsg; 145 146 int poll_cpuid; 147 int pollhz; 148 uint32_t phase; /* state */ 149 int residual_burst; /* state */ 150 uint32_t poll_each_burst; /* tunable */ 151 union ifpoll_time poll_start_t; /* state */ 152 153 uint32_t poll_burst; /* state */ 154 uint32_t poll_burst_max; /* tunable */ 155 uint32_t user_frac; /* tunable */ 156 uint32_t kern_frac; /* state */ 157 158 uint32_t poll_handlers; /* next free entry in pr[]. */ 159 struct iopoll_rec pr[IFPOLL_LIST_LEN]; 160 161 struct sysctl_ctx_list poll_sysctl_ctx; 162 struct sysctl_oid *poll_sysctl_tree; 163 } __cachealign; 164 165 struct poll_comm { 166 struct systimer pollclock; 167 int poll_cpuid; 168 169 int stfrac_count; /* state */ 170 int poll_stfrac; /* tunable */ 171 172 int txfrac_count; /* state */ 173 int poll_txfrac; /* tunable */ 174 175 int pollhz; /* tunable */ 176 177 struct sysctl_ctx_list sysctl_ctx; 178 struct sysctl_oid *sysctl_tree; 179 } __cachealign; 180 181 struct stpoll_rec { 182 struct lwkt_serialize *serializer; 183 struct ifnet *ifp; 184 ifpoll_stfn_t status_func; 185 }; 186 187 struct stpoll_ctx { 188 struct netmsg_base poll_netmsg; 189 190 uint32_t poll_handlers; /* next free entry in pr[]. */ 191 struct stpoll_rec pr[IFPOLL_LIST_LEN]; 192 193 struct sysctl_ctx_list poll_sysctl_ctx; 194 struct sysctl_oid *poll_sysctl_tree; 195 } __cachealign; 196 197 struct iopoll_sysctl_netmsg { 198 struct netmsg_base base; 199 struct iopoll_ctx *ctx; 200 }; 201 202 void ifpoll_init_pcpu(int); 203 static void ifpoll_register_handler(netmsg_t); 204 static void ifpoll_deregister_handler(netmsg_t); 205 206 /* 207 * Status polling 208 */ 209 static void stpoll_init(void); 210 static void stpoll_handler(netmsg_t); 211 static void stpoll_clock(struct stpoll_ctx *); 212 static int stpoll_register(struct ifnet *, const struct ifpoll_status *); 213 static int stpoll_deregister(struct ifnet *); 214 215 /* 216 * RX/TX polling 217 */ 218 static struct iopoll_ctx *iopoll_ctx_create(int, int); 219 static void iopoll_init(int); 220 static void rxpoll_handler(netmsg_t); 221 static void txpoll_handler(netmsg_t); 222 static void rxpollmore_handler(netmsg_t); 223 static void txpollmore_handler(netmsg_t); 224 static void iopoll_clock(struct iopoll_ctx *); 225 static int iopoll_register(struct ifnet *, struct iopoll_ctx *, 226 const struct ifpoll_io *); 227 static int iopoll_deregister(struct ifnet *, struct iopoll_ctx *); 228 229 static void iopoll_add_sysctl(struct sysctl_ctx_list *, 230 struct sysctl_oid_list *, struct iopoll_ctx *, int); 231 static void sysctl_burstmax_handler(netmsg_t); 232 static int sysctl_burstmax(SYSCTL_HANDLER_ARGS); 233 static void sysctl_eachburst_handler(netmsg_t); 234 static int sysctl_eachburst(SYSCTL_HANDLER_ARGS); 235 236 /* 237 * Common functions 238 */ 239 static void poll_comm_init(int); 240 static void poll_comm_start(int); 241 static void poll_comm_adjust_pollhz(struct poll_comm *); 242 static void poll_comm_systimer0(systimer_t, int, struct intrframe *); 243 static void poll_comm_systimer(systimer_t, int, struct intrframe *); 244 static void sysctl_pollhz_handler(netmsg_t); 245 static void sysctl_stfrac_handler(netmsg_t); 246 static void sysctl_txfrac_handler(netmsg_t); 247 static int sysctl_pollhz(SYSCTL_HANDLER_ARGS); 248 static int sysctl_stfrac(SYSCTL_HANDLER_ARGS); 249 static int sysctl_txfrac(SYSCTL_HANDLER_ARGS); 250 static int sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS); 251 static int sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS); 252 253 static struct stpoll_ctx stpoll_context; 254 static struct poll_comm *poll_common[MAXCPU]; 255 static struct iopoll_ctx *rxpoll_context[MAXCPU]; 256 static struct iopoll_ctx *txpoll_context[MAXCPU]; 257 258 SYSCTL_NODE(_net, OID_AUTO, ifpoll, CTLFLAG_RW, 0, 259 "Network device polling parameters"); 260 261 static int iopoll_burst_max = IOPOLL_BURST_MAX; 262 static int iopoll_each_burst = IOPOLL_EACH_BURST; 263 static int iopoll_user_frac = IOPOLL_USER_FRAC; 264 265 static int ifpoll_pollhz = IFPOLL_FREQ_DEFAULT; 266 static int ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT; 267 static int ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT; 268 269 TUNABLE_INT("net.ifpoll.burst_max", &iopoll_burst_max); 270 TUNABLE_INT("net.ifpoll.each_burst", &iopoll_each_burst); 271 TUNABLE_INT("net.ifpoll.user_frac", &iopoll_user_frac); 272 TUNABLE_INT("net.ifpoll.pollhz", &ifpoll_pollhz); 273 TUNABLE_INT("net.ifpoll.status_frac", &ifpoll_stfrac); 274 TUNABLE_INT("net.ifpoll.tx_frac", &ifpoll_txfrac); 275 276 #define IFPOLL_FREQ_ADJ(comm) (((comm)->poll_cpuid * 3) % 50) 277 278 static __inline int 279 poll_comm_pollhz_div(const struct poll_comm *comm, int pollhz) 280 { 281 return pollhz + IFPOLL_FREQ_ADJ(comm); 282 } 283 284 static __inline int 285 poll_comm_pollhz_conv(const struct poll_comm *comm, int pollhz) 286 { 287 return pollhz - IFPOLL_FREQ_ADJ(comm); 288 } 289 290 static __inline void 291 ifpoll_sendmsg_oncpu(netmsg_t msg) 292 { 293 if (msg->lmsg.ms_flags & MSGF_DONE) 294 lwkt_sendmsg(netisr_portfn(mycpuid), &msg->lmsg); 295 } 296 297 static __inline void 298 sched_stpoll(struct stpoll_ctx *st_ctx) 299 { 300 ifpoll_sendmsg_oncpu((netmsg_t)&st_ctx->poll_netmsg); 301 } 302 303 static __inline void 304 sched_iopoll(struct iopoll_ctx *io_ctx) 305 { 306 ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_netmsg); 307 } 308 309 static __inline void 310 sched_iopollmore(struct iopoll_ctx *io_ctx) 311 { 312 ifpoll_sendmsg_oncpu((netmsg_t)&io_ctx->poll_more_netmsg); 313 } 314 315 static __inline void 316 ifpoll_time_get(union ifpoll_time *t) 317 { 318 if (__predict_true(tsc_present)) 319 t->tsc = rdtsc(); 320 else 321 microuptime(&t->tv); 322 } 323 324 /* Return time diff in us */ 325 static __inline int 326 ifpoll_time_diff(const union ifpoll_time *s, const union ifpoll_time *e) 327 { 328 if (__predict_true(tsc_present)) { 329 return (((e->tsc - s->tsc) * 1000000) / tsc_frequency); 330 } else { 331 return ((e->tv.tv_usec - s->tv.tv_usec) + 332 (e->tv.tv_sec - s->tv.tv_sec) * 1000000); 333 } 334 } 335 336 /* 337 * Initialize per-cpu qpolling(4) context. Called from kern_clock.c: 338 */ 339 void 340 ifpoll_init_pcpu(int cpuid) 341 { 342 if (cpuid >= ncpus2) 343 return; 344 345 poll_comm_init(cpuid); 346 347 if (cpuid == 0) 348 stpoll_init(); 349 iopoll_init(cpuid); 350 351 poll_comm_start(cpuid); 352 } 353 354 int 355 ifpoll_register(struct ifnet *ifp) 356 { 357 struct ifpoll_info *info; 358 struct netmsg_base nmsg; 359 int error; 360 361 if (ifp->if_npoll == NULL) { 362 /* Device does not support polling */ 363 return EOPNOTSUPP; 364 } 365 366 info = kmalloc(sizeof(*info), M_TEMP, M_WAITOK | M_ZERO); 367 368 /* 369 * Attempt to register. Interlock with IFF_NPOLLING. 370 */ 371 372 ifnet_serialize_all(ifp); 373 374 if (ifp->if_flags & IFF_NPOLLING) { 375 /* Already polling */ 376 ifnet_deserialize_all(ifp); 377 kfree(info, M_TEMP); 378 return EBUSY; 379 } 380 381 info->ifpi_ifp = ifp; 382 383 ifp->if_flags |= IFF_NPOLLING; 384 ifp->if_npoll(ifp, info); 385 386 ifnet_deserialize_all(ifp); 387 388 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 389 0, ifpoll_register_handler); 390 nmsg.lmsg.u.ms_resultp = info; 391 392 error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0); 393 if (error) { 394 if (!ifpoll_deregister(ifp)) { 395 if_printf(ifp, "ifpoll_register: " 396 "ifpoll_deregister failed!\n"); 397 } 398 } 399 400 kfree(info, M_TEMP); 401 return error; 402 } 403 404 int 405 ifpoll_deregister(struct ifnet *ifp) 406 { 407 struct netmsg_base nmsg; 408 int error; 409 410 if (ifp->if_npoll == NULL) 411 return EOPNOTSUPP; 412 413 ifnet_serialize_all(ifp); 414 415 if ((ifp->if_flags & IFF_NPOLLING) == 0) { 416 ifnet_deserialize_all(ifp); 417 return EINVAL; 418 } 419 ifp->if_flags &= ~IFF_NPOLLING; 420 421 ifnet_deserialize_all(ifp); 422 423 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 424 0, ifpoll_deregister_handler); 425 nmsg.lmsg.u.ms_resultp = ifp; 426 427 error = lwkt_domsg(netisr_portfn(0), &nmsg.lmsg, 0); 428 if (!error) { 429 ifnet_serialize_all(ifp); 430 ifp->if_npoll(ifp, NULL); 431 ifnet_deserialize_all(ifp); 432 } 433 return error; 434 } 435 436 static void 437 ifpoll_register_handler(netmsg_t nmsg) 438 { 439 const struct ifpoll_info *info = nmsg->lmsg.u.ms_resultp; 440 int cpuid = mycpuid, nextcpu; 441 int error; 442 443 KKASSERT(cpuid < ncpus2); 444 KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid)); 445 446 if (cpuid == 0) { 447 error = stpoll_register(info->ifpi_ifp, &info->ifpi_status); 448 if (error) 449 goto failed; 450 } 451 452 error = iopoll_register(info->ifpi_ifp, rxpoll_context[cpuid], 453 &info->ifpi_rx[cpuid]); 454 if (error) 455 goto failed; 456 457 error = iopoll_register(info->ifpi_ifp, txpoll_context[cpuid], 458 &info->ifpi_tx[cpuid]); 459 if (error) 460 goto failed; 461 462 /* Adjust polling frequency, after all registration is done */ 463 poll_comm_adjust_pollhz(poll_common[cpuid]); 464 465 nextcpu = cpuid + 1; 466 if (nextcpu < ncpus2) 467 lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg); 468 else 469 lwkt_replymsg(&nmsg->lmsg, 0); 470 return; 471 failed: 472 lwkt_replymsg(&nmsg->lmsg, error); 473 } 474 475 static void 476 ifpoll_deregister_handler(netmsg_t nmsg) 477 { 478 struct ifnet *ifp = nmsg->lmsg.u.ms_resultp; 479 int cpuid = mycpuid, nextcpu; 480 481 KKASSERT(cpuid < ncpus2); 482 KKASSERT(&curthread->td_msgport == netisr_portfn(cpuid)); 483 484 /* Ignore errors */ 485 if (cpuid == 0) 486 stpoll_deregister(ifp); 487 iopoll_deregister(ifp, rxpoll_context[cpuid]); 488 iopoll_deregister(ifp, txpoll_context[cpuid]); 489 490 /* Adjust polling frequency, after all deregistration is done */ 491 poll_comm_adjust_pollhz(poll_common[cpuid]); 492 493 nextcpu = cpuid + 1; 494 if (nextcpu < ncpus2) 495 lwkt_forwardmsg(netisr_portfn(nextcpu), &nmsg->lmsg); 496 else 497 lwkt_replymsg(&nmsg->lmsg, 0); 498 } 499 500 static void 501 stpoll_init(void) 502 { 503 struct stpoll_ctx *st_ctx = &stpoll_context; 504 const struct poll_comm *comm = poll_common[0]; 505 506 sysctl_ctx_init(&st_ctx->poll_sysctl_ctx); 507 st_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&st_ctx->poll_sysctl_ctx, 508 SYSCTL_CHILDREN(comm->sysctl_tree), 509 OID_AUTO, "status", CTLFLAG_RD, 0, ""); 510 511 SYSCTL_ADD_UINT(&st_ctx->poll_sysctl_ctx, 512 SYSCTL_CHILDREN(st_ctx->poll_sysctl_tree), 513 OID_AUTO, "handlers", CTLFLAG_RD, 514 &st_ctx->poll_handlers, 0, 515 "Number of registered status poll handlers"); 516 517 netmsg_init(&st_ctx->poll_netmsg, NULL, &netisr_adone_rport, 518 0, stpoll_handler); 519 } 520 521 /* 522 * stpoll_handler is scheduled by sched_stpoll when appropriate, typically 523 * once per polling systimer tick. 524 */ 525 static void 526 stpoll_handler(netmsg_t msg) 527 { 528 struct stpoll_ctx *st_ctx = &stpoll_context; 529 struct thread *td = curthread; 530 int i; 531 532 KKASSERT(&td->td_msgport == netisr_portfn(0)); 533 534 crit_enter_quick(td); 535 536 /* Reply ASAP */ 537 lwkt_replymsg(&msg->lmsg, 0); 538 539 if (st_ctx->poll_handlers == 0) { 540 crit_exit_quick(td); 541 return; 542 } 543 544 for (i = 0; i < st_ctx->poll_handlers; ++i) { 545 const struct stpoll_rec *rec = &st_ctx->pr[i]; 546 struct ifnet *ifp = rec->ifp; 547 548 if (!lwkt_serialize_try(rec->serializer)) 549 continue; 550 551 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 552 (IFF_RUNNING | IFF_NPOLLING)) 553 rec->status_func(ifp); 554 555 lwkt_serialize_exit(rec->serializer); 556 } 557 558 crit_exit_quick(td); 559 } 560 561 /* 562 * Hook from status poll systimer. Tries to schedule an status poll. 563 * NOTE: Caller should hold critical section. 564 */ 565 static void 566 stpoll_clock(struct stpoll_ctx *st_ctx) 567 { 568 KKASSERT(mycpuid == 0); 569 570 if (st_ctx->poll_handlers == 0) 571 return; 572 sched_stpoll(st_ctx); 573 } 574 575 static int 576 stpoll_register(struct ifnet *ifp, const struct ifpoll_status *st_rec) 577 { 578 struct stpoll_ctx *st_ctx = &stpoll_context; 579 int error; 580 581 KKASSERT(&curthread->td_msgport == netisr_portfn(0)); 582 583 if (st_rec->status_func == NULL) 584 return 0; 585 586 /* 587 * Check if there is room. 588 */ 589 if (st_ctx->poll_handlers >= IFPOLL_LIST_LEN) { 590 /* 591 * List full, cannot register more entries. 592 * This should never happen; if it does, it is probably a 593 * broken driver trying to register multiple times. Checking 594 * this at runtime is expensive, and won't solve the problem 595 * anyways, so just report a few times and then give up. 596 */ 597 static int verbose = 10; /* XXX */ 598 599 if (verbose > 0) { 600 kprintf("status poll handlers list full, " 601 "maybe a broken driver ?\n"); 602 verbose--; 603 } 604 error = ENOENT; 605 } else { 606 struct stpoll_rec *rec = &st_ctx->pr[st_ctx->poll_handlers]; 607 608 rec->ifp = ifp; 609 rec->serializer = st_rec->serializer; 610 rec->status_func = st_rec->status_func; 611 612 st_ctx->poll_handlers++; 613 error = 0; 614 } 615 return error; 616 } 617 618 static int 619 stpoll_deregister(struct ifnet *ifp) 620 { 621 struct stpoll_ctx *st_ctx = &stpoll_context; 622 int i, error; 623 624 KKASSERT(&curthread->td_msgport == netisr_portfn(0)); 625 626 for (i = 0; i < st_ctx->poll_handlers; ++i) { 627 if (st_ctx->pr[i].ifp == ifp) /* Found it */ 628 break; 629 } 630 if (i == st_ctx->poll_handlers) { 631 error = ENOENT; 632 } else { 633 st_ctx->poll_handlers--; 634 if (i < st_ctx->poll_handlers) { 635 /* Last entry replaces this one. */ 636 st_ctx->pr[i] = st_ctx->pr[st_ctx->poll_handlers]; 637 } 638 error = 0; 639 } 640 return error; 641 } 642 643 static __inline void 644 iopoll_reset_state(struct iopoll_ctx *io_ctx) 645 { 646 crit_enter(); 647 io_ctx->poll_burst = io_ctx->poll_each_burst; 648 io_ctx->pending_polls = 0; 649 io_ctx->residual_burst = 0; 650 io_ctx->phase = 0; 651 io_ctx->kern_frac = 0; 652 bzero(&io_ctx->poll_start_t, sizeof(io_ctx->poll_start_t)); 653 bzero(&io_ctx->prev_t, sizeof(io_ctx->prev_t)); 654 crit_exit(); 655 } 656 657 static void 658 iopoll_init(int cpuid) 659 { 660 KKASSERT(cpuid < ncpus2); 661 662 rxpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_RX); 663 txpoll_context[cpuid] = iopoll_ctx_create(cpuid, IFPOLL_TX); 664 } 665 666 static struct iopoll_ctx * 667 iopoll_ctx_create(int cpuid, int poll_type) 668 { 669 struct poll_comm *comm; 670 struct iopoll_ctx *io_ctx; 671 const char *poll_type_str; 672 netisr_fn_t handler, more_handler; 673 674 KKASSERT(poll_type == IFPOLL_RX || poll_type == IFPOLL_TX); 675 676 /* 677 * Make sure that tunables are in sane state 678 */ 679 if (iopoll_burst_max < MIN_IOPOLL_BURST_MAX) 680 iopoll_burst_max = MIN_IOPOLL_BURST_MAX; 681 else if (iopoll_burst_max > MAX_IOPOLL_BURST_MAX) 682 iopoll_burst_max = MAX_IOPOLL_BURST_MAX; 683 684 if (iopoll_each_burst > iopoll_burst_max) 685 iopoll_each_burst = iopoll_burst_max; 686 687 comm = poll_common[cpuid]; 688 689 /* 690 * Create the per-cpu polling context 691 */ 692 io_ctx = kmalloc_cachealign(sizeof(*io_ctx), M_DEVBUF, 693 M_WAITOK | M_ZERO); 694 695 io_ctx->poll_each_burst = iopoll_each_burst; 696 io_ctx->poll_burst_max = iopoll_burst_max; 697 io_ctx->user_frac = iopoll_user_frac; 698 if (poll_type == IFPOLL_RX) 699 io_ctx->pollhz = comm->pollhz; 700 else 701 io_ctx->pollhz = comm->pollhz / (comm->poll_txfrac + 1); 702 io_ctx->poll_cpuid = cpuid; 703 iopoll_reset_state(io_ctx); 704 705 if (poll_type == IFPOLL_RX) { 706 handler = rxpoll_handler; 707 more_handler = rxpollmore_handler; 708 } else { 709 handler = txpoll_handler; 710 more_handler = txpollmore_handler; 711 } 712 713 netmsg_init(&io_ctx->poll_netmsg, NULL, &netisr_adone_rport, 714 0, handler); 715 io_ctx->poll_netmsg.lmsg.u.ms_resultp = io_ctx; 716 717 netmsg_init(&io_ctx->poll_more_netmsg, NULL, &netisr_adone_rport, 718 0, more_handler); 719 io_ctx->poll_more_netmsg.lmsg.u.ms_resultp = io_ctx; 720 721 /* 722 * Initialize per-cpu sysctl nodes 723 */ 724 if (poll_type == IFPOLL_RX) 725 poll_type_str = "rx"; 726 else 727 poll_type_str = "tx"; 728 729 sysctl_ctx_init(&io_ctx->poll_sysctl_ctx); 730 io_ctx->poll_sysctl_tree = SYSCTL_ADD_NODE(&io_ctx->poll_sysctl_ctx, 731 SYSCTL_CHILDREN(comm->sysctl_tree), 732 OID_AUTO, poll_type_str, CTLFLAG_RD, 0, ""); 733 iopoll_add_sysctl(&io_ctx->poll_sysctl_ctx, 734 SYSCTL_CHILDREN(io_ctx->poll_sysctl_tree), io_ctx, poll_type); 735 736 return io_ctx; 737 } 738 739 /* 740 * Hook from iopoll systimer. Tries to schedule an iopoll, but keeps 741 * track of lost ticks due to the previous handler taking too long. 742 * Normally, this should not happen, because polling handler should 743 * run for a short time. However, in some cases (e.g. when there are 744 * changes in link status etc.) the drivers take a very long time 745 * (even in the order of milliseconds) to reset and reconfigure the 746 * device, causing apparent lost polls. 747 * 748 * The first part of the code is just for debugging purposes, and tries 749 * to count how often hardclock ticks are shorter than they should, 750 * meaning either stray interrupts or delayed events. 751 * 752 * WARNING! called from fastint or IPI, the MP lock might not be held. 753 * NOTE: Caller should hold critical section. 754 */ 755 static void 756 iopoll_clock(struct iopoll_ctx *io_ctx) 757 { 758 union ifpoll_time t; 759 int delta; 760 761 KKASSERT(mycpuid == io_ctx->poll_cpuid); 762 763 if (io_ctx->poll_handlers == 0) 764 return; 765 766 ifpoll_time_get(&t); 767 delta = ifpoll_time_diff(&io_ctx->prev_t, &t); 768 if (delta * io_ctx->pollhz < 500000) 769 io_ctx->short_ticks++; 770 else 771 io_ctx->prev_t = t; 772 773 if (io_ctx->pending_polls > 100) { 774 /* 775 * Too much, assume it has stalled (not always true 776 * see comment above). 777 */ 778 io_ctx->stalled++; 779 io_ctx->pending_polls = 0; 780 io_ctx->phase = 0; 781 } 782 783 if (io_ctx->phase <= 2) { 784 if (io_ctx->phase != 0) 785 io_ctx->suspect++; 786 io_ctx->phase = 1; 787 sched_iopoll(io_ctx); 788 io_ctx->phase = 2; 789 } 790 if (io_ctx->pending_polls++ > 0) 791 io_ctx->lost_polls++; 792 } 793 794 /* 795 * rxpoll_handler and txpoll_handler are scheduled by sched_iopoll when 796 * appropriate, typically once per polling systimer tick. 797 * 798 * Note that the message is replied immediately in order to allow a new 799 * ISR to be scheduled in the handler. 800 */ 801 static void 802 rxpoll_handler(netmsg_t msg) 803 { 804 struct iopoll_ctx *io_ctx; 805 struct thread *td = curthread; 806 int i, cycles; 807 808 io_ctx = msg->lmsg.u.ms_resultp; 809 KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 810 811 crit_enter_quick(td); 812 813 /* Reply ASAP */ 814 lwkt_replymsg(&msg->lmsg, 0); 815 816 if (io_ctx->poll_handlers == 0) { 817 crit_exit_quick(td); 818 return; 819 } 820 821 io_ctx->phase = 3; 822 if (io_ctx->residual_burst == 0) { 823 /* First call in this tick */ 824 ifpoll_time_get(&io_ctx->poll_start_t); 825 io_ctx->residual_burst = io_ctx->poll_burst; 826 } 827 cycles = (io_ctx->residual_burst < io_ctx->poll_each_burst) ? 828 io_ctx->residual_burst : io_ctx->poll_each_burst; 829 io_ctx->residual_burst -= cycles; 830 831 for (i = 0; i < io_ctx->poll_handlers; i++) { 832 const struct iopoll_rec *rec = &io_ctx->pr[i]; 833 struct ifnet *ifp = rec->ifp; 834 835 if (!lwkt_serialize_try(rec->serializer)) 836 continue; 837 838 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 839 (IFF_RUNNING | IFF_NPOLLING)) 840 rec->poll_func(ifp, rec->arg, cycles); 841 842 lwkt_serialize_exit(rec->serializer); 843 } 844 845 /* 846 * Do a quick exit/enter to catch any higher-priority 847 * interrupt sources. 848 */ 849 crit_exit_quick(td); 850 crit_enter_quick(td); 851 852 sched_iopollmore(io_ctx); 853 io_ctx->phase = 4; 854 855 crit_exit_quick(td); 856 } 857 858 static void 859 txpoll_handler(netmsg_t msg) 860 { 861 struct iopoll_ctx *io_ctx; 862 struct thread *td = curthread; 863 int i; 864 865 io_ctx = msg->lmsg.u.ms_resultp; 866 KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 867 868 crit_enter_quick(td); 869 870 /* Reply ASAP */ 871 lwkt_replymsg(&msg->lmsg, 0); 872 873 if (io_ctx->poll_handlers == 0) { 874 crit_exit_quick(td); 875 return; 876 } 877 878 io_ctx->phase = 3; 879 880 for (i = 0; i < io_ctx->poll_handlers; i++) { 881 const struct iopoll_rec *rec = &io_ctx->pr[i]; 882 struct ifnet *ifp = rec->ifp; 883 884 if (!lwkt_serialize_try(rec->serializer)) 885 continue; 886 887 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) == 888 (IFF_RUNNING | IFF_NPOLLING)) 889 rec->poll_func(ifp, rec->arg, -1); 890 891 lwkt_serialize_exit(rec->serializer); 892 } 893 894 /* 895 * Do a quick exit/enter to catch any higher-priority 896 * interrupt sources. 897 */ 898 crit_exit_quick(td); 899 crit_enter_quick(td); 900 901 sched_iopollmore(io_ctx); 902 io_ctx->phase = 4; 903 904 crit_exit_quick(td); 905 } 906 907 /* 908 * rxpollmore_handler and txpollmore_handler are called after other netisr's, 909 * possibly scheduling another rxpoll_handler or txpoll_handler call, or 910 * adapting the burst size for the next cycle. 911 * 912 * It is very bad to fetch large bursts of packets from a single card at once, 913 * because the burst could take a long time to be completely processed leading 914 * to unfairness. To reduce the problem, and also to account better for time 915 * spent in network-related processing, we split the burst in smaller chunks 916 * of fixed size, giving control to the other netisr's between chunks. This 917 * helps in improving the fairness, reducing livelock and accounting for the 918 * work performed in low level handling. 919 */ 920 static void 921 rxpollmore_handler(netmsg_t msg) 922 { 923 struct thread *td = curthread; 924 struct iopoll_ctx *io_ctx; 925 union ifpoll_time t; 926 int kern_load; 927 uint32_t pending_polls; 928 929 io_ctx = msg->lmsg.u.ms_resultp; 930 KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 931 932 crit_enter_quick(td); 933 934 /* Replay ASAP */ 935 lwkt_replymsg(&msg->lmsg, 0); 936 937 if (io_ctx->poll_handlers == 0) { 938 crit_exit_quick(td); 939 return; 940 } 941 942 io_ctx->phase = 5; 943 if (io_ctx->residual_burst > 0) { 944 sched_iopoll(io_ctx); 945 crit_exit_quick(td); 946 /* Will run immediately on return, followed by netisrs */ 947 return; 948 } 949 950 /* Here we can account time spent in iopoll's in this tick */ 951 ifpoll_time_get(&t); 952 kern_load = ifpoll_time_diff(&io_ctx->poll_start_t, &t); 953 kern_load = (kern_load * io_ctx->pollhz) / 10000; /* 0..100 */ 954 io_ctx->kern_frac = kern_load; 955 956 if (kern_load > (100 - io_ctx->user_frac)) { 957 /* Try decrease ticks */ 958 if (io_ctx->poll_burst > 1) 959 io_ctx->poll_burst--; 960 } else { 961 if (io_ctx->poll_burst < io_ctx->poll_burst_max) 962 io_ctx->poll_burst++; 963 } 964 965 io_ctx->pending_polls--; 966 pending_polls = io_ctx->pending_polls; 967 968 if (pending_polls == 0) { 969 /* We are done */ 970 io_ctx->phase = 0; 971 } else { 972 /* 973 * Last cycle was long and caused us to miss one or more 974 * hardclock ticks. Restart processing again, but slightly 975 * reduce the burst size to prevent that this happens again. 976 */ 977 io_ctx->poll_burst -= (io_ctx->poll_burst / 8); 978 if (io_ctx->poll_burst < 1) 979 io_ctx->poll_burst = 1; 980 sched_iopoll(io_ctx); 981 io_ctx->phase = 6; 982 } 983 984 crit_exit_quick(td); 985 } 986 987 static void 988 txpollmore_handler(netmsg_t msg) 989 { 990 struct thread *td = curthread; 991 struct iopoll_ctx *io_ctx; 992 uint32_t pending_polls; 993 994 io_ctx = msg->lmsg.u.ms_resultp; 995 KKASSERT(&td->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 996 997 crit_enter_quick(td); 998 999 /* Replay ASAP */ 1000 lwkt_replymsg(&msg->lmsg, 0); 1001 1002 if (io_ctx->poll_handlers == 0) { 1003 crit_exit_quick(td); 1004 return; 1005 } 1006 1007 io_ctx->phase = 5; 1008 1009 io_ctx->pending_polls--; 1010 pending_polls = io_ctx->pending_polls; 1011 1012 if (pending_polls == 0) { 1013 /* We are done */ 1014 io_ctx->phase = 0; 1015 } else { 1016 /* 1017 * Last cycle was long and caused us to miss one or more 1018 * hardclock ticks. Restart processing again. 1019 */ 1020 sched_iopoll(io_ctx); 1021 io_ctx->phase = 6; 1022 } 1023 1024 crit_exit_quick(td); 1025 } 1026 1027 static void 1028 iopoll_add_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent, 1029 struct iopoll_ctx *io_ctx, int poll_type) 1030 { 1031 if (poll_type == IFPOLL_RX) { 1032 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "burst_max", 1033 CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_burstmax, 1034 "IU", "Max Polling burst size"); 1035 1036 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "each_burst", 1037 CTLTYPE_UINT | CTLFLAG_RW, io_ctx, 0, sysctl_eachburst, 1038 "IU", "Max size of each burst"); 1039 1040 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "burst", CTLFLAG_RD, 1041 &io_ctx->poll_burst, 0, "Current polling burst size"); 1042 1043 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "user_frac", CTLFLAG_RW, 1044 &io_ctx->user_frac, 0, "Desired user fraction of cpu time"); 1045 1046 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "kern_frac", CTLFLAG_RD, 1047 &io_ctx->kern_frac, 0, "Kernel fraction of cpu time"); 1048 1049 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "residual_burst", CTLFLAG_RD, 1050 &io_ctx->residual_burst, 0, 1051 "# of residual cycles in burst"); 1052 } 1053 1054 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "phase", CTLFLAG_RD, 1055 &io_ctx->phase, 0, "Polling phase"); 1056 1057 SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "suspect", CTLFLAG_RW, 1058 &io_ctx->suspect, "Suspected events"); 1059 1060 SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "stalled", CTLFLAG_RW, 1061 &io_ctx->stalled, "Potential stalls"); 1062 1063 SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "short_ticks", CTLFLAG_RW, 1064 &io_ctx->short_ticks, 1065 "Hardclock ticks shorter than they should be"); 1066 1067 SYSCTL_ADD_ULONG(ctx, parent, OID_AUTO, "lost_polls", CTLFLAG_RW, 1068 &io_ctx->lost_polls, 1069 "How many times we would have lost a poll tick"); 1070 1071 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "pending_polls", CTLFLAG_RD, 1072 &io_ctx->pending_polls, 0, "Do we need to poll again"); 1073 1074 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "handlers", CTLFLAG_RD, 1075 &io_ctx->poll_handlers, 0, "Number of registered poll handlers"); 1076 } 1077 1078 static void 1079 sysctl_burstmax_handler(netmsg_t nmsg) 1080 { 1081 struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg; 1082 struct iopoll_ctx *io_ctx; 1083 1084 io_ctx = msg->ctx; 1085 KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 1086 1087 io_ctx->poll_burst_max = nmsg->lmsg.u.ms_result; 1088 if (io_ctx->poll_each_burst > io_ctx->poll_burst_max) 1089 io_ctx->poll_each_burst = io_ctx->poll_burst_max; 1090 if (io_ctx->poll_burst > io_ctx->poll_burst_max) 1091 io_ctx->poll_burst = io_ctx->poll_burst_max; 1092 if (io_ctx->residual_burst > io_ctx->poll_burst_max) 1093 io_ctx->residual_burst = io_ctx->poll_burst_max; 1094 1095 lwkt_replymsg(&nmsg->lmsg, 0); 1096 } 1097 1098 static int 1099 sysctl_burstmax(SYSCTL_HANDLER_ARGS) 1100 { 1101 struct iopoll_ctx *io_ctx = arg1; 1102 struct iopoll_sysctl_netmsg msg; 1103 uint32_t burst_max; 1104 int error; 1105 1106 burst_max = io_ctx->poll_burst_max; 1107 error = sysctl_handle_int(oidp, &burst_max, 0, req); 1108 if (error || req->newptr == NULL) 1109 return error; 1110 if (burst_max < MIN_IOPOLL_BURST_MAX) 1111 burst_max = MIN_IOPOLL_BURST_MAX; 1112 else if (burst_max > MAX_IOPOLL_BURST_MAX) 1113 burst_max = MAX_IOPOLL_BURST_MAX; 1114 1115 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 1116 0, sysctl_burstmax_handler); 1117 msg.base.lmsg.u.ms_result = burst_max; 1118 msg.ctx = io_ctx; 1119 1120 return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0); 1121 } 1122 1123 static void 1124 sysctl_eachburst_handler(netmsg_t nmsg) 1125 { 1126 struct iopoll_sysctl_netmsg *msg = (struct iopoll_sysctl_netmsg *)nmsg; 1127 struct iopoll_ctx *io_ctx; 1128 uint32_t each_burst; 1129 1130 io_ctx = msg->ctx; 1131 KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 1132 1133 each_burst = nmsg->lmsg.u.ms_result; 1134 if (each_burst > io_ctx->poll_burst_max) 1135 each_burst = io_ctx->poll_burst_max; 1136 else if (each_burst < 1) 1137 each_burst = 1; 1138 io_ctx->poll_each_burst = each_burst; 1139 1140 lwkt_replymsg(&nmsg->lmsg, 0); 1141 } 1142 1143 static int 1144 sysctl_eachburst(SYSCTL_HANDLER_ARGS) 1145 { 1146 struct iopoll_ctx *io_ctx = arg1; 1147 struct iopoll_sysctl_netmsg msg; 1148 uint32_t each_burst; 1149 int error; 1150 1151 each_burst = io_ctx->poll_each_burst; 1152 error = sysctl_handle_int(oidp, &each_burst, 0, req); 1153 if (error || req->newptr == NULL) 1154 return error; 1155 1156 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 1157 0, sysctl_eachburst_handler); 1158 msg.base.lmsg.u.ms_result = each_burst; 1159 msg.ctx = io_ctx; 1160 1161 return lwkt_domsg(netisr_portfn(io_ctx->poll_cpuid), &msg.base.lmsg, 0); 1162 } 1163 1164 static int 1165 iopoll_register(struct ifnet *ifp, struct iopoll_ctx *io_ctx, 1166 const struct ifpoll_io *io_rec) 1167 { 1168 int error; 1169 1170 KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 1171 1172 if (io_rec->poll_func == NULL) 1173 return 0; 1174 1175 /* 1176 * Check if there is room. 1177 */ 1178 if (io_ctx->poll_handlers >= IFPOLL_LIST_LEN) { 1179 /* 1180 * List full, cannot register more entries. 1181 * This should never happen; if it does, it is probably a 1182 * broken driver trying to register multiple times. Checking 1183 * this at runtime is expensive, and won't solve the problem 1184 * anyways, so just report a few times and then give up. 1185 */ 1186 static int verbose = 10; /* XXX */ 1187 if (verbose > 0) { 1188 kprintf("io poll handlers list full, " 1189 "maybe a broken driver ?\n"); 1190 verbose--; 1191 } 1192 error = ENOENT; 1193 } else { 1194 struct iopoll_rec *rec = &io_ctx->pr[io_ctx->poll_handlers]; 1195 1196 rec->ifp = ifp; 1197 rec->serializer = io_rec->serializer; 1198 rec->arg = io_rec->arg; 1199 rec->poll_func = io_rec->poll_func; 1200 1201 io_ctx->poll_handlers++; 1202 error = 0; 1203 } 1204 return error; 1205 } 1206 1207 static int 1208 iopoll_deregister(struct ifnet *ifp, struct iopoll_ctx *io_ctx) 1209 { 1210 int i, error; 1211 1212 KKASSERT(&curthread->td_msgport == netisr_portfn(io_ctx->poll_cpuid)); 1213 1214 for (i = 0; i < io_ctx->poll_handlers; ++i) { 1215 if (io_ctx->pr[i].ifp == ifp) /* Found it */ 1216 break; 1217 } 1218 if (i == io_ctx->poll_handlers) { 1219 error = ENOENT; 1220 } else { 1221 io_ctx->poll_handlers--; 1222 if (i < io_ctx->poll_handlers) { 1223 /* Last entry replaces this one. */ 1224 io_ctx->pr[i] = io_ctx->pr[io_ctx->poll_handlers]; 1225 } 1226 1227 if (io_ctx->poll_handlers == 0) 1228 iopoll_reset_state(io_ctx); 1229 error = 0; 1230 } 1231 return error; 1232 } 1233 1234 static void 1235 poll_comm_init(int cpuid) 1236 { 1237 struct poll_comm *comm; 1238 char cpuid_str[16]; 1239 1240 comm = kmalloc_cachealign(sizeof(*comm), M_DEVBUF, M_WAITOK | M_ZERO); 1241 1242 if (ifpoll_stfrac < 1) 1243 ifpoll_stfrac = IFPOLL_STFRAC_DEFAULT; 1244 if (ifpoll_txfrac < 1) 1245 ifpoll_txfrac = IFPOLL_TXFRAC_DEFAULT; 1246 1247 comm->poll_cpuid = cpuid; 1248 comm->pollhz = poll_comm_pollhz_div(comm, ifpoll_pollhz); 1249 comm->poll_stfrac = ifpoll_stfrac - 1; 1250 comm->poll_txfrac = ifpoll_txfrac - 1; 1251 1252 ksnprintf(cpuid_str, sizeof(cpuid_str), "%d", cpuid); 1253 1254 sysctl_ctx_init(&comm->sysctl_ctx); 1255 comm->sysctl_tree = SYSCTL_ADD_NODE(&comm->sysctl_ctx, 1256 SYSCTL_STATIC_CHILDREN(_net_ifpoll), 1257 OID_AUTO, cpuid_str, CTLFLAG_RD, 0, ""); 1258 1259 SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree), 1260 OID_AUTO, "pollhz", CTLTYPE_INT | CTLFLAG_RW, 1261 comm, 0, sysctl_pollhz, 1262 "I", "Device polling frequency"); 1263 1264 if (cpuid == 0) { 1265 SYSCTL_ADD_PROC(&comm->sysctl_ctx, 1266 SYSCTL_CHILDREN(comm->sysctl_tree), 1267 OID_AUTO, "status_frac", 1268 CTLTYPE_INT | CTLFLAG_RW, 1269 comm, 0, sysctl_stfrac, 1270 "I", "# of cycles before status is polled"); 1271 } 1272 SYSCTL_ADD_PROC(&comm->sysctl_ctx, SYSCTL_CHILDREN(comm->sysctl_tree), 1273 OID_AUTO, "tx_frac", CTLTYPE_INT | CTLFLAG_RW, 1274 comm, 0, sysctl_txfrac, 1275 "I", "# of cycles before TX is polled"); 1276 1277 poll_common[cpuid] = comm; 1278 } 1279 1280 static void 1281 poll_comm_start(int cpuid) 1282 { 1283 struct poll_comm *comm = poll_common[cpuid]; 1284 systimer_func_t func; 1285 1286 /* 1287 * Initialize systimer 1288 */ 1289 if (cpuid == 0) 1290 func = poll_comm_systimer0; 1291 else 1292 func = poll_comm_systimer; 1293 systimer_init_periodic_nq(&comm->pollclock, func, comm, 1); 1294 } 1295 1296 static void 1297 _poll_comm_systimer(struct poll_comm *comm) 1298 { 1299 iopoll_clock(rxpoll_context[comm->poll_cpuid]); 1300 if (comm->txfrac_count-- == 0) { 1301 comm->txfrac_count = comm->poll_txfrac; 1302 iopoll_clock(txpoll_context[comm->poll_cpuid]); 1303 } 1304 } 1305 1306 static void 1307 poll_comm_systimer0(systimer_t info, int in_ipi __unused, 1308 struct intrframe *frame __unused) 1309 { 1310 struct poll_comm *comm = info->data; 1311 globaldata_t gd = mycpu; 1312 1313 KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid == 0); 1314 1315 crit_enter_gd(gd); 1316 1317 if (comm->stfrac_count-- == 0) { 1318 comm->stfrac_count = comm->poll_stfrac; 1319 stpoll_clock(&stpoll_context); 1320 } 1321 _poll_comm_systimer(comm); 1322 1323 crit_exit_gd(gd); 1324 } 1325 1326 static void 1327 poll_comm_systimer(systimer_t info, int in_ipi __unused, 1328 struct intrframe *frame __unused) 1329 { 1330 struct poll_comm *comm = info->data; 1331 globaldata_t gd = mycpu; 1332 1333 KKASSERT(comm->poll_cpuid == gd->gd_cpuid && gd->gd_cpuid != 0); 1334 1335 crit_enter_gd(gd); 1336 _poll_comm_systimer(comm); 1337 crit_exit_gd(gd); 1338 } 1339 1340 static void 1341 poll_comm_adjust_pollhz(struct poll_comm *comm) 1342 { 1343 uint32_t handlers; 1344 int pollhz = 1; 1345 1346 KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid)); 1347 1348 /* 1349 * If there is no polling handler registered, set systimer 1350 * frequency to the lowest value. Polling systimer frequency 1351 * will be adjusted to the requested value, once there are 1352 * registered handlers. 1353 */ 1354 handlers = rxpoll_context[mycpuid]->poll_handlers + 1355 txpoll_context[mycpuid]->poll_handlers; 1356 if (comm->poll_cpuid == 0) 1357 handlers += stpoll_context.poll_handlers; 1358 if (handlers) 1359 pollhz = comm->pollhz; 1360 systimer_adjust_periodic(&comm->pollclock, pollhz); 1361 } 1362 1363 static int 1364 sysctl_pollhz(SYSCTL_HANDLER_ARGS) 1365 { 1366 struct poll_comm *comm = arg1; 1367 struct netmsg_base nmsg; 1368 int error, phz; 1369 1370 phz = poll_comm_pollhz_conv(comm, comm->pollhz); 1371 error = sysctl_handle_int(oidp, &phz, 0, req); 1372 if (error || req->newptr == NULL) 1373 return error; 1374 if (phz <= 0) 1375 return EINVAL; 1376 else if (phz > IFPOLL_FREQ_MAX) 1377 phz = IFPOLL_FREQ_MAX; 1378 1379 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 1380 0, sysctl_pollhz_handler); 1381 nmsg.lmsg.u.ms_result = phz; 1382 1383 return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0); 1384 } 1385 1386 static void 1387 sysctl_pollhz_handler(netmsg_t nmsg) 1388 { 1389 struct poll_comm *comm = poll_common[mycpuid]; 1390 1391 KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid)); 1392 1393 /* Save polling frequency */ 1394 comm->pollhz = poll_comm_pollhz_div(comm, nmsg->lmsg.u.ms_result); 1395 1396 /* 1397 * Adjust cached pollhz 1398 */ 1399 rxpoll_context[mycpuid]->pollhz = comm->pollhz; 1400 txpoll_context[mycpuid]->pollhz = 1401 comm->pollhz / (comm->poll_txfrac + 1); 1402 1403 /* 1404 * Adjust polling frequency 1405 */ 1406 poll_comm_adjust_pollhz(comm); 1407 1408 lwkt_replymsg(&nmsg->lmsg, 0); 1409 } 1410 1411 static int 1412 sysctl_stfrac(SYSCTL_HANDLER_ARGS) 1413 { 1414 struct poll_comm *comm = arg1; 1415 struct netmsg_base nmsg; 1416 int error, stfrac; 1417 1418 KKASSERT(comm->poll_cpuid == 0); 1419 1420 stfrac = comm->poll_stfrac + 1; 1421 error = sysctl_handle_int(oidp, &stfrac, 0, req); 1422 if (error || req->newptr == NULL) 1423 return error; 1424 if (stfrac < 1) 1425 return EINVAL; 1426 1427 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 1428 0, sysctl_stfrac_handler); 1429 nmsg.lmsg.u.ms_result = stfrac - 1; 1430 1431 return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0); 1432 } 1433 1434 static void 1435 sysctl_stfrac_handler(netmsg_t nmsg) 1436 { 1437 struct poll_comm *comm = poll_common[mycpuid]; 1438 int stfrac = nmsg->lmsg.u.ms_result; 1439 1440 KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid)); 1441 1442 crit_enter(); 1443 comm->poll_stfrac = stfrac; 1444 if (comm->stfrac_count > comm->poll_stfrac) 1445 comm->stfrac_count = comm->poll_stfrac; 1446 crit_exit(); 1447 1448 lwkt_replymsg(&nmsg->lmsg, 0); 1449 } 1450 1451 static int 1452 sysctl_txfrac(SYSCTL_HANDLER_ARGS) 1453 { 1454 struct poll_comm *comm = arg1; 1455 struct netmsg_base nmsg; 1456 int error, txfrac; 1457 1458 txfrac = comm->poll_txfrac + 1; 1459 error = sysctl_handle_int(oidp, &txfrac, 0, req); 1460 if (error || req->newptr == NULL) 1461 return error; 1462 if (txfrac < 1) 1463 return EINVAL; 1464 1465 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 1466 0, sysctl_txfrac_handler); 1467 nmsg.lmsg.u.ms_result = txfrac - 1; 1468 1469 return lwkt_domsg(netisr_portfn(comm->poll_cpuid), &nmsg.lmsg, 0); 1470 } 1471 1472 static void 1473 sysctl_txfrac_handler(netmsg_t nmsg) 1474 { 1475 struct poll_comm *comm = poll_common[mycpuid]; 1476 int txfrac = nmsg->lmsg.u.ms_result; 1477 1478 KKASSERT(&curthread->td_msgport == netisr_portfn(comm->poll_cpuid)); 1479 1480 crit_enter(); 1481 comm->poll_txfrac = txfrac; 1482 if (comm->txfrac_count > comm->poll_txfrac) 1483 comm->txfrac_count = comm->poll_txfrac; 1484 crit_exit(); 1485 1486 lwkt_replymsg(&nmsg->lmsg, 0); 1487 } 1488 1489 void 1490 ifpoll_compat_setup(struct ifpoll_compat *cp, 1491 struct sysctl_ctx_list *sysctl_ctx, 1492 struct sysctl_oid *sysctl_tree, 1493 int unit, struct lwkt_serialize *slz) 1494 { 1495 cp->ifpc_stcount = 0; 1496 cp->ifpc_stfrac = ((poll_common[0]->poll_stfrac + 1) * 1497 howmany(IOPOLL_BURST_MAX, IOPOLL_EACH_BURST)) - 1; 1498 1499 cp->ifpc_cpuid = unit % ncpus2; 1500 cp->ifpc_serializer = slz; 1501 1502 if (sysctl_ctx != NULL && sysctl_tree != NULL) { 1503 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1504 OID_AUTO, "npoll_stfrac", CTLTYPE_INT | CTLFLAG_RW, 1505 cp, 0, sysctl_compat_npoll_stfrac, "I", 1506 "polling status frac"); 1507 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1508 OID_AUTO, "npoll_cpuid", CTLTYPE_INT | CTLFLAG_RW, 1509 cp, 0, sysctl_compat_npoll_cpuid, "I", 1510 "polling cpuid"); 1511 } 1512 } 1513 1514 static int 1515 sysctl_compat_npoll_stfrac(SYSCTL_HANDLER_ARGS) 1516 { 1517 struct ifpoll_compat *cp = arg1; 1518 int error = 0, stfrac; 1519 1520 lwkt_serialize_enter(cp->ifpc_serializer); 1521 1522 stfrac = cp->ifpc_stfrac + 1; 1523 error = sysctl_handle_int(oidp, &stfrac, 0, req); 1524 if (!error && req->newptr != NULL) { 1525 if (stfrac < 1) { 1526 error = EINVAL; 1527 } else { 1528 cp->ifpc_stfrac = stfrac - 1; 1529 if (cp->ifpc_stcount > cp->ifpc_stfrac) 1530 cp->ifpc_stcount = cp->ifpc_stfrac; 1531 } 1532 } 1533 1534 lwkt_serialize_exit(cp->ifpc_serializer); 1535 return error; 1536 } 1537 1538 static int 1539 sysctl_compat_npoll_cpuid(SYSCTL_HANDLER_ARGS) 1540 { 1541 struct ifpoll_compat *cp = arg1; 1542 int error = 0, cpuid; 1543 1544 lwkt_serialize_enter(cp->ifpc_serializer); 1545 1546 cpuid = cp->ifpc_cpuid; 1547 error = sysctl_handle_int(oidp, &cpuid, 0, req); 1548 if (!error && req->newptr != NULL) { 1549 if (cpuid < 0 || cpuid >= ncpus2) 1550 error = EINVAL; 1551 else 1552 cp->ifpc_cpuid = cpuid; 1553 } 1554 1555 lwkt_serialize_exit(cp->ifpc_serializer); 1556 return error; 1557 } 1558