/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

static int async_rcvd_drop_race = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, async_rcvd_drop_race, CTLFLAG_RW,
    &async_rcvd_drop_race, 0, "# of asynchronous pru_rcvd msg drop races");

/*
 * Abort a socket and free it.  Called from soabort() only.  soabort()
 * got a ref on the socket which we must free on reply.
 */
void
so_pru_abort(struct socket *so)
{
	struct netmsg_pru_abort msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	(void)lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	sofree(msg.base.nm_so);
}

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soaborta() only.  soaborta() got a ref on the socket which we must
 * free on reply.
 */
void
so_pru_aborta(struct socket *so)
{
	struct netmsg_pru_abort *msg;

	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

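/*
 * The wrappers in this file dispatch protocol requests to the protocol
 * thread owning so->so_port using one of three styles:
 *
 * 1. Synchronous: build a netmsg on the stack and lwkt_domsg() it,
 *    blocking until the protocol thread replies (e.g. so_pru_abort()
 *    above).
 *
 * 2. Asynchronous: allocate or embed a netmsg and lwkt_sendmsg() it,
 *    returning immediately (e.g. so_pru_aborta() above, or
 *    so_pru_send_async() below, which embeds the message in the mbuf).
 *
 * 3. Direct: mark an on-stack netmsg MSGF_SYNC and call the pru
 *    function in the caller's context, bypassing the port entirely
 *    (the *_direct() variants below).
 */
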
/*
 * Abort a socket and free it.  Called from soabort_oncpu() only.
 * The caller must make sure that the current CPU is the inpcb's
 * owner CPU.
 */
void
so_pru_abort_oncpu(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}

int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_flags = 0;
	msg.nm_reconnect = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
		    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}

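/*
 * The *_direct() dispatch idiom used above and below, in miniature
 * (illustrative sketch only).  The on-stack message is flagged
 * MSGF_SYNC with MSGF_REPLY and MSGF_DONE cleared, so the pru
 * function's reply completes the message in place rather than queueing
 * it to a reply port; the KKASSERT() verifies that the handler replied
 * before returning:
 *
 *	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
 *	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
 *	msg.base.lmsg.ms_flags |= MSGF_SYNC;
 *	func((netmsg_t)&msg);
 *	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
 */
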
/*
 * WARNING!  Synchronous call from user context.  The control function
 * may do copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
		      struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

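/*
 * Asynchronous pru_rcvd support.  The message is embedded in the
 * socket itself (so->so_rcvd_msg) so it can be reused without
 * allocation; so_rcvd_spin interlocks dispatch here against the reply
 * (so_async_rcvd_reply()) and teardown (so_async_rcvd_drop()) paths at
 * the end of this file.  A new message is dispatched only if the
 * previous one has completed (MSGF_DONE) and the socket has not been
 * marked PRUR_DEAD.
 */
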
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  The spinlock is a bit dodgy; use the hacked-up
	 * two-stage sendmsg to avoid deadlocking.
	 */
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			lwkt_sendmsg_stage1(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			lwkt_sendmsg_stage2(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		spin_unlock(&so->so_rcvd_spin);
	}
}

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
		    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

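/*
 * Asynchronous pru_send support.  The netmsg is embedded in the mbuf
 * header (m->m_hdr.mh_sndmsg) so the hot path requires no allocation;
 * only the sockaddr, if any, is copied.  Once lwkt_sendmsg() is called
 * the mbuf, and thus the embedded message, belongs to the protocol
 * thread and must not be referenced by the caller.
 */
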
void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
		  struct sockaddr *addr0, struct mbuf *control,
		  struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_NOWAIT);
		if (addr == NULL) {
			/*
			 * Failed to allocate the address without
			 * waiting; fall back to the synchronous
			 * pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol's pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pru_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pru_ctlinput msg;
	lwkt_port_t port;

	if (pr->pr_ctlport == NULL)
		return;
	KKASSERT(pr->pr_ctlinput != NULL);
	port = pr->pr_ctlport(cmd, arg, extra);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

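/*
 * Predicate event machinery.  A blocking entity queues a
 * netmsg_so_notify whose nm_predicate() is re-tested as socket events
 * occur; the message is replied to, waking the waiter, once the
 * predicate returns true or the wait is aborted.  An illustrative
 * dispatch sketch (my_predicate is a hypothetical caller-supplied
 * function, and the exact init call may vary):
 *
 *	struct netmsg_so_notify msg;
 *
 *	netmsg_init_abortable(&msg.base, so, &curthread->td_msgport,
 *			      0, netmsg_so_notify,
 *			      netmsg_so_notify_doabort);
 *	msg.nm_predicate = my_predicate;
 *	msg.nm_etype = NM_REVENT;	(NM_REVENT selects so_rcv)
 *	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
 */
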
/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct lwkt_token *tok;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ?
	    &msg->base.nm_so->so_rcv :
	    &msg->base.nm_so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue
	 * the request.
	 *
	 * NOTE: The socket can change if this is an accept predicate,
	 *	 so cache the token.
	 */
	tok = lwkt_token_pool_lookup(msg->base.nm_so);
	lwkt_gettoken(tok);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify,
		    nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, 'tok' will be released,
		 * so the SSB_MEVENT set beforehand could have been
		 * cleared by the time we get here.  In case that
		 * happens, we set SSB_MEVENT again after the notify has
		 * been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
	}
}

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return, so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu, which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		const struct netmsg_base *nmsg =
		    (const struct netmsg_base *)lmsg;

		netmsg_init(&msg.base, nmsg->nm_so, &curthread->td_msgport,
			    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
		    &nmsg->base.nm_so->so_rcv :
		    &nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message.
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe; the reply runs to the degenerate
	 * lwkt_null_replyport().
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
}

void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	/*
	 * Spinlock safe; the drop runs to the degenerate
	 * lwkt_spin_dropmsg().
	 */
	spin_lock(&so->so_rcvd_spin);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
again:
	lwkt_dropmsg(lmsg);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		++async_rcvd_drop_race;
		ssleep(so, &so->so_rcvd_spin, 0, "soadrop", 1);
		goto again;
	}
	spin_unlock(&so->so_rcvd_spin);
}