/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgport.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/spinlock2.h>
#include <sys/mbuf.h>
#include <vm/pmap.h>

#include <net/netmsg2.h>
#include <sys/socketvar2.h>

#include <net/netisr.h>
#include <net/netmsg.h>

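/*
 * The so_pru_*() wrappers below package a protocol request into a netmsg
 * and dispatch it to the protocol thread owning so->so_port.  Synchronous
 * requests use lwkt_domsg() and block until the reply arrives;
 * asynchronous requests use lwkt_sendmsg() and rely on the reply port
 * for cleanup.  The *_direct() and *_oncpu() variants bypass the message
 * port entirely and call the pru function in the current thread; they
 * are only safe under the conditions noted in their individual comments.
 */
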
/*
 * Abort a socket and free it.  Called from soabort() only.  soabort()
 * got a ref on the socket which we must free on reply.
 */
void
so_pru_abort(struct socket *so)
{
	struct netmsg_pru_abort msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	(void)lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	sofree(msg.base.nm_so);
}

/*
 * Abort a socket and free it, asynchronously.  Called from
 * soaborta() only.  soaborta() got a ref on the socket which we must
 * free on reply.
 */
void
so_pru_aborta(struct socket *so)
{
	struct netmsg_pru_abort *msg;

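	/*
	 * The message must be heap-allocated because the sender does not
	 * wait for it.  The reply port, netisr_afree_free_so_rport, is
	 * presumably responsible for freeing both the message and the
	 * socket reference when the protocol replies.
	 */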
	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK | M_ZERO);
	netmsg_init(&msg->base, so, &netisr_afree_free_so_rport,
		    0, so->so_proto->pr_usrreqs->pru_abort);
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

/*
 * Abort a socket and free it.  Called from soabort_oncpu() only.
 * Caller must make sure that the current CPU is the inpcb's owner CPU.
 */
void
so_pru_abort_oncpu(struct socket *so)
{
	struct netmsg_pru_abort msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_abort;

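	/*
	 * Fake a synchronous message: clear MSGF_REPLY/MSGF_DONE, mark
	 * the message MSGF_SYNC, and call the pru function directly in
	 * the current thread.  The reply port (netisr_adone_rport) then
	 * simply marks the message done.  The *_direct() functions below
	 * use the same pattern.
	 */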
	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	sofree(msg.base.nm_so);
}

int
so_pru_accept(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_accept msg;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
	    0, so->so_proto->pr_usrreqs->pru_accept);
	msg.nm_nam = nam;

	return lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
}

int
so_pru_attach(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_attach);
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_attach_direct(struct socket *so, int proto, struct pru_attach_info *ai)
{
	struct netmsg_pru_attach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_attach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_proto = proto;
	msg.nm_ai = ai;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

/*
 * NOTE: If the target port changes, the bind operation will deal with it.
 */
int
so_pru_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_bind msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_bind);
	msg.nm_nam = nam;
	msg.nm_td = td;		/* used only for prison_ip() */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct netmsg_pru_connect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_connect);
	msg.nm_nam = nam;
	msg.nm_td = td;
	msg.nm_m = NULL;
	msg.nm_flags = 0;
	msg.nm_reconnect = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_connect2(struct socket *so1, struct socket *so2)
{
	struct netmsg_pru_connect2 msg;
	int error;

	netmsg_init(&msg.base, so1, &curthread->td_msgport,
		    0, so1->so_proto->pr_usrreqs->pru_connect2);
	msg.nm_so1 = so1;
	msg.nm_so2 = so2;
	error = lwkt_domsg(so1->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * WARNING!  Synchronous call from user context.  Control function may do
 *	     copyin/copyout.
 */
int
so_pru_control_direct(struct socket *so, u_long cmd, caddr_t data,
		      struct ifnet *ifp)
{
	struct netmsg_pru_control msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_control;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	msg.nm_cmd = cmd;
	msg.nm_data = data;
	msg.nm_ifp = ifp;
	msg.nm_td = curthread;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
	return(msg.base.lmsg.ms_error);
}

int
so_pru_detach(struct socket *so)
{
	struct netmsg_pru_detach msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_detach);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_detach_direct(struct socket *so)
{
	struct netmsg_pru_detach msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_detach;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_disconnect(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_disconnect);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

void
so_pru_disconnect_direct(struct socket *so)
{
	struct netmsg_pru_disconnect msg;
	netisr_fn_t func = so->so_proto->pr_usrreqs->pru_disconnect;

	netmsg_init(&msg.base, so, &netisr_adone_rport, 0, func);
	msg.base.lmsg.ms_flags &= ~(MSGF_REPLY | MSGF_DONE);
	msg.base.lmsg.ms_flags |= MSGF_SYNC;
	func((netmsg_t)&msg);
	KKASSERT(msg.base.lmsg.ms_flags & MSGF_DONE);
}

int
so_pru_listen(struct socket *so, struct thread *td)
{
	struct netmsg_pru_listen msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_listen);
	msg.nm_td = td;		/* used only for prison_ip() XXX JH */
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_peeraddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_peeraddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_rcvd(struct socket *so, int flags)
{
	struct netmsg_pru_rcvd msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvd);
	msg.nm_flags = flags;
	msg.nm_pru_flags = 0;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

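/*
 * Notify the protocol asynchronously that receive space has been
 * consumed.  A single message embedded in the socket (so->so_rcvd_msg)
 * is reused; if it is still in flight (MSGF_DONE clear) the
 * notification is skipped, and if the async path has been marked
 * PRUR_DEAD the notification is dropped entirely.
 */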
void
so_pru_rcvd_async(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_RCVD,
	    ("async pru_rcvd is not supported"));

	/*
	 * WARNING!  Spinlock is a bit dodgy, use hacked up sendmsg
	 *	     to avoid deadlocking.
	 */
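	/*
	 * (The two-stage send presumably lets lwkt_sendmsg_stage1()
	 * queue the message while so_rcvd_spin is still held, deferring
	 * the target-port wakeup to lwkt_sendmsg_stage2() after the
	 * spinlock has been released.)
	 */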
	spin_lock(&so->so_rcvd_spin);
	if ((so->so_rcvd_msg.nm_pru_flags & PRUR_DEAD) == 0) {
		if (lmsg->ms_flags & MSGF_DONE) {
			soreference(so);
			lwkt_sendmsg_stage1(so->so_port, lmsg);
			spin_unlock(&so->so_rcvd_spin);
			lwkt_sendmsg_stage2(so->so_port, lmsg);
		} else {
			spin_unlock(&so->so_rcvd_spin);
		}
	} else {
		static int deadlog = 0;

		if (!deadlog) {
			kprintf("async rcvd is dead\n");
			deadlog = 1;
		}
		spin_unlock(&so->so_rcvd_spin);
	}
}

int
so_pru_rcvoob(struct socket *so, struct mbuf *m, int flags)
{
	struct netmsg_pru_rcvoob msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_rcvoob);
	msg.nm_m = m;
	msg.nm_flags = flags;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * NOTE: If the target port changes, the implied connect will deal with it.
 */
int
so_pru_send(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg.nm_flags = flags;
	msg.nm_m = m;
	msg.nm_addr = addr;
	msg.nm_control = control;
	msg.nm_td = td;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

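/*
 * Synchronize with the socket's protocol thread by running a no-op
 * message (netmsg_sync_handler) through its port.  Since the port
 * processes messages in order, all messages queued ahead of this one
 * should have been handled by the time it returns.
 */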
void
so_pru_sync(struct socket *so)
{
	struct netmsg_base msg;

	netmsg_init(&msg, so, &curthread->td_msgport, 0,
	    netmsg_sync_handler);
	lwkt_domsg(so->so_port, &msg.lmsg, 0);
}

void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
	    struct sockaddr *addr0, struct mbuf *control, struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	flags |= PRUS_NOREPLY;
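	/*
	 * The caller's sockaddr may be temporary (e.g. stack-allocated),
	 * so take a private copy; PRUS_FREEADDR presumably tells the
	 * protocol side to free the copy once the message is processed.
	 */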
	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK);
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}

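	/*
	 * Use the netmsg embedded in the mbuf header; no separate
	 * allocation is needed for the asynchronous send.  The apanic
	 * reply port presumably panics if a reply is ever generated,
	 * which PRUS_NOREPLY is meant to prevent.
	 */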
	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport,
		    0, so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}

int
so_pru_sense(struct socket *so, struct stat *sb)
{
	struct netmsg_pru_sense msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sense);
	msg.nm_stat = sb;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_shutdown(struct socket *so)
{
	struct netmsg_pru_shutdown msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_shutdown);
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pru_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct netmsg_pru_sockaddr msg;
	int error;

	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_usrreqs->pru_sockaddr);
	msg.nm_nam = nam;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

int
so_pr_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct netmsg_pr_ctloutput msg;
	int error;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	netmsg_init(&msg.base, so, &curthread->td_msgport,
		    0, so->so_proto->pr_ctloutput);
	msg.nm_sopt = sopt;
	error = lwkt_domsg(so->so_port, &msg.base.lmsg, 0);
	return (error);
}

/*
 * Protocol control input, typically via icmp.
 *
 * If the protocol's pr_ctlport is not NULL we call it to figure out the
 * protocol port.  If NULL is returned we can just return, otherwise
 * we issue a netmsg to call pr_ctlinput in the proper thread.
 *
 * This must be done synchronously as arg and/or extra may point to
 * temporary data.
 */
void
so_pru_ctlinput(struct protosw *pr, int cmd, struct sockaddr *arg, void *extra)
{
	struct netmsg_pru_ctlinput msg;
	lwkt_port_t port;

	if (pr->pr_ctlport == NULL)
		return;
	KKASSERT(pr->pr_ctlinput != NULL);
	port = pr->pr_ctlport(cmd, arg, extra);
	if (port == NULL)
		return;
	netmsg_init(&msg.base, NULL, &curthread->td_msgport,
		    0, pr->pr_ctlinput);
	msg.nm_cmd = cmd;
	msg.nm_arg = arg;
	msg.nm_extra = extra;
	lwkt_domsg(port, &msg.base.lmsg, 0);
}

/*
 * If we convert all the protosw pr_ functions for all the protocols
 * to take a message directly, this layer can go away.  For the moment
 * our dispatcher ignores the return value, but since we are handling
 * the replymsg ourselves we return EASYNC by convention.
 */

/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct lwkt_token *tok;
	struct signalsockbuf *ssb;

	ssb = (msg->notify.nm_etype & NM_REVENT) ?
			&msg->base.nm_so->so_rcv :
			&msg->base.nm_so->so_snd;

	/*
	 * Reply immediately if the event has occurred, otherwise queue the
	 * request.
	 *
	 * NOTE: Socket can change if this is an accept predicate so cache
	 *	 the token.
	 */
	tok = lwkt_token_pool_lookup(msg->base.nm_so);
	lwkt_gettoken(tok);
	if (msg->notify.nm_predicate(&msg->notify)) {
		lwkt_reltoken(tok);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist, &msg->notify, nm_list);
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_reltoken(tok);
	}
}

/*
 * Called by doio when trying to abort a netmsg_so_notify message.
 * Unlike the other functions this one is dispatched directly by
 * the LWKT subsystem, so it takes a lwkt_msg_t as an argument.
 *
 * The original message, lmsg, is under the control of the caller and
 * will not be destroyed until we return so we can safely reference it
 * in our synchronous abort request.
 *
 * This part of the abort request occurs on the originating cpu which
 * means we may race the message flags and the original message may
 * not even have been processed by the target cpu yet.
 */
void
netmsg_so_notify_doabort(lwkt_msg_t lmsg)
{
	struct netmsg_so_notify_abort msg;

	if ((lmsg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		netmsg_init(&msg.base, NULL, &curthread->td_msgport,
			    0, netmsg_so_notify_abort);
		msg.nm_notifymsg = (void *)lmsg;
		lwkt_domsg(lmsg->ms_target_port, &msg.base.lmsg, 0);
	}
}

/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		ssb = (nmsg->nm_etype & NM_REVENT) ?
				&nmsg->base.nm_so->so_rcv :
				&nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message.
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}

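/*
 * Reply the asynchronous pru_rcvd message embedded in the socket and
 * drop the socket reference that was taken when it was sent.
 * Presumably called from the protocol side once the notification has
 * been processed.
 */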
void
so_async_rcvd_reply(struct socket *so)
{
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_null_replyport()
	 */
	spin_lock(&so->so_rcvd_spin);
	lwkt_replymsg(&so->so_rcvd_msg.base.lmsg, 0);
	spin_unlock(&so->so_rcvd_spin);
	sofree(so);
}

void
so_async_rcvd_drop(struct socket *so)
{
	lwkt_msg_t lmsg = &so->so_rcvd_msg.base.lmsg;

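	/*
	 * If the drop succeeds we own the reference that was taken when
	 * the message was sent, so release it.  If the message is still
	 * in flight we cannot drop it; mark the async path dead, wait
	 * briefly for the reply to complete, and retry.
	 */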
again:
	/*
	 * Spinlock safe, reply runs to degenerate lwkt_spin_dropmsg()
	 */
	spin_lock(&so->so_rcvd_spin);
	if (lwkt_dropmsg(lmsg) == 0)
		sofree(so);
	so->so_rcvd_msg.nm_pru_flags |= PRUR_DEAD;
	spin_unlock(&so->so_rcvd_spin);
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		kprintf("Warning: tcp: so_async_rcvd_drop() raced message\n");
		tsleep(so, 0, "soadrop", 1);
		goto again;
	}
}
639