/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SOCKETVAR_H_
#define _SYS_SOCKETVAR_H_

/*
 * Socket generation count type.  Also used in xinpcb, xtcpcb, xunpcb.
 */
typedef uint64_t so_gen_t;

#if defined(_KERNEL) || defined(_WANT_SOCKET)
#include <sys/queue.h>		/* for TAILQ macros */
#include <sys/selinfo.h>	/* for struct selinfo */
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/osd.h>
#include <sys/_sx.h>
#include <sys/sockbuf.h>
#include <sys/_task.h>
#ifdef _KERNEL
#include <sys/caprights.h>
#include <sys/sockopt.h>
#else
#include <stdbool.h>
#endif

struct vnet;

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
typedef int	so_upcall_t(struct socket *, void *, int);
typedef void	so_dtor_t(struct socket *);

struct socket;

enum socket_qstate {
	SQ_NONE = 0,
	SQ_INCOMP = 0x0800,	/* on sol_incomp */
	SQ_COMP = 0x1000,	/* on sol_comp */
};

struct so_splice {
	struct socket	*src;
	struct socket	*dst;
	off_t		max;		/* maximum bytes to splice, or -1 */
	struct mtx	mtx;
	unsigned int	wq_index;
	enum so_splice_state {
		SPLICE_IDLE,		/* waiting for work to arrive */
		SPLICE_QUEUED,		/* a wakeup has queued some work */
		SPLICE_RUNNING,		/* currently transferring data */
		SPLICE_CLOSING,		/* waiting for work to drain */
		SPLICE_CLOSED,		/* unsplicing, terminal state */
		SPLICE_EXCEPTION,	/* I/O error or limit, implicit unsplice */
	} state;
	struct timeout_task timeout;
	STAILQ_ENTRY(so_splice) next;
};
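
/*
 * Illustrative sketch only (not part of this header): wiring up the
 * kernel-socket callbacks typed above.  soupcall_set() and sodtor_set()
 * (declared later in this header) install a so_upcall_t and a so_dtor_t;
 * the callback and helper names here are hypothetical, and the locking
 * shown assumes the receive-buffer lock is held across soupcall_set(),
 * as in-tree consumers do.
 *
 *	static int	example_upcall(struct socket *, void *, int);
 *	static void	example_dtor(struct socket *);
 *
 *	static void
 *	example_attach(struct socket *so, void *arg)
 *	{
 *		SOCK_RECVBUF_LOCK(so);
 *		soupcall_set(so, SO_RCV, example_upcall, arg);
 *		SOCK_RECVBUF_UNLOCK(so);
 *		SOCK_LOCK(so);
 *		sodtor_set(so, example_dtor);
 *		SOCK_UNLOCK(so);
 *	}
 */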

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (cr) locked by SOCK_RECVBUF_LOCK(so)
 * (cs) locked by SOCK_SENDBUF_LOCK(so)
 * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 * (ir,is) locked by recv or send I/O locks.
 * (k) locked by KTLS workqueue mutex
 */
TAILQ_HEAD(accept_queue, socket);
struct socket {
	struct mtx	so_lock;
	volatile u_int	so_count;	/* (b / refcount) */
	struct selinfo	so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo	so_wrsel;	/* (b/cs) for so_snd */
	int	so_options;		/* (b) from socket call, see socket.h */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_state;		/* (b) internal state flags SS_* */
	void	*so_pcb;		/* protocol control block */
	struct	vnet *so_vnet;		/* (a) network stack instance */
	struct	protosw *so_proto;	/* (a) protocol handle */
	short	so_linger;		/* time to linger close(2) */
	short	so_timeo;		/* (g) connection timeout */
	u_short	so_error;		/* (f) error affecting connection */
	u_short	so_rerror;		/* (f) error affecting connection */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	so_dtor_t *so_dtor;		/* (b) optional destructor */
	struct	osd	osd;		/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int	so_fibnum;		/* routing domain for this socket */
	uint32_t so_user_cookie;

	int	so_ts_clock;	/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */
	struct so_splice *so_splice;	/* (b) splice state for sink */
	struct so_splice *so_splice_back; /* (b) splice state for source */
	off_t so_splice_sent;	/* (ir) splice bytes sent so far */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;

	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf		so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket)	so_list;
			struct socket		*so_listen;	/* (b) */
			enum socket_qstate	so_qstate;	/* (b) */
			/* (b) cached MAC label for peer */
			struct	label		*so_peerlabel;
			u_long	so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket)	so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue	sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue	sol_comp;
			u_int	sol_qlen;	/* (e) sol_comp length */
			u_int	sol_incqlen;	/* (e) sol_incomp length */
			u_int	sol_qlimit;	/* (e) queue limit */

			/* accept_filter(9) optional data */
			struct	accept_filter	*sol_accept_filter;
			void	*sol_accept_filter_arg;	/* saved filter args */
			char	*sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t	*sol_upcall;	/* (e) */
			void		*sol_upcallarg;	/* (e) */

			/* Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one. */
			int		sol_sbrcv_lowat;
			int		sol_sbsnd_lowat;
			u_int		sol_sbrcv_hiwat;
			u_int		sol_sbsnd_hiwat;
			short		sol_sbrcv_flags;
			short		sol_sbsnd_flags;
			sbintime_t	sol_sbrcv_timeo;
			sbintime_t	sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval	sol_lastover;	/* (e) */
			int		sol_overcount;	/* (e) */
		};
	};
};
#endif	/* defined(_KERNEL) || defined(_WANT_SOCKET) */
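
/*
 * Illustrative sketch only (not part of this header): reading the accept
 * queue counters of a listening socket.  The (e) fields above are protected
 * by the listening socket's own lock, so SOLISTEN_LOCK() (defined below for
 * the kernel) must be held across the reads.  The helper name is
 * hypothetical.
 *
 *	static u_int
 *	example_backlog(struct socket *sol)
 *	{
 *		u_int qlen;
 *
 *		SOLISTEN_LOCK(sol);
 *		qlen = sol->sol_qlen + sol->sol_incqlen;
 *		SOLISTEN_UNLOCK(sol);
 *		return (qlen);
 *	}
 */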

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.
 * They are now split into separate, lock-specific fields.
 * so_state maintains basic socket state protected by the socket lock.
 * so_qstate holds information about the socket accept queues.
 * Each socket buffer also has a state field holding information
 * relevant to that socket buffer (can't send, rcv).
 * Many fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
/* was	SS_ISCONFIRMING		0x0400 */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */

#ifdef _KERNEL

#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_UNLOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_NOTOWNED);			\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)

/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)

static inline struct sockbuf *
sobuf(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
}

static inline struct mtx *
soeventmtx(struct socket *so, const sb_which which)
{
	return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
}
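
/*
 * Illustrative sketch only (not part of this header): peeking at the amount
 * of data queued in a socket's receive buffer.  The buffer is protected by
 * the socket-level SOCK_RECVBUF_LOCK()/SOCK_BUF_LOCK() macros above, which
 * are preferred over the legacy per-sockbuf macros.  The helper name is
 * hypothetical.
 *
 *	static u_int
 *	example_rcv_queued(struct socket *so)
 *	{
 *		u_int avail;
 *
 *		SOCK_RECVBUF_LOCK(so);
 *		avail = sbavail(&so->so_rcv);
 *		SOCK_RECVBUF_UNLOCK(so);
 *		return (avail);
 *	}
 */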

/*
 * Macros for sockets and socket buffering.
 */

#define	isspliced(so)		((so->so_splice != NULL &&		\
					so->so_splice->src != NULL))
#define	issplicedback(so)	((so->so_splice_back != NULL &&		\
					so->so_splice_back->dst != NULL))
/*
 * Flags to soiolock().
 */
#define	SBL_WAIT	0x00000001	/* Wait if not immediately available. */
#define	SBL_NOINTR	0x00000002	/* Force non-interruptible sleep. */
#define	SBL_VALID	(SBL_WAIT | SBL_NOINTR)

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)

#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_snd_sx, SA_LOCKED)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_rcv_sx, SA_LOCKED)

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so) \
	((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define	soreadabledata(so) \
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \
	    (so)->so_error || (so)->so_rerror)
#define	_soreadable(so) \
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))

static inline bool
soreadable(struct socket *so)
{
	if (isspliced(so))
		return (false);
	return (_soreadable(so));
}

/* can we write something to so? */
#define	sowriteable(so) \
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	    (((so)->so_state&SS_ISCONNECTED) || \
	    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
	    (so)->so_error)

/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)
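
/*
 * Illustrative sketch only (not part of this header): holding an extra
 * reference on a socket across a sleep.  soref() here relies on the caller
 * already owning something that keeps the socket alive; sorele() drops the
 * reference and frees the socket if it was the last one.  The helper name
 * and sleep are hypothetical.
 *
 *	static void
 *	example_use_socket(struct socket *so)
 *	{
 *		soref(so);
 *		(void)tsleep(&so->so_timeo, PSOCK, "examp", hz);
 *		sorele(so);
 *	}
 */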

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define	sorwakeup(so) do {						\
	SOCK_RECVBUF_LOCK(so);						\
	sorwakeup_locked(so);						\
} while (0)

#define	sowwakeup(so) do {						\
	SOCK_SENDBUF_LOCK(so);						\
	sowwakeup_locked(so);						\
} while (0)

struct accept_filter {
	char	accf_name[16];
	int	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	void	*(*accf_create)
		(struct socket *so, char *arg);
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};

#define	ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
	static struct accept_filter modname##_filter = {		\
		.accf_name = filtname,					\
		.accf_callback = cb,					\
		.accf_create = create,					\
		.accf_destroy = destroy,				\
	};								\
	static moduledata_t modname##_mod = {				\
		.name = __XSTRING(modname),				\
		.evhand = accept_filt_generic_mod_event,		\
		.priv = &modname##_filter,				\
	};								\
	DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS,		\
	    SI_ORDER_MIDDLE);						\
	MODULE_VERSION(modname, ver)
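
/*
 * Illustrative sketch only (not part of this header): a minimal accept
 * filter module in the spirit of accf_data(9).  The callback keeps a
 * connection off the complete queue (SU_OK, defined below) until data
 * arrives, then lets accept(2) see it (SU_ISCONNECTED).  All names here
 * are hypothetical.
 *
 *	static int
 *	example_accf_callback(struct socket *so, void *arg, int waitflag)
 *	{
 *
 *		if (!soreadable(so))
 *			return (SU_OK);
 *		return (SU_ISCONNECTED);
 *	}
 *	ACCEPT_FILTER_DEFINE(accf_example, "example", example_accf_callback,
 *	    NULL, NULL, 1);
 */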

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
#endif

/*
 * Socket specific helper hook point identifiers
 * Do not leave holes in the sequence, hook registration is a loop.
 */
#define	HHOOK_SOCKET_OPT		0
#define	HHOOK_SOCKET_CREATE		1
#define	HHOOK_SOCKET_RCV		2
#define	HHOOK_SOCKET_SND		3
#define	HHOOK_FILT_SOREAD		4
#define	HHOOK_FILT_SOWRITE		5
#define	HHOOK_SOCKET_CLOSE		6
#define	HHOOK_SOCKET_NEWCONN		7
#define	HHOOK_SOCKET_LAST		HHOOK_SOCKET_NEWCONN

struct socket_hhook_data {
	struct socket	*so;
	struct mbuf	*m;
	void		*hctx;		/* hook point specific data */
	int		status;
};

extern int	maxsockets;
extern u_long	sb_max;
extern so_gen_t	so_gencnt;

struct file;
struct filecaps;
struct filedesc;
struct mbuf;
struct sockaddr;
struct ucred;
struct uio;
enum shutdown_how;

/* Return values for socket upcalls. */
#define	SU_OK		0
#define	SU_ISCONNECTED	1

/*
 * From uipc_socket and friends
 */
int	getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
	    size_t len);
int	getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp, struct filecaps *havecaps);
int	getsock(struct thread *td, int fd, cap_rights_t *rightsp,
	    struct file **fpp);
void	soabort(struct socket *so);
int	soaccept(struct socket *so, struct sockaddr *sa);
int	sopeeraddr(struct socket *so, struct sockaddr *sa);
int	sosockaddr(struct socket *so, struct sockaddr *sa);
void	soaio_enqueue(struct task *task);
void	soaio_rcv(void *context, int pending);
void	soaio_snd(void *context, int pending);
int	socheckuid(struct socket *so, uid_t uid);
int	sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
int	sobindat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soclose(struct socket *so);
int	soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int	soconnectat(int fd, struct socket *so, struct sockaddr *nam,
	    struct thread *td);
int	soconnect2(struct socket *so1, struct socket *so2);
int	socreate(int dom, struct socket **aso, int type, int proto,
	    struct ucred *cred, struct thread *td);
int	sodisconnect(struct socket *so);
void	sodtor_set(struct socket *, so_dtor_t *);
struct	sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void	sohasoutofband(struct socket *so);
int	solisten(struct socket *so, int backlog, struct thread *td);
void	solisten_proto(struct socket *so, int backlog);
void	solisten_proto_abort(struct socket *so);
int	solisten_proto_check(struct socket *so);
bool	solisten_enqueue(struct socket *, int);
int	solisten_dequeue(struct socket *, struct socket **, int);
struct socket *
	solisten_clone(struct socket *);
struct socket *
	sonewconn(struct socket *head, int connstatus);
struct socket *
	sopeeloff(struct socket *);
int	sopoll(struct socket *so, int events, struct ucred *active_cred,
	    struct thread *td);
int	sopoll_generic(struct socket *so, int events,
	    struct ucred *active_cred, struct thread *td);
int	soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
	    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int	soreceive_stream(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_dgram(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
int	soreceive_generic(struct socket *so, struct sockaddr **paddr,
	    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
	    int *flagsp);
void	sorele_locked(struct socket *so);
void	sodealloc(struct socket *);
int	soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void	sorflush(struct socket *so);
int	sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *top, struct mbuf *control, int flags,
	    struct thread *td);
int	sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	    struct mbuf *control, int flags, struct proc *);
int	sosend_dgram(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	sosend_generic(struct socket *so, struct sockaddr *addr,
	    struct uio *uio, struct mbuf *top, struct mbuf *control,
	    int flags, struct thread *td);
int	soshutdown(struct socket *so, enum shutdown_how);
void	soupcall_clear(struct socket *, sb_which);
void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
void	sorwakeup_locked(struct socket *);
void	sowwakeup_locked(struct socket *);
void	sowakeup_aio(struct socket *, sb_which);
void	solisten_wakeup(struct socket *);
int	selsocket(struct socket *so, int events, struct timeval *tv,
	    struct thread *td);
void	soisconnected(struct socket *so);
void	soisconnecting(struct socket *so);
void	soisdisconnected(struct socket *so);
void	soisdisconnecting(struct socket *so);
void	socantrcvmore(struct socket *so);
void	socantrcvmore_locked(struct socket *so);
void	socantsendmore(struct socket *so);
void	socantsendmore_locked(struct socket *so);
void	soroverflow(struct socket *so);
void	soroverflow_locked(struct socket *so);
int	soiolock(struct socket *so, struct sx *sx, int flags);
void	soiounlock(struct sx *sx);

/*
 * Socket splicing routines.
 */
void	so_splice_dispatch(struct so_splice *sp);

/*
 * Accept filter functions (duh).
 */
int	accept_filt_add(struct accept_filter *filt);
int	accept_filt_del(char *name);
struct	accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int	accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

int	pr_listen_notsupp(struct socket *so, int backlog, struct thread *td);

#endif /* _KERNEL */

/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;
	kvaddr_t	so_splice_so;	/* kernel address of spliced socket */
	int64_t		so_spare64[7];
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;
	uint32_t	so_incqlen;
	uint32_t	so_qlimit;
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_fibnum;
	int32_t		so_spare32[7];
	int16_t		so_type;
	int16_t		so_options;
	int16_t		so_linger;
	int16_t		so_state;
	int16_t		so_timeo;
	uint16_t	so_error;
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};

#ifdef _KERNEL
void	sotoxsocket(struct socket *so, struct xsocket *xso);
void	sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
#endif

/*
 * Socket buffer state bits.  Exported via libprocstat(3).
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

#endif /* !_SYS_SOCKETVAR_H_ */