/*	uipc_socket2.c	4.34	83/01/08	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/file.h"
#include "../h/inode.h"
#include "../h/buf.h"
#include "../h/mbuf.h"
#include "../h/protosw.h"
#include "../h/socket.h"
#include "../h/socketvar.h"

/*
 * Primitive routines for operating on sockets and socket buffers.
 */

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with SO_ACCEPTCONN,
 * creating two queues of sockets: so_q0 for connections in progress
 * and so_q for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_q0 by calling sonewconn().  When the connection
 * is established, soisconnected() is called, which transfers the
 * socket structure to so_q, making it available to accept().
 *
 * If a SO_ACCEPTCONN socket is closed with sockets on either
 * so_q0 or so_q, these sockets are dropped.
 *
 * If and when higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * be implemented as software-interrupt process scheduling.
 */

soisconnecting(so)
	struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	wakeup((caddr_t)&so->so_timeo);
}

soisconnected(so)
	struct socket *so;
{
	register struct socket *head = so->so_head;

	if (head) {
		if (soqremque(so, 0) == 0)
			panic("soisconnected");
		soqinsque(head, so, 1);
		wakeup((caddr_t)&head->so_timeo);
	}
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTED;
	wakeup((caddr_t)&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

soisdisconnecting(so)
	struct socket *so;
{

	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}

soisdisconnected(so)
	struct socket *so;
{

	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowwakeup(so);
	sorwakeup(so);
}
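
/*
 * Illustrative sketch: how a connection-oriented protocol might drive
 * the state transitions above from the active side.  The xp_* entry
 * points are hypothetical stand-ins for a real protocol's PRU_CONNECT,
 * connection-established, and PRU_DISCONNECT processing.
 */
#ifdef notdef
xp_connect(so)				/* PRU_CONNECT processing */
	struct socket *so;
{

	soisconnecting(so);		/* note SS_ISCONNECTING, wake waiters */
	/* ... transmit the protocol's connection request to the peer ... */
}

xp_established(so)			/* peer accepted the connection */
	struct socket *so;
{

	soisconnected(so);		/* SS_ISCONNECTED; wakes connect()/select() */
}

xp_disconnect(so)			/* PRU_DISCONNECT processing */
	struct socket *so;
{

	soisdisconnecting(so);		/* no more data in either direction */
	/* ... exchange whatever the protocol needs to tear down ... */
	soisdisconnected(so);		/* when the peer is completely gone */
}
#endif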

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.),
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 */
struct socket *
sonewconn(head)
	register struct socket *head;
{
	register struct socket *so;
	struct mbuf *m;

	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		goto bad;
	m = m_getclr(M_DONTWAIT, MT_SOCKET);
	if (m == NULL)
		goto bad;
	so = mtod(m, struct socket *);
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgrp = head->so_pgrp;
	soqinsque(head, so, 0);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH, (struct mbuf *)0,
	  (struct mbuf *)0, (struct sockopt *)0)) {
		(void) soqremque(so, 0);
		(void) m_free(m);
		goto bad;
	}
	return (so);
bad:
	return ((struct socket *)0);
}
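
/*
 * Illustrative sketch: how a protocol's input routine might use
 * sonewconn() and soisconnected() on the passive side.  The names
 * xp_input_request and xp_handshake_done are hypothetical; a real
 * protocol would also record its own control block for the new socket.
 */
#ifdef notdef
xp_input_request(head)
	struct socket *head;		/* listening socket, SO_ACCEPTCONN set */
{
	register struct socket *so;

	so = sonewconn(head);		/* queued on head->so_q0 */
	if (so == 0)
		return;			/* no space; drop the request */
	/* ... reply to the peer, remember so in the protocol state ... */
}

xp_handshake_done(so)
	struct socket *so;
{

	soisconnected(so);		/* moves so to head->so_q for accept() */
}
#endif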

soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{

	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = head->so_q0;
		head->so_q0 = so;
	} else {
		head->so_qlen++;
		so->so_q = head->so_q;
		head->so_q = so;
	}
}

soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == head)
			return (0);
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}

/*
 * Socantsendmore indicates that no more data will be sent on the
 * socket; it is normally applied to a socket by the protocol code
 * (in its PRU_SHUTDOWN case) when the user informs the system that
 * no more data is to be sent.  Socantrcvmore indicates that no more
 * data will be received, and will normally be applied to the socket
 * by a protocol when it detects that the peer will send no more data.
 * Data queued for reading in the socket may yet be read.
 */

socantsendmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTSENDMORE;
	sowwakeup(so);
}

socantrcvmore(so)
	struct socket *so;
{

	so->so_state |= SS_CANTRCVMORE;
	sorwakeup(so);
}
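
/*
 * Illustrative sketch: typical callers of the two routines above.
 * A protocol's PRU_SHUTDOWN processing marks the send side closed,
 * and its input routine marks the receive side closed when the peer
 * indicates it will send no more (the xp_* names are hypothetical).
 */
#ifdef notdef
xp_shutdown(so)				/* PRU_SHUTDOWN */
	struct socket *so;
{

	socantsendmore(so);
	/* ... tell the peer that no more data will follow ... */
}

xp_input_fin(so)			/* peer finished sending */
	struct socket *so;
{

	socantrcvmore(so);		/* queued data may still be read */
}
#endif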

/*
 * Socket select/wakeup routines.
 */

/*
 * Interface routine to select() system
 * call for sockets.
 */
soselect(so, rw)
	register struct socket *so;
	int rw;
{
	int s = splnet();

	switch (rw) {

	case FREAD:
		if (soreadable(so)) {
			splx(s);
			return (1);
		}
		sbselqueue(&so->so_rcv);
		break;

	case FWRITE:
		if (sowriteable(so)) {
			splx(s);
			return (1);
		}
		sbselqueue(&so->so_snd);
		break;
	}
	splx(s);
	return (0);
}

/*
 * Queue a process for a select on a socket buffer.
 */
sbselqueue(sb)
	struct sockbuf *sb;
{
	register struct proc *p;

	if ((p = sb->sb_sel) && p->p_wchan == (caddr_t)&selwait)
		sb->sb_flags |= SB_COLL;
	else
		sb->sb_sel = u.u_procp;
}

/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
sbwait(sb)
	struct sockbuf *sb;
{

	sb->sb_flags |= SB_WAIT;
	sleep((caddr_t)&sb->sb_cc, PZERO+1);
}

/*
 * Wakeup processes waiting on a socket buffer.
 */
sbwakeup(sb)
	struct sockbuf *sb;
{

	if (sb->sb_sel) {
		selwakeup(sb->sb_sel, sb->sb_flags & SB_COLL);
		sb->sb_sel = 0;
		sb->sb_flags &= ~SB_COLL;
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
}
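
/*
 * Illustrative sketch: the usual pairing of sbwait() with the wakeups
 * above.  A consumer blocks at splnet until data appears in a receive
 * buffer; the protocol's input side appends data and then issues
 * sorwakeup(), which wakes selectors and sleepers through sbwakeup().
 * The xp_* names are hypothetical.
 */
#ifdef notdef
xp_waitfordata(so)
	register struct socket *so;
{
	int s = splnet();

	while (so->so_rcv.sb_cc == 0 &&
	    (so->so_state & SS_CANTRCVMORE) == 0)
		sbwait(&so->so_rcv);		/* sleeps on sb_cc */
	splx(s);
}

xp_input_data(so, m)
	struct socket *so;
	struct mbuf *m;
{

	sbappend(&so->so_rcv, m);
	sorwakeup(so);				/* sbwakeup on so_rcv */
}
#endif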

/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and
 * one for receiving data.  Each buffer contains a queue of mbufs,
 * information about the number of mbufs and amount of data in the
 * queue, and other fields allowing select() and notification
 * of data availability to be implemented.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space for the socket, by calling sbreserve.  This commits
 * some of the available buffer space in the system buffer pool for the
 * socket.  The space should be released by calling sbrelease when the
 * socket is destroyed.
 *
 * The routine sbappend() is normally called to append new mbufs
 * to a socket buffer, after checking that adequate space is available
 * by comparing the value returned by sbspace() with the amount of data
 * to be added.  Data is normally removed from a socket buffer in a
 * protocol by first calling m_copy on the socket buffer mbuf chain and
 * sending this to a peer, and then removing the data from the socket
 * buffer with sbdrop when the data is acknowledged by the peer (or
 * immediately in the case of unreliable protocols).
 *
 * Protocols which do not require connections place both source address
 * and data information in socket buffer queues.  The source addresses
 * are stored in single mbufs before each data item, and are easily found
 * as the data items are all marked with end of record markers.  The
 * sbappendaddr() routine stores a datum and associated address in
 * a socket buffer.  Note that, unlike sbappend(), this routine checks
 * for the caller that there will be enough space to store the data.
 * It fails if there is not enough space, or if it cannot find
 * a mbuf to store the address in.
 *
 * The higher-level routines sosend and soreceive (in uipc_socket.c)
 * also add data to, and remove data from, socket buffers respectively.
 */
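
/*
 * Illustrative sketch: the lifecycle described above, as a protocol
 * might use these routines.  The xp_* names and the 2048/2048 space
 * figures are hypothetical; a real protocol picks its own amounts.
 */
#ifdef notdef
xp_attach(so)				/* PRU_ATTACH */
	struct socket *so;
{

	return (soreserve(so, 2048, 2048));	/* ENOBUFS if refused */
}

xp_input(so, m)				/* data arriving from the peer */
	struct socket *so;
	register struct mbuf *m;
{
	register int len = 0;
	register struct mbuf *n;

	for (n = m; n; n = n->m_next)
		len += n->m_len;
	if (len > sbspace(&so->so_rcv)) {	/* no room: drop it */
		m_freem(m);
		return;
	}
	sbappend(&so->so_rcv, m);
	sorwakeup(so);
}

xp_acked(so, len)			/* peer acknowledged len bytes */
	struct socket *so;
	int len;
{

	sbdrop(&so->so_snd, len);
	sowwakeup(so);
}

xp_detach(so)				/* PRU_DETACH */
	struct socket *so;
{

	sbrelease(&so->so_snd);
	sbrelease(&so->so_rcv);
}
#endif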

soreserve(so, sndcc, rcvcc)
	struct socket *so;
	int sndcc, rcvcc;
{

	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}

/*
 * Allot mbufs to a sockbuf.
 */
sbreserve(sb, cc)
	struct sockbuf *sb;
	int cc;
{

	/* someday maybe this routine will fail... */
	sb->sb_hiwat = cc;
	/* the 2 implies names can be no more than 1 mbuf each */
	sb->sb_mbmax = cc*2;
	return (1);
}

/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
sbrelease(sb)
	struct sockbuf *sb;
{

	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}

/*
 * Routines to add (at the end) and remove (from the beginning)
 * data from a mbuf queue.
 */

/*
 * Append mbuf queue m to sockbuf sb.
 */
sbappend(sb, m)
	register struct sockbuf *sb;
	register struct mbuf *m;
{
	register struct mbuf *n;

	n = sb->sb_mb;
	if (n)
		while (n->m_next)
			n = n->m_next;
	while (m) {
		/* discard empty mbufs which do not carry a record mark */
		if (m->m_len == 0 && (int)m->m_act == 0) {
			m = m_free(m);
			continue;
		}
		/*
		 * Compact: if neither mbuf is a cluster or carries a record
		 * mark, and m's data fits in the space remaining in n,
		 * copy it there and free m rather than linking m on.
		 */
		if (n && n->m_off <= MMAXOFF && m->m_off <= MMAXOFF &&
		   (int)n->m_act == 0 && (int)m->m_act == 0 &&
		   (n->m_off + n->m_len + m->m_len) <= MMAXOFF) {
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		sballoc(sb, m);
		if (n == 0)
			sb->sb_mb = m;
		else
			n->m_next = m;
		n = m;
		m = m->m_next;
		n->m_next = 0;
	}
}

/*
 * Append data and address.
 * Return 0 if no space in sockbuf or if
 * can't get mbuf to stuff address in.
 */
sbappendaddr(sb, asa, m0)
	struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0;
{
	struct sockaddr *msa;
	register struct mbuf *m;
	register int len = sizeof (struct sockaddr);

	m = m0;
	if (m == 0)
		panic("sbappendaddr");
	for (;;) {
		len += m->m_len;
		if (m->m_next == 0) {
			m->m_act = (struct mbuf *)1;
			break;
		}
		m = m->m_next;
	}
	if (len > sbspace(sb))
		return (0);
	m = m_get(M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = sizeof (struct sockaddr);
	msa = mtod(m, struct sockaddr *);
	*msa = *asa;
	m->m_act = (struct mbuf *)1;
	sbappend(sb, m);
	sbappend(sb, m0);
	return (1);
}
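
/*
 * Illustrative sketch: a connectionless protocol's input routine
 * delivering a datagram and its source address with sbappendaddr().
 * The names xp_dgram_input and from are hypothetical.
 */
#ifdef notdef
xp_dgram_input(so, m, from)
	struct socket *so;
	struct mbuf *m;			/* the datagram */
	struct sockaddr *from;		/* source address of the datagram */
{

	if (sbappendaddr(&so->so_rcv, from, m) == 0) {
		m_freem(m);		/* buffer full or no address mbuf */
		return;
	}
	sorwakeup(so);
}
#endif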

#ifdef notdef
SBCHECK(sb, str)
	struct sockbuf *sb;
	char *str;
{
	register int cnt = sb->sb_cc;
	register int mbcnt = sb->sb_mbcnt;
	register struct mbuf *m;

	for (m = sb->sb_mb; m; m = m->m_next) {
		cnt -= m->m_len;
		mbcnt -= MSIZE;
		if (m->m_off > MMAXOFF)
			mbcnt -= CLBYTES;
	}
	if (cnt || mbcnt) {
		printf("cnt %d mbcnt %d\n", cnt, mbcnt);
		panic(str);
	}
}
#endif

/*
 * Free all mbufs on a sockbuf mbuf chain.
 * Check that resource allocations return to 0.
 */
sbflush(sb)
	struct sockbuf *sb;
{

	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	if (sb->sb_cc)
		sbdrop(sb, sb->sb_cc);
	if (sb->sb_cc || sb->sb_mbcnt || sb->sb_mb)
		panic("sbflush 2");
}

/*
 * Drop data from (the front of) a sockbuf chain.
 */
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m = sb->sb_mb, *mn;

	while (len > 0) {
		if (m == 0)
			panic("sbdrop");
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_off += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	sb->sb_mb = m;
}
513