/*	$NetBSD: ntp_worker.c,v 1.1.1.1 2013/12/27 23:30:47 christos Exp $	*/

/*
 * ntp_worker.c
 */
#include <config.h>
#include "ntp_workimpl.h"

#ifdef WORKER

#include <stdio.h>
#include <ctype.h>
#include <signal.h>

#include "iosignal.h"
#include "ntp_stdlib.h"
#include "ntp_malloc.h"
#include "ntp_syslog.h"
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_assert.h"
#include "ntp_unixtime.h"
#include "intreswork.h"


#define CHILD_MAX_IDLE	(3 * 60)	/* seconds, idle worker limit */

blocking_child **	blocking_children;
size_t			blocking_children_alloc;
int			worker_per_query;	/* boolean */
int			intres_req_pending;


#ifndef HAVE_IO_COMPLETION_PORT
/*
 * pipe_socketpair()
 *
 * Provides an AF_UNIX socketpair on systems which have them,
 * otherwise a pair of unidirectional pipes.
 */
int
pipe_socketpair(
	int	caller_fds[2],
	int *	is_pipe
	)
{
	int	rc;
	int	fds[2];
	int	called_pipe;

#ifdef HAVE_SOCKETPAIR
	rc = socketpair(AF_UNIX, SOCK_STREAM, 0, &fds[0]);
#else
	rc = -1;
#endif

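	/* Either socketpair() is unavailable or it failed; fall back
	 * to a pipe. */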
	if (-1 == rc) {
		rc = pipe(&fds[0]);
		called_pipe = TRUE;
	} else {
		called_pipe = FALSE;
	}

	if (-1 == rc)
		return rc;

	caller_fds[0] = fds[0];
	caller_fds[1] = fds[1];
	if (is_pipe != NULL)
		*is_pipe = called_pipe;

	return 0;
}


/*
 * close_all_except()
 *
 * Close all file descriptors except the given keep_fd.
 */
void
close_all_except(
	int keep_fd
	)
{
	int fd;

	for (fd = 0; fd < keep_fd; fd++)
		close(fd);

	close_all_beyond(keep_fd);
}


/*
 * close_all_beyond()
 *
 * Close all file descriptors after the given keep_fd, which is the
 * highest fd to keep open.
 */
void
close_all_beyond(
	int keep_fd
	)
{
# ifdef HAVE_CLOSEFROM
	closefrom(keep_fd + 1);
# elif defined(F_CLOSEM)
	/*
	 * From 'Writing Reliable AIX Daemons,' SG24-4946-00,
	 * by Eric Agar (saves us from doing 32767 system
	 * calls)
	 */
	if (fcntl(keep_fd + 1, F_CLOSEM, 0) == -1)
		msyslog(LOG_ERR, "F_CLOSEM(%d): %m", keep_fd + 1);
# else	/* !HAVE_CLOSEFROM && !F_CLOSEM follows */
	int fd;
	int max_fd;

	max_fd = GETDTABLESIZE();
	for (fd = keep_fd + 1; fd < max_fd; fd++)
		close(fd);
# endif	/* !HAVE_CLOSEFROM && !F_CLOSEM */
}
#endif	/* HAVE_IO_COMPLETION_PORT */


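/*
 * available_blocking_child_slot()
 *
 * Returns the index of a free or reusable slot in blocking_children,
 * growing the table when every slot is in use.
 */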
u_int
available_blocking_child_slot(void)
{
	const size_t	each = sizeof(blocking_children[0]);
	u_int		slot;
	size_t		prev_alloc;
	size_t		new_alloc;
	size_t		prev_octets;
	size_t		octets;

	for (slot = 0; slot < blocking_children_alloc; slot++) {
		if (NULL == blocking_children[slot])
			return slot;
		if (blocking_children[slot]->reusable) {
			blocking_children[slot]->reusable = FALSE;
			return slot;
		}
	}

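	/*
	 * No free slot; grow the table by four entries, zeroing the
	 * newly added pointers.
	 */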
	prev_alloc = blocking_children_alloc;
	prev_octets = prev_alloc * each;
	new_alloc = blocking_children_alloc + 4;
	octets = new_alloc * each;
	blocking_children = erealloc_zero(blocking_children, octets,
					  prev_octets);
	blocking_children_alloc = new_alloc;

	return prev_alloc;
}


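/*
 * queue_blocking_request()
 *
 * Dispatches a blocking-work request to a worker child, creating the
 * child's bookkeeping structure on first use.
 */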
int
queue_blocking_request(
	blocking_work_req	rtype,
	void *			req,
	size_t			reqsize,
	blocking_work_callback	done_func,
	void *			context
	)
{
	static u_int		intres_slot = UINT_MAX;
	u_int			child_slot;
	blocking_child *	c;
	blocking_pipe_header	req_hdr;

	req_hdr.octets = sizeof(req_hdr) + reqsize;
	req_hdr.magic_sig = BLOCKING_REQ_MAGIC;
	req_hdr.rtype = rtype;
	req_hdr.done_func = done_func;
	req_hdr.context = context;

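	/*
	 * Pick a worker: with worker_per_query each request gets its
	 * own child, otherwise every request shares the single child
	 * tracked by intres_slot.
	 */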
	child_slot = UINT_MAX;
	if (worker_per_query || UINT_MAX == intres_slot ||
	    blocking_children[intres_slot]->reusable)
		child_slot = available_blocking_child_slot();
	if (!worker_per_query) {
		if (UINT_MAX == intres_slot)
			intres_slot = child_slot;
		else
			child_slot = intres_slot;
		if (0 == intres_req_pending)
			intres_timeout_req(0);
	}
	intres_req_pending++;
	INSIST(UINT_MAX != child_slot);
	c = blocking_children[child_slot];
	if (NULL == c) {
		c = emalloc_zero(sizeof(*c));
#ifdef WORK_FORK
		c->req_read_pipe = -1;
		c->req_write_pipe = -1;
#endif
#ifdef WORK_PIPE
		c->resp_read_pipe = -1;
		c->resp_write_pipe = -1;
#endif
		blocking_children[child_slot] = c;
	}
	req_hdr.child_idx = child_slot;

	return send_blocking_req_internal(c, &req_hdr, req);
}


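/*
 * queue_blocking_response()
 *
 * Called by the worker to fill in a response header from the matching
 * request and send the response back to the parent.
 */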
int
queue_blocking_response(
	blocking_child *		c,
	blocking_pipe_header *		resp,
	size_t				respsize,
	const blocking_pipe_header *	req
	)
{
	resp->octets = respsize;
	resp->magic_sig = BLOCKING_RESP_MAGIC;
	resp->rtype = req->rtype;
	resp->context = req->context;
	resp->done_func = req->done_func;

	return send_blocking_resp_internal(c, resp);
}


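/*
 * process_blocking_resp()
 *
 * Called in the parent to consume worker responses and invoke each
 * request's completion callback.
 */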
void
process_blocking_resp(
	blocking_child *	c
	)
{
	blocking_pipe_header *	resp;
	void *			data;

	/*
	 * On Windows send_blocking_resp_internal() may signal the
	 * blocking_response_ready event multiple times while we're
	 * processing a response, so always consume all available
	 * responses before returning to test the event again.
	 */
#ifdef WORK_THREAD
	do {
#endif
		resp = receive_blocking_resp_internal(c);
		if (NULL != resp) {
			DEBUG_REQUIRE(BLOCKING_RESP_MAGIC ==
				      resp->magic_sig);
			data = (char *)resp + sizeof(*resp);
			intres_req_pending--;
			(*resp->done_func)(resp->rtype, resp->context,
					   resp->octets - sizeof(*resp),
					   data);
			free(resp);
		}
#ifdef WORK_THREAD
	} while (NULL != resp);
#endif
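	/*
	 * With a shared worker, start the idle-exit timer once no
	 * requests remain; a per-query worker is retired immediately.
	 */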
	if (!worker_per_query && 0 == intres_req_pending)
		intres_timeout_req(CHILD_MAX_IDLE);
	else if (worker_per_query)
		req_child_exit(c);
}


/*
 * blocking_child_common runs as a forked child or a thread
 */
int
blocking_child_common(
	blocking_child	*c
	)
{
	int say_bye;
	blocking_pipe_header *req;

	say_bye = FALSE;
	while (!say_bye) {
		req = receive_blocking_req_internal(c);
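		/* A NULL request tells this worker to clean up and
		 * exit. */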
		if (NULL == req) {
			say_bye = TRUE;
			break;
		}

		DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == req->magic_sig);

		switch (req->rtype) {
		case BLOCKING_GETADDRINFO:
			if (blocking_getaddrinfo(c, req))
				say_bye = TRUE;
			break;

		case BLOCKING_GETNAMEINFO:
			if (blocking_getnameinfo(c, req))
				say_bye = TRUE;
			break;

		default:
			msyslog(LOG_ERR, "unknown req %d to blocking worker", req->rtype);
			say_bye = TRUE;
		}

		free(req);
	}

	return 0;
}


/*
 * worker_idle_timer_fired()
 *
 * The parent starts this timer when the last pending response has been
 * received from the child, making it idle, and clears the timer when a
 * request is dispatched to the child.  Once the timer expires, the
 * child is sent packing.
 *
 * This is called when worker_idle_timer is nonzero and less than or
 * equal to current_time.
 */
void
worker_idle_timer_fired(void)
{
	u_int			idx;
	blocking_child *	c;

	DEBUG_REQUIRE(0 == intres_req_pending);

	intres_timeout_req(0);
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		req_child_exit(c);
	}
}


#else	/* !WORKER follows */
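/* Dummy declaration so this translation unit is never empty. */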
int ntp_worker_nonempty_compilation_unit;
#endif