/*
 * ntp_monitor - monitor ntpd statistics
 */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_if.h"
#include "ntp_lists.h"
#include "ntp_stdlib.h"
#include <ntp_random.h>

#include <stdio.h>
#include <signal.h>
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif

/*
 * Record statistics based on source address, mode and version.  The
 * receive procedure calls us with the incoming rbufp before it does
 * anything else.  While at it, implement rate controls for inbound
 * traffic.
 *
 * Each entry is doubly linked into two lists, a hash table and a most-
 * recently-used (MRU) list.  When a packet arrives it is looked up in
 * the hash table.  If found, the statistics are updated and the entry
 * relinked at the head of the MRU list.  If not found, a new entry is
 * allocated, initialized and linked into both the hash table and the
 * head of the MRU list.
 *
 * Memory is usually allocated by grabbing a big chunk of new memory and
 * cutting it up into littler pieces.  The exception to this is when we
 * hit the memory limit.  Then we free memory by grabbing entries off
 * the tail of the MRU list, unlinking them from the hash table, and
 * reinitializing them.
 *
 * INC_MONLIST is the default allocation granularity in entries.
 * INIT_MONLIST is the default initial allocation in entries.
 */
#ifdef MONMEMINC		/* old name */
# define	INC_MONLIST	MONMEMINC
#elif !defined(INC_MONLIST)
# define	INC_MONLIST	(4 * 1024 / sizeof(mon_entry))
#endif
#ifndef INIT_MONLIST
# define	INIT_MONLIST	(4 * 1024 / sizeof(mon_entry))
#endif
#ifndef MRU_MAXDEPTH_DEF
# define	MRU_MAXDEPTH_DEF (1024 * 1024 / sizeof(mon_entry))
#endif
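
/*
 * Sizing example (illustrative only; the real figures depend on
 * sizeof(mon_entry) for this build): if a mon_entry were 128 octets,
 * INIT_MONLIST and INC_MONLIST would each come to 32 entries per
 * 4 KiB batch, and MRU_MAXDEPTH_DEF would come to 8192 entries,
 * i.e. roughly one megabyte of MRU entries before preemption kicks in.
 */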

/*
 * Hashing stuff
 */
u_char	mon_hash_bits;

/*
 * Pointers to the hash table and the MRU list.  Memory for the hash
 * table is allocated only if monitoring is enabled.
 */
mon_entry **	mon_hash;	/* MRU hash table */
mon_entry	mon_mru_list;	/* mru listhead */

/*
 * List of free structures, and counters of in-use and total
 * structures.  The free structures are linked with the hash_next field.
 */
static mon_entry *mon_free;		/* free list or null if none */
u_int	mru_alloc;			/* mru list + free list count */
u_int	mru_entries;			/* mru list count */
u_int	mru_peakentries;		/* highest mru_entries seen */
u_int	mru_initalloc = INIT_MONLIST;	/* entries to preallocate */
u_int	mru_incalloc = INC_MONLIST;	/* allocation batch factor */
static u_int	mon_mem_increments;	/* times called malloc() */

/*
 * Parameters of the RES_LIMITED restriction option.  We define headway
 * as the idle time between packets.  A packet is discarded if the
 * headway is less than the minimum, as well as if the average headway
 * is less than eight times the increment.
 */
int	ntp_minpkt = NTP_MINPKT;	/* minimum seconds between */
					/* requests from a client */
u_char	ntp_minpoll = NTP_MINPOLL;	/* minimum average log2 seconds */
					/* between client requests */
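
/*
 * Worked example (assuming the usual defaults, NTP_MINPKT = 2 s and
 * NTP_MINPOLL = 3, i.e. 2^3 = 8 s): a client is answered only if it
 * has waited at least about 2 s since its previous packet and its
 * accumulated headway debt stays below NTP_SHIFT (8) times the 8 s
 * increment; see the leaky-bucket arithmetic in ntp_monitor() below.
 */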

/*
 * Initialization state.  We may be monitoring, we may not.  If
 * we aren't, we may not even have allocated any memory yet.
 */
u_int	mon_enabled;			/* enable switch */
u_int	mru_mindepth = 600;		/* preempt above this */
int	mru_maxage = 64;		/* for entries older than */
u_int	mru_maxdepth =			/* MRU count hard limit */
	MRU_MAXDEPTH_DEF;
int	mon_age = 3000;			/* preemption limit */

static	void		mon_getmoremem(void);
static	void		remove_from_hash(mon_entry *);
static	inline void	mon_free_entry(mon_entry *);
static	inline void	mon_reclaim_entry(mon_entry *);


/*
 * init_mon - initialize monitoring global data
 */
void
init_mon(void)
{
	/*
	 * Don't do much of anything here.  We don't allocate memory
	 * until mon_start().
	 */
	mon_enabled = MON_OFF;
	INIT_DLIST(mon_mru_list, mru);
}


/*
 * remove_from_hash - removes an entry from the address hash table and
 *		      decrements mru_entries.
 */
static void
remove_from_hash(
	mon_entry *mon
	)
{
	u_int hash;
	mon_entry *punlinked;

	mru_entries--;
	hash = MON_HASH(&mon->rmtadr);
	UNLINK_SLIST(punlinked, mon_hash[hash], mon, hash_next,
		     mon_entry);
	ENSURE(punlinked == mon);
}


static inline void
mon_free_entry(
	mon_entry *m
	)
{
	ZERO(*m);
	LINK_SLIST(mon_free, m, hash_next);
}


/*
 * mon_reclaim_entry - Remove an entry from the MRU list and from the
 *		       hash array, then zero-initialize it.  Indirectly
 *		       decrements mru_entries.
 *
 * The entry is prepared to be reused.  Before return, in
 * remove_from_hash(), mru_entries is decremented.  It is the caller's
 * responsibility to increment it again.
 */
static inline void
mon_reclaim_entry(
	mon_entry *m
	)
{
	DEBUG_INSIST(NULL != m);

	UNLINK_DLIST(m, mru);
	remove_from_hash(m);
	ZERO(*m);
}


/*
 * mon_getmoremem - get more memory and put it on the free list
 */
static void
mon_getmoremem(void)
{
	mon_entry *chunk;
	u_int entries;

	entries = (0 == mon_mem_increments)
		      ? mru_initalloc
		      : mru_incalloc;

	if (entries) {
		chunk = eallocarray(entries, sizeof(*chunk));
		mru_alloc += entries;
		for (chunk += entries; entries; entries--)
			mon_free_entry(--chunk);

		mon_mem_increments++;
	}
}


/*
 * mon_start - start up the monitoring software
 */
void
mon_start(
	int mode
	)
{
	size_t octets;
	u_int min_hash_slots;

	if (MON_OFF == mode)		/* MON_OFF is 0 */
		return;
	if (mon_enabled) {
		mon_enabled |= mode;
		return;
	}
	if (0 == mon_mem_increments)
		mon_getmoremem();
	/*
	 * Select the MRU hash table size to limit the average count
	 * per bucket at capacity (mru_maxdepth) to 8, if possible
	 * given our hash is limited to 16 bits.
	 */
	min_hash_slots = (mru_maxdepth / 8) + 1;
	mon_hash_bits = 0;
	while (min_hash_slots >>= 1)
		mon_hash_bits++;
	mon_hash_bits = max(4, mon_hash_bits);
	mon_hash_bits = min(16, mon_hash_bits);
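	/*
	 * Worked example (illustrative; the actual default
	 * mru_maxdepth depends on sizeof(mon_entry)): if mru_maxdepth
	 * were 8192, min_hash_slots would be 1025, the loop above
	 * would count ten halvings, and mon_hash_bits would land at
	 * 10, giving a 1024-bucket table and an average chain length
	 * of 8 at capacity.
	 */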
	octets = sizeof(*mon_hash) * MON_HASH_SIZE;
	mon_hash = erealloc_zero(mon_hash, octets, 0);

	mon_enabled = mode;
}


/*
 * mon_stop - stop the monitoring software
 */
void
mon_stop(
	int mode
	)
{
	mon_entry *mon;

	if (MON_OFF == mon_enabled)
		return;
	if ((mon_enabled & mode) == 0 || mode == MON_OFF)
		return;

	mon_enabled &= ~mode;
	if (mon_enabled != MON_OFF)
		return;

	/*
	 * Move everything on the MRU list to the free list quickly,
	 * without bothering to remove each from either the MRU list or
	 * the hash table.
	 */
	ITER_DLIST_BEGIN(mon_mru_list, mon, mru, mon_entry)
		mon_free_entry(mon);
	ITER_DLIST_END()

	/* empty the MRU list and hash table. */
	mru_entries = 0;
	INIT_DLIST(mon_mru_list, mru);
	zero_mem(mon_hash, sizeof(*mon_hash) * MON_HASH_SIZE);
}


/*
 * mon_clearinterface -- remove mru entries referring to a local address
 *			 which is going away.
 */
void
mon_clearinterface(
	endpt *lcladr
	)
{
	mon_entry *mon;

	/* iterate mon over mon_mru_list */
	ITER_DLIST_BEGIN(mon_mru_list, mon, mru, mon_entry)
		if (mon->lcladr == lcladr) {
			/* remove from mru list */
			UNLINK_DLIST(mon, mru);
			/* remove from hash list, adjust mru_entries */
			remove_from_hash(mon);
			/* put on free list */
			mon_free_entry(mon);
		}
	ITER_DLIST_END()
}


/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, a possible KoD response.  This implies you cannot tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
	struct recvbuf *rbufp,
	u_short flags
	)
{
	l_fp		interval_fp;
	struct pkt *	pkt;
	mon_entry *	mon;
	mon_entry *	oldest;
	int		oldest_age;
	u_int		hash;
	u_short		restrict_mask;
	u_char		mode;
	u_char		version;
	int		interval;
	int		head;		/* headway increment */
	int		leak;		/* new headway */
	int		limit;		/* average threshold */

	REQUIRE(rbufp != NULL);

	if (mon_enabled == MON_OFF) {
		return ~(RES_LIMITED | RES_KOD) & flags;
	}
	pkt = &rbufp->recv_pkt;
	hash = MON_HASH(&rbufp->recv_srcadr);
	mode = PKT_MODE(pkt->li_vn_mode);
	version = PKT_VERSION(pkt->li_vn_mode);
	mon = mon_hash[hash];

	/*
	 * We keep track of all traffic for a given IP in one entry,
	 * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
	 */

	for (; mon != NULL; mon = mon->hash_next) {
		if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr)) {
			break;
		}
	}
	if (mon != NULL) {
		interval_fp = rbufp->recv_time;
		L_SUB(&interval_fp, &mon->last);
		/* add one-half second to round up */
		L_ADDUF(&interval_fp, 0x80000000);
		interval = interval_fp.l_i;
		mon->last = rbufp->recv_time;
		NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
		mon->count++;
		restrict_mask = flags;
		mon->vn_mode = VN_MODE(version, mode);

		/* Shuffle to the head of the MRU list. */
		UNLINK_DLIST(mon, mru);
		LINK_DLIST(mon_mru_list, mon, mru);

		/*
		 * At this point the most recent arrival is first in the
		 * MRU list.  Decrease the counter by the headway, but
		 * not less than zero.
		 */
		mon->leak -= interval;
		mon->leak = max(0, mon->leak);
		head = 1 << ntp_minpoll;
		leak = mon->leak + head;
		limit = NTP_SHIFT * head;

		DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
			    interval, leak, limit));

		/*
		 * If the minimum and average thresholds are not
		 * exceeded, douse the RES_LIMITED and RES_KOD bits and
		 * increase the counter by the headway increment.  Note
		 * that we give a 1-s grace for the minimum threshold
		 * and a 2-s grace for the headway increment.  If one or
		 * both thresholds are exceeded and the old counter is
		 * less than the average threshold, set the counter to
		 * the average threshold plus the increment and leave
		 * the RES_LIMITED and RES_KOD bits lit.  Otherwise,
		 * leave the counter alone and douse the RES_KOD bit.
		 * This rate-limits the KoDs to no more often than the
		 * average headway.
		 */
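		/*
		 * Illustrative numbers (assuming the defaults
		 * ntp_minpkt = 2, ntp_minpoll = 3, NTP_SHIFT = 8):
		 * head is 8 and limit is 64.  A compliant client has
		 * its counter bumped by 6 (head less the 2-s grace)
		 * per accepted packet and drained by the elapsed
		 * interval, while a violator whose counter is still
		 * below 64 has it set to 72 (limit + head) and stays
		 * rate-limited until enough idle time drains it.
		 */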
		if (interval + 1 >= ntp_minpkt && leak < limit) {
			mon->leak = leak - 2;
			restrict_mask &= ~(RES_LIMITED | RES_KOD);
		} else if (mon->leak < limit) {
			mon->leak = limit + head;
		} else {
			restrict_mask &= ~RES_KOD;
		}
		mon->flags = restrict_mask;

		return mon->flags;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 *
	 * The following ntp.conf "mru" knobs come into play determining
	 * the depth (or count) of the MRU list:
	 * - mru_mindepth ("mru mindepth") is a floor beneath which
	 *   entries are kept without regard to their age.  The
	 *   default is 600 which matches the longtime implementation
	 *   limit on the total number of entries.
	 * - mru_maxage ("mru maxage") is a ceiling on the age in
	 *   seconds of entries.  Entries older than this are
	 *   reclaimed once mru_mindepth is exceeded.  64s default.
	 *   Note that entries older than this can easily survive
	 *   as they are reclaimed only as needed.
	 * - mru_maxdepth ("mru maxdepth") is a hard limit on the
	 *   number of entries.
	 * - "mru maxmem" sets mru_maxdepth to the number of entries
	 *   which fit in the given number of kilobytes.  The default is
	 *   1024, or 1 megabyte.
	 * - mru_initalloc ("mru initalloc") sets the count of the
	 *   initial allocation of MRU entries.
	 * - "mru initmem" sets mru_initalloc in units of kilobytes.
	 *   The default is 4.
	 * - mru_incalloc ("mru incalloc") sets the number of entries to
	 *   allocate on-demand each time the free list is empty.
	 * - "mru incmem" sets mru_incalloc in units of kilobytes.
	 *   The default is 4.
	 * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
	 * ntp.conf controls.  Similarly for "mru initalloc" and "mru
	 * initmem", and for "mru incalloc" and "mru incmem".
	 */
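	/*
	 * Sketch of how these knobs might appear in ntp.conf (values
	 * are illustrative, not recommendations):
	 *
	 *     mru mindepth 600 maxage 64 maxdepth 5000
	 *     mru initmem 16 incmem 16
	 *
	 * The first line keeps at least 600 entries, reclaims entries
	 * idle longer than 64 s once that floor is passed, and caps
	 * the list at 5000 entries; the second sizes the initial and
	 * incremental allocations at 16 kilobytes each.
	 */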
	if (mru_entries < mru_mindepth) {
		if (NULL == mon_free)
			mon_getmoremem();
		UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
	} else {
		oldest = TAIL_DLIST(mon_mru_list, mru);
		oldest_age = 0;		/* silence uninit warning */
		if (oldest != NULL) {
			interval_fp = rbufp->recv_time;
			L_SUB(&interval_fp, &oldest->last);
			/* add one-half second to round up */
			L_ADDUF(&interval_fp, 0x80000000);
			oldest_age = interval_fp.l_i;
		}
		/* note -1 is legal for mru_maxage (disables) */
		if (oldest != NULL && mru_maxage < oldest_age) {
			mon_reclaim_entry(oldest);
			mon = oldest;
		} else if (mon_free != NULL || mru_alloc <
			   mru_maxdepth) {
			if (NULL == mon_free)
				mon_getmoremem();
			UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
		/* Preempt from the MRU list if old enough. */
		} else if (ntp_uurandom() >
			   (double)oldest_age / mon_age) {
			return ~(RES_LIMITED | RES_KOD) & flags;
		} else {
			mon_reclaim_entry(oldest);
			mon = oldest;
		}
	}
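
	/*
	 * Note on the preemption branch above: once the list is full,
	 * the oldest entry is recycled only with probability
	 * oldest_age / mon_age.  For example, with the default mon_age
	 * of 3000, an oldest entry idle for 1500 s is evicted for
	 * roughly half of new clients and the rest go unmonitored,
	 * which keeps a burst of new sources from instantly flushing
	 * long-lived entries.
	 */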

	INSIST(mon != NULL);

	/*
	 * Got one, initialize it
	 */
	mru_entries++;
	mru_peakentries = max(mru_peakentries, mru_entries);
	mon->last = rbufp->recv_time;
	mon->first = mon->last;
	mon->count = 1;
	mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
	mon->leak = 0;
	memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
	mon->vn_mode = VN_MODE(version, mode);
	mon->lcladr = rbufp->dstadr;
	mon->cast_flags = (u_char)(((rbufp->dstadr->flags &
	    INT_MCASTOPEN) && rbufp->fd == mon->lcladr->fd) ? MDF_MCAST
	    : rbufp->fd == mon->lcladr->bfd ? MDF_BCAST : MDF_UCAST);

	/*
	 * Drop him in at the front of the hash table and at the head
	 * of the MRU list.
	 */
	LINK_SLIST(mon_hash[hash], mon, hash_next);
	LINK_DLIST(mon_mru_list, mon, mru);

	return mon->flags;
}