/*	$NetBSD: pktqueue.c,v 1.14 2021/10/11 05:13:11 knakahara Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The packet queue (pktqueue) interface is a lockless IP input queue
 * which also abstracts and handles network ISR scheduling.  It provides
 * a mechanism to enable receiver-side packet steering (RPS).
 */
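
/*
 * Typical lifecycle (an illustrative sketch only; "proto_pktq" and
 * "protointr" are hypothetical names, not part of this file): a protocol
 * creates its queue once at initialization time, the input path enqueues
 * packets into it, and the softint handler passed to pktq_create() drains
 * it on the selected CPU.
 *
 *	static pktqueue_t *proto_pktq __read_mostly;
 *
 *	void
 *	proto_init(void)
 *	{
 *		proto_pktq = pktq_create(IFQ_MAXLEN, protointr, NULL);
 *		KASSERT(proto_pktq != NULL);
 *	}
 */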

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pktqueue.c,v 1.14 2021/10/11 05:13:11 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/xcall.h>

#include <net/pktqueue.h>
#include <net/rss_config.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

struct pktqueue {
	/*
	 * The lock used for a barrier mechanism.  The barrier counter,
	 * as well as the drop counter, are managed atomically though.
	 * Ensure this group is in a separate cache line.
	 */
	union {
		struct {
			kmutex_t	pq_lock;
			volatile u_int	pq_barrier;
		};
		uint8_t	 _pad[COHERENCY_UNIT];
	};

	/* The size of the queue, counters and the interrupt handler. */
	u_int		pq_maxlen;
	percpu_t *	pq_counters;
	void *		pq_sih;

	/* Finally, per-CPU queues. */
	struct percpu *	pq_pcq;	/* struct pcq * */
};

/* The counters of the packet queue. */
#define	PQCNT_ENQUEUE	0
#define	PQCNT_DEQUEUE	1
#define	PQCNT_DROP	2
#define	PQCNT_NCOUNTERS	3

typedef struct {
	uint64_t	count[PQCNT_NCOUNTERS];
} pktq_counters_t;

/* Special marker value used by pktq_barrier() mechanism. */
#define	PKTQ_MARKER	((void *)(~0ULL))
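
/*
 * The marker is used by pktq_barrier(): it is enqueued on each non-empty
 * per-CPU queue and, when pktq_dequeue() encounters it, the pq_barrier
 * counter is bumped instead of a packet being returned.  This tells
 * pktq_barrier() that everything enqueued before the marker has been
 * processed.
 */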

static void
pktq_init_cpu(void *vqp, void *vpq, struct cpu_info *ci)
{
	struct pcq **qp = vqp;
	struct pktqueue *pq = vpq;

	*qp = pcq_create(pq->pq_maxlen, KM_SLEEP);
}

static void
pktq_fini_cpu(void *vqp, void *vpq, struct cpu_info *ci)
{
	struct pcq **qp = vqp, *q = *qp;

	KASSERT(pcq_peek(q) == NULL);
	pcq_destroy(q);
	*qp = NULL;		/* paranoia */
}

static struct pcq *
pktq_pcq(struct pktqueue *pq, struct cpu_info *ci)
{
	struct pcq **qp, *q;

	/*
	 * As long as preemption is disabled, the xcall to swap percpu
	 * buffers can't complete, so it is safe to read the pointer.
	 */
	KASSERT(kpreempt_disabled());

	qp = percpu_getptr_remote(pq->pq_pcq, ci);
	q = *qp;

	return q;
}

pktqueue_t *
pktq_create(size_t maxlen, void (*intrh)(void *), void *sc)
{
	const u_int sflags = SOFTINT_NET | SOFTINT_MPSAFE | SOFTINT_RCPU;
	pktqueue_t *pq;
	percpu_t *pc;
	void *sih;

	pc = percpu_alloc(sizeof(pktq_counters_t));
	if ((sih = softint_establish(sflags, intrh, sc)) == NULL) {
		percpu_free(pc, sizeof(pktq_counters_t));
		return NULL;
	}

	pq = kmem_zalloc(sizeof(*pq), KM_SLEEP);
	mutex_init(&pq->pq_lock, MUTEX_DEFAULT, IPL_NONE);
	pq->pq_maxlen = maxlen;
	pq->pq_counters = pc;
	pq->pq_sih = sih;
	pq->pq_pcq = percpu_create(sizeof(struct pcq *),
	    pktq_init_cpu, pktq_fini_cpu, pq);

	return pq;
}

void
pktq_destroy(pktqueue_t *pq)
{

	percpu_free(pq->pq_pcq, sizeof(struct pcq *));
	percpu_free(pq->pq_counters, sizeof(pktq_counters_t));
	softint_disestablish(pq->pq_sih);
	mutex_destroy(&pq->pq_lock);
	kmem_free(pq, sizeof(*pq));
}

/*
 * - pktq_inc_count: increment the counter given an ID.
 * - pktq_collect_counts: handler to sum up the counts from each CPU.
 * - pktq_get_count: return the effective count given an ID.
 */

static inline void
pktq_inc_count(pktqueue_t *pq, u_int i)
{
	percpu_t *pc = pq->pq_counters;
	pktq_counters_t *c;

	c = percpu_getref(pc);
	c->count[i]++;
	percpu_putref(pc);
}

static void
pktq_collect_counts(void *mem, void *arg, struct cpu_info *ci)
{
	const pktq_counters_t *c = mem;
	pktq_counters_t *sum = arg;

	int s = splnet();

	for (u_int i = 0; i < PQCNT_NCOUNTERS; i++) {
		sum->count[i] += c->count[i];
	}

	splx(s);
}

uint64_t
pktq_get_count(pktqueue_t *pq, pktq_count_t c)
{
	pktq_counters_t sum;

	if (c != PKTQ_MAXLEN) {
		memset(&sum, 0, sizeof(sum));
		percpu_foreach_xcall(pq->pq_counters,
		    XC_HIGHPRI_IPL(IPL_SOFTNET), pktq_collect_counts, &sum);
	}
	switch (c) {
	case PKTQ_NITEMS:
		return sum.count[PQCNT_ENQUEUE] - sum.count[PQCNT_DEQUEUE];
	case PKTQ_DROPS:
		return sum.count[PQCNT_DROP];
	case PKTQ_MAXLEN:
		return pq->pq_maxlen;
	}
	return 0;
}

uint32_t
pktq_rps_hash(pktq_rps_hash_func_t *funcp, const struct mbuf *m)
{
	pktq_rps_hash_func_t func = atomic_load_relaxed(funcp);

	KASSERT(func != NULL);

	return (*func)(m);
}
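
/*
 * Example of the enqueue side (an illustrative sketch; "proto_pktq" and
 * "proto_rps_hash_p" are hypothetical placeholders, the latter set to
 * pktq_rps_hash_default or one of the functions below at init time):
 * the input path hashes the packet and pktq_enqueue() then steers it to
 * the per-CPU queue selected by the hash.  On failure the packet is not
 * consumed, so the caller frees it.
 *
 *	void
 *	proto_input(struct mbuf *m)
 *	{
 *		const uint32_t h = pktq_rps_hash(&proto_rps_hash_p, m);
 *
 *		kpreempt_disable();
 *		if (__predict_false(!pktq_enqueue(proto_pktq, m, h))) {
 *			m_freem(m);
 *		}
 *		kpreempt_enable();
 *	}
 */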

static uint32_t
pktq_rps_hash_zero(const struct mbuf *m __unused)
{

	return 0;
}

static uint32_t
pktq_rps_hash_curcpu(const struct mbuf *m __unused)
{

	return cpu_index(curcpu());
}

static uint32_t
pktq_rps_hash_toeplitz(const struct mbuf *m)
{
	struct ip *ip;
	/*
	 * Disable UDP port hashing: IP fragments are not currently
	 * handled, so we would otherwise end up with a mix of 2-tuple
	 * and 4-tuple traffic.
	 */
	const u_int flag = RSS_TOEPLITZ_USE_TCP_PORT;

	/* Peek at the IP version. */
	if ((m->m_flags & M_PKTHDR) == 0)
		return 0;

	ip = mtod(m, struct ip *);
	if (ip->ip_v == IPVERSION) {
		if (__predict_false(m->m_len < sizeof(struct ip)))
			return 0;
		return rss_toeplitz_hash_from_mbuf_ipv4(m, flag);
	} else if (ip->ip_v == 6) {
		if (__predict_false(m->m_len < sizeof(struct ip6_hdr)))
			return 0;
		return rss_toeplitz_hash_from_mbuf_ipv6(m, flag);
	}

	return 0;
}

/*
 * Toeplitz hash that excludes the current CPU.
 * Generally, this performs better than plain toeplitz.
 */
static uint32_t
pktq_rps_hash_toeplitz_othercpus(const struct mbuf *m)
{
	uint32_t hash;

	/*
	 * Map the hash onto [0, ncpu - 2] and then skip over the current
	 * CPU's index, so packets are steered to the other CPUs only
	 * (this assumes ncpu > 1).
	 */
	hash = pktq_rps_hash_toeplitz(m);
	hash %= ncpu - 1;
	if (hash >= cpu_index(curcpu()))
		return hash + 1;
	else
		return hash;
}

static struct pktq_rps_hash_table {
	const char *prh_type;
	pktq_rps_hash_func_t prh_func;
} const pktq_rps_hash_tab[] = {
	{ "zero", pktq_rps_hash_zero },
	{ "curcpu", pktq_rps_hash_curcpu },
	{ "toeplitz", pktq_rps_hash_toeplitz },
	{ "toeplitz-othercpus", pktq_rps_hash_toeplitz_othercpus },
};
const pktq_rps_hash_func_t pktq_rps_hash_default =
#ifdef NET_MPSAFE
	pktq_rps_hash_curcpu;
#else
	pktq_rps_hash_zero;
#endif

static const char *
pktq_get_rps_hash_type(pktq_rps_hash_func_t func)
{

	for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
		if (func == pktq_rps_hash_tab[i].prh_func) {
			return pktq_rps_hash_tab[i].prh_type;
		}
	}

	return NULL;
}

static int
pktq_set_rps_hash_type(pktq_rps_hash_func_t *func, const char *type)
{

	if (strcmp(type, pktq_get_rps_hash_type(*func)) == 0)
		return 0;

	for (int i = 0; i < __arraycount(pktq_rps_hash_tab); i++) {
		if (strcmp(type, pktq_rps_hash_tab[i].prh_type) == 0) {
			atomic_store_relaxed(func, pktq_rps_hash_tab[i].prh_func);
			return 0;
		}
	}

	return ENOENT;
}

int
sysctl_pktq_rps_hash_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	pktq_rps_hash_func_t *func;
	int error;
	char type[PKTQ_RPS_HASH_NAME_LEN];

	node = *rnode;
	func = node.sysctl_data;

	strlcpy(type, pktq_get_rps_hash_type(*func), PKTQ_RPS_HASH_NAME_LEN);

	node.sysctl_data = &type;
	node.sysctl_size = sizeof(type);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	error = pktq_set_rps_hash_type(func, type);

	return error;
}
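
/*
 * The handler above is meant to back a CTLTYPE_STRING sysctl node whose
 * sysctl_data points at the protocol's hash-function pointer.  A sketch
 * of the registration (the MIB path and "proto_rps_hash_p" are
 * hypothetical; check the arguments against the protocol's own sysctl
 * setup):
 *
 *	sysctl_createv(clog, 0, NULL, NULL,
 *	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
 *	    CTLTYPE_STRING, "rps_hash",
 *	    SYSCTL_DESCR("RPS hash function"),
 *	    sysctl_pktq_rps_hash_handler, 0, (void *)&proto_rps_hash_p,
 *	    PKTQ_RPS_HASH_NAME_LEN,
 *	    CTL_NET, PF_INET, IPPROTO_IP, CTL_CREATE, CTL_EOL);
 */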

/*
 * pktq_enqueue: inject the packet at the end of the queue.
 *
 * => Must be called from interrupt context or with preemption disabled.
 * => Consumes the packet and returns true on success.
 * => Returns false on failure; the caller is responsible for freeing
 *    the packet.
 */
bool
pktq_enqueue(pktqueue_t *pq, struct mbuf *m, const u_int hash __unused)
{
#if defined(_RUMPKERNEL) || defined(_RUMP_NATIVE_ABI)
	struct cpu_info *ci = curcpu();
#else
	struct cpu_info *ci = cpu_lookup(hash % ncpu);
#endif

	KASSERT(kpreempt_disabled());

	if (__predict_false(!pcq_put(pktq_pcq(pq, ci), m))) {
		pktq_inc_count(pq, PQCNT_DROP);
		return false;
	}
	softint_schedule_cpu(pq->pq_sih, ci);
	pktq_inc_count(pq, PQCNT_ENQUEUE);
	return true;
}

/*
 * pktq_dequeue: take a packet from the queue.
 *
 * => Must be called with preemption disabled.
 * => The caller must ensure there are no concurrent dequeue calls.
 */
struct mbuf *
pktq_dequeue(pktqueue_t *pq)
{
	struct cpu_info *ci = curcpu();
	struct mbuf *m;

	KASSERT(kpreempt_disabled());

	m = pcq_get(pktq_pcq(pq, ci));
	if (__predict_false(m == PKTQ_MARKER)) {
		/* Found the marker entry: account for it in the barrier count. */
		atomic_inc_uint(&pq->pq_barrier);
		return NULL;
	}
	if (__predict_true(m != NULL)) {
		pktq_inc_count(pq, PQCNT_DEQUEUE);
	}
	return m;
}
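
/*
 * The softint handler given to pktq_create() normally just drains the
 * local per-CPU queue.  A sketch (with hypothetical "protointr",
 * "proto_input" and "proto_pktq"); running in softint context satisfies
 * the kpreempt_disabled() assertion above, so no explicit
 * kpreempt_disable() is needed here:
 *
 *	static void
 *	protointr(void *arg __unused)
 *	{
 *		struct mbuf *m;
 *
 *		while ((m = pktq_dequeue(proto_pktq)) != NULL) {
 *			proto_input(m);
 *		}
 *	}
 */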

/*
 * pktq_barrier: waits for a grace period during which all packets
 * enqueued at the moment of calling this routine will have been
 * processed.  This is used to ensure that e.g. packets referencing
 * some interface have been drained.
 */
void
pktq_barrier(pktqueue_t *pq)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int pending = 0;

	mutex_enter(&pq->pq_lock);
	KASSERT(pq->pq_barrier == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		struct pcq *q;

		kpreempt_disable();
		q = pktq_pcq(pq, ci);
		kpreempt_enable();

		/* If the queue is empty, there is nothing to do. */
		if (pcq_peek(q) == NULL) {
			continue;
		}
		/* Otherwise, insert the marker entry. */
		while (!pcq_put(q, PKTQ_MARKER)) {
			kpause("pktqsync", false, 1, NULL);
		}
		kpreempt_disable();
		softint_schedule_cpu(pq->pq_sih, ci);
		kpreempt_enable();
		pending++;
	}

	/* Wait for each queue to process the markers. */
	while (pq->pq_barrier != pending) {
		kpause("pktqsync", false, 1, NULL);
	}
	pq->pq_barrier = 0;
	mutex_exit(&pq->pq_lock);
}
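
/*
 * For instance, a detach path can use the barrier to make sure that no
 * packets still referencing a departing interface remain queued (a
 * sketch; "proto_pktq", "proto_ifdetach" and the purge step are
 * hypothetical, where proto_purge() first stops new packets from
 * referencing ifp):
 *
 *	void
 *	proto_ifdetach(struct ifnet *ifp)
 *	{
 *		proto_purge(ifp);
 *		pktq_barrier(proto_pktq);
 *	}
 */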

/*
 * pktq_flush: free mbufs in all queues.
 *
 * => The caller must ensure there are no concurrent writers or flush calls.
 */
void
pktq_flush(pktqueue_t *pq)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct mbuf *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		struct pcq *q;

		kpreempt_disable();
		q = pktq_pcq(pq, ci);
		kpreempt_enable();

		/*
		 * XXX This can't be right -- if the softint is running
		 * then pcq_get isn't safe here.
		 */
		while ((m = pcq_get(q)) != NULL) {
			pktq_inc_count(pq, PQCNT_DEQUEUE);
			m_freem(m);
		}
	}
}

static void
pktq_set_maxlen_cpu(void *vpq, void *vqs)
{
	struct pktqueue *pq = vpq;
	struct pcq **qp, *q, **qs = vqs;
	unsigned i = cpu_index(curcpu());
	int s;

	s = splnet();
	qp = percpu_getref(pq->pq_pcq);
	q = *qp;
	*qp = qs[i];
	qs[i] = q;
	percpu_putref(pq->pq_pcq);
	splx(s);
}

/*
 * pktq_set_maxlen: create per-CPU queues using a new size and replace
 * the existing queues without losing any packets.
 *
 * XXX ncpu must remain stable throughout.
 */
int
pktq_set_maxlen(pktqueue_t *pq, size_t maxlen)
{
	const u_int slotbytes = ncpu * sizeof(pcq_t *);
	pcq_t **qs;

	if (!maxlen || maxlen > PCQ_MAXLEN)
		return EINVAL;
	if (pq->pq_maxlen == maxlen)
		return 0;

	/* First, allocate the new queues. */
	qs = kmem_zalloc(slotbytes, KM_SLEEP);
	for (u_int i = 0; i < ncpu; i++) {
		qs[i] = pcq_create(maxlen, KM_SLEEP);
	}

	/*
	 * Issue an xcall to replace the queue pointers on each CPU.
	 * This implies all the necessary memory barriers.
	 */
	mutex_enter(&pq->pq_lock);
	xc_wait(xc_broadcast(XC_HIGHPRI, pktq_set_maxlen_cpu, pq, qs));
	pq->pq_maxlen = maxlen;
	mutex_exit(&pq->pq_lock);

	/*
	 * At this point, the new packets are flowing into the new
	 * queues.  However, the old queues may have some packets
	 * present which are no longer being processed.  We are going
	 * to re-enqueue them.  This may change the order of packet
	 * arrival, but it is not considered an issue.
	 *
	 * There may be in-flight interrupts calling pktq_dequeue()
	 * which reference the old queues.  Issue a barrier to ensure
	 * that we are going to be the only pcq_get() callers on the
	 * old queues.
	 */
	pktq_barrier(pq);

	for (u_int i = 0; i < ncpu; i++) {
		struct pcq *q;
		struct mbuf *m;

		kpreempt_disable();
		q = pktq_pcq(pq, cpu_lookup(i));
		kpreempt_enable();

		while ((m = pcq_get(qs[i])) != NULL) {
			while (!pcq_put(q, m)) {
				kpause("pktqrenq", false, 1, NULL);
			}
		}
		pcq_destroy(qs[i]);
	}

	/* Well, that was fun. */
	kmem_free(qs, slotbytes);
	return 0;
}

int
sysctl_pktq_maxlen(SYSCTLFN_ARGS, pktqueue_t *pq)
{
	u_int nmaxlen = pktq_get_count(pq, PKTQ_MAXLEN);
	struct sysctlnode node = *rnode;
	int error;

	node.sysctl_data = &nmaxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	return pktq_set_maxlen(pq, nmaxlen);
}

int
sysctl_pktq_count(SYSCTLFN_ARGS, pktqueue_t *pq, u_int count_id)
{
	uint64_t count = pktq_get_count(pq, count_id);
	struct sysctlnode node = *rnode;

	node.sysctl_data = &count;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
599