/*-
 * Copyright (c) 2010-2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

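/*
 * NPF worker framework.
 *
 * Provides a pool of kernel threads ("npfgc-N") which periodically run
 * the work functions registered by NPF instances, e.g. garbage collection
 * of expired state.  Each worker serves a list of instances and wakes up
 * either on a timeout (W_INTERVAL) or when explicitly signalled.
 */
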
#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_worker.c,v 1.6 2019/01/19 21:19:32 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/mutex.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/cprng.h>
#endif

#include "npf_impl.h"

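/*
 * Per-worker state: the lock and condition variable used for sleeping
 * and wakeups, a fixed-size table of registered work functions, the exit
 * flag, the worker LWP, and a singly-linked list of NPF instances served
 * by this worker (linked through npf_t::worker_entry).
 */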
typedef struct npf_worker {
	kmutex_t		worker_lock;
	kcondvar_t		worker_cv;
	npf_workfunc_t		work_funcs[NPF_MAX_WORKS];
	bool			worker_exit;
	lwp_t *			worker_lwp;
	npf_t *			instances;
} npf_worker_t;

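/* Interval between periodic worker wakeups: 1000 ms, converted to ticks. */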
#define	W_INTERVAL		mstohz(1 * 1000)

static void			npf_worker(void *) __dead;

static npf_worker_t *		npf_workers		__read_mostly;
static unsigned			npf_worker_count	__read_mostly;

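/*
 * npf_worker_sysinit: allocate the worker structures and spawn one
 * "npfgc" kthread per worker.  With nworkers == 0, no workers are
 * created.  On kthread_create() failure, undo the setup and return
 * ENOMEM.
 */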
int
npf_worker_sysinit(unsigned nworkers)
{
	if (nworkers) {
		const size_t len = sizeof(npf_worker_t) * nworkers;
		npf_workers = kmem_zalloc(len, KM_SLEEP);
	} else {
		npf_workers = NULL;
	}
	npf_worker_count = nworkers;

	for (unsigned i = 0; i < nworkers; i++) {
		npf_worker_t *wrk = &npf_workers[i];

		mutex_init(&wrk->worker_lock, MUTEX_DEFAULT, IPL_SOFTNET);
		cv_init(&wrk->worker_cv, "npfgccv");

		if (kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
		    NULL, npf_worker, wrk, &wrk->worker_lwp, "npfgc-%u", i)) {
			npf_worker_sysfini();
			return ENOMEM;
		}
	}
	return 0;
}

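/*
 * npf_worker_sysfini: request each worker thread to exit, wait for it,
 * destroy the associated lock/CV and free the worker array.
 */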
void
npf_worker_sysfini(void)
{
	for (unsigned i = 0; i < npf_worker_count; i++) {
		npf_worker_t *wrk = &npf_workers[i];

		/* Notify the worker and wait for the exit. */
		mutex_enter(&wrk->worker_lock);
		wrk->worker_exit = true;
		cv_broadcast(&wrk->worker_cv);
		mutex_exit(&wrk->worker_lock);

		if (wrk->worker_lwp) {
			kthread_join(wrk->worker_lwp);
		}

		/* LWP has exited, destroy the structures. */
		cv_destroy(&wrk->worker_cv);
		mutex_destroy(&wrk->worker_lock);
	}
	if (npf_workers) {
		const size_t len = sizeof(npf_worker_t) * npf_worker_count;
		kmem_free(npf_workers, len);
	}
}

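/*
 * npf_worker_signal: wake up the worker assigned to the given NPF
 * instance, so that pending work runs without waiting for the next
 * periodic wakeup.
 */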
void
npf_worker_signal(npf_t *npf)
{
	const unsigned idx = npf->worker_id;
	npf_worker_t *wrk = &npf_workers[idx];

	mutex_enter(&wrk->worker_lock);
	cv_signal(&wrk->worker_cv);
	mutex_exit(&wrk->worker_lock);
}

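/*
 * npf_worker_testset: find the first work-function slot equal to 'find'
 * and replace it with 'set'; returns true on success.  Registration
 * uses (NULL, func) and unregistration uses (func, NULL).
 */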
static bool
npf_worker_testset(npf_worker_t *wrk, npf_workfunc_t find, npf_workfunc_t set)
{
	for (u_int i = 0; i < NPF_MAX_WORKS; i++) {
		if (wrk->work_funcs[i] == find) {
			wrk->work_funcs[i] = set;
			return true;
		}
	}
	return false;
}

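/*
 * npf_worker_register: assign the NPF instance to a pseudo-randomly
 * chosen worker, link the instance into that worker's list and install
 * the work function in a free slot.
 *
 * Illustrative (hypothetical) caller, e.g. a per-instance GC routine:
 *
 *	npf_worker_register(npf, example_gc_func);
 *	...
 *	npf_worker_unregister(npf, example_gc_func);
 */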
void
npf_worker_register(npf_t *npf, npf_workfunc_t func)
{
	npf_worker_t *wrk;
	unsigned idx;

	if (!npf_worker_count) {
		return;
	}

	idx = cprng_fast32() % npf_worker_count;
	wrk = &npf_workers[idx];
	mutex_enter(&wrk->worker_lock);

	npf->worker_id = idx;
	npf->worker_entry = wrk->instances;
	wrk->instances = npf;

	npf_worker_testset(wrk, NULL, func);
	mutex_exit(&wrk->worker_lock);
}

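/*
 * npf_worker_unregister: remove the work function from the worker's
 * table and unlink the NPF instance from the worker's instance list.
 */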
void
npf_worker_unregister(npf_t *npf, npf_workfunc_t func)
{
	const unsigned idx = npf->worker_id;
	npf_worker_t *wrk;
	npf_t *instance;

	if (!npf_worker_count) {
		return;
	}
	wrk = &npf_workers[idx];

	mutex_enter(&wrk->worker_lock);
	npf_worker_testset(wrk, func, NULL);
	if ((instance = wrk->instances) == npf) {
		wrk->instances = instance->worker_entry;
	} else while (instance) {
		if (instance->worker_entry == npf) {
			instance->worker_entry = npf->worker_entry;
			break;
		}
		instance = instance->worker_entry;
	}
	mutex_exit(&wrk->worker_lock);
}

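/*
 * npf_worker: the main loop of a worker thread.  Until asked to exit,
 * walk the instance list, register this thread with each instance via
 * npf_thread_register() (once per instance), run all registered work
 * functions on it, then sleep for up to W_INTERVAL or until signalled.
 */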
static void
npf_worker(void *arg)
{
	npf_worker_t *wrk = arg;

	KASSERT(wrk != NULL);

	while (!wrk->worker_exit) {
		npf_t *npf;

		npf = wrk->instances;
		while (npf) {
			u_int i = NPF_MAX_WORKS;
			npf_workfunc_t work;

			if (!npf->sync_registered) {
				npf_thread_register(npf);
				npf->sync_registered = true;
			}

			/* Run the jobs. */
			while (i--) {
				if ((work = wrk->work_funcs[i]) != NULL) {
					work(npf);
				}
			}
			/* Next instance. */
			npf = npf->worker_entry;
		}
		if (wrk->worker_exit)
			break;

		/* Sleep and periodically wake up, unless we get notified. */
		mutex_enter(&wrk->worker_lock);
		cv_timedwait(&wrk->worker_cv, &wrk->worker_lock, W_INTERVAL);
		mutex_exit(&wrk->worker_lock);
	}
	kthread_exit(0);
}
224