xref: /openbsd-src/sys/net/ifq.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: ifq.c,v 1.4 2015/12/29 12:35:43 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2015 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/socket.h>
22 #include <sys/mbuf.h>
23 #include <sys/proc.h>
24 
25 #include <net/if.h>
26 #include <net/if_var.h>
27 
28 /*
29  * priq glue
30  */
31 void		*priq_alloc(void *);
32 void		 priq_free(void *);
33 int		 priq_enq(struct ifqueue *, struct mbuf *);
34 struct mbuf	*priq_deq_begin(struct ifqueue *, void **);
35 void		 priq_deq_commit(struct ifqueue *, struct mbuf *, void *);
36 void		 priq_purge(struct ifqueue *, struct mbuf_list *);
37 
38 const struct ifq_ops priq_ops = {
39 	priq_alloc,
40 	priq_free,
41 	priq_enq,
42 	priq_deq_begin,
43 	priq_deq_commit,
44 	priq_purge,
45 };
46 
47 const struct ifq_ops * const ifq_priq_ops = &priq_ops;
48 
/*
 * priq internal structures
 */

/* a singly linked packet list, chained through m_nextpkt */
struct priq_list {
	struct mbuf		*head;
	struct mbuf		*tail;	/* NULL iff head is NULL */
};

/* one packet list per priority level */
struct priq {
	struct priq_list	 pq_lists[IFQ_NQUEUES];
};
61 
/*
 * ifqueue serialiser
 */

void	ifq_start_task(void *);
void	ifq_restart_task(void *);
void	ifq_barrier_task(void *);

/* set in t_flags while a task is linked on ifq_task_list */
#define TASK_ONQUEUE 0x1
71 
/*
 * Queue the task on the ifq task list and make sure it gets run.
 * Whichever cpu finds ifq_serializer unclaimed takes responsibility
 * for draining the whole list; every other caller just enqueues its
 * task and returns, knowing the current serializer will run it.
 */
void
ifq_serialize(struct ifqueue *ifq, struct task *t)
{
	struct task work;

	/* unlocked fast path: already queued, the serializer will run it */
	if (ISSET(t->t_flags, TASK_ONQUEUE))
		return;

	mtx_enter(&ifq->ifq_task_mtx);
	/* recheck under the mutex in case the fast path raced an enqueue */
	if (!ISSET(t->t_flags, TASK_ONQUEUE)) {
		SET(t->t_flags, TASK_ONQUEUE);
		TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
	}

	if (ifq->ifq_serializer == NULL) {
		/* nobody is draining the list yet; this cpu takes the job */
		ifq->ifq_serializer = curcpu();

		while ((t = TAILQ_FIRST(&ifq->ifq_task_list)) != NULL) {
			TAILQ_REMOVE(&ifq->ifq_task_list, t, t_entry);
			CLR(t->t_flags, TASK_ONQUEUE);
			work = *t; /* copy to caller to avoid races */

			/* drop the mutex while the task body runs */
			mtx_leave(&ifq->ifq_task_mtx);

			(*work.t_func)(work.t_arg);

			mtx_enter(&ifq->ifq_task_mtx);
		}

		ifq->ifq_serializer = NULL;
	}
	mtx_leave(&ifq->ifq_task_mtx);
}
105 
106 int
107 ifq_is_serialized(struct ifqueue *ifq)
108 {
109 	return (ifq->ifq_serializer == curcpu());
110 }
111 
112 void
113 ifq_start_task(void *p)
114 {
115 	struct ifqueue *ifq = p;
116 	struct ifnet *ifp = ifq->ifq_if;
117 
118 	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
119 	    ifq_empty(ifq) || ifq_is_oactive(ifq))
120 		return;
121 
122 	ifp->if_start(ifp);
123 }
124 
125 void
126 ifq_restart_task(void *p)
127 {
128 	struct ifqueue *ifq = p;
129 	struct ifnet *ifp = ifq->ifq_if;
130 
131 	ifq_clr_oactive(ifq);
132 	ifp->if_start(ifp);
133 }
134 
/*
 * Wait for all tasks currently queued on the ifq serialiser to finish:
 * queue a task that clears "notdone" and sleep until it has run.
 */
void
ifq_barrier(struct ifqueue *ifq)
{
	struct sleep_state sls;
	unsigned int notdone = 1;
	struct task t = TASK_INITIALIZER(ifq_barrier_task, &notdone);

	/* this should only be called from converted drivers */
	KASSERT(ISSET(ifq->ifq_if->if_xflags, IFXF_MPSAFE));

	/* serialiser idle: nothing to wait for */
	if (ifq->ifq_serializer == NULL)
		return;

	ifq_serialize(ifq, &t);

	/* sleep until ifq_barrier_task() clears notdone and wakes us */
	while (notdone) {
		sleep_setup(&sls, &notdone, PWAIT, "ifqbar");
		sleep_finish(&sls, notdone);
	}
}
155 
/* runs inside the serialiser on behalf of ifq_barrier() */
void
ifq_barrier_task(void *p)
{
	unsigned int *flag = p;

	*flag = 0;		/* mark the barrier as passed... */
	wakeup_one(flag);	/* ...and wake the sleeper in ifq_barrier() */
}
164 
165 /*
166  * ifqueue mbuf queue API
167  */
168 
169 void
170 ifq_init(struct ifqueue *ifq, struct ifnet *ifp)
171 {
172 	ifq->ifq_if = ifp;
173 
174 	mtx_init(&ifq->ifq_mtx, IPL_NET);
175 	ifq->ifq_drops = 0;
176 
177 	/* default to priq */
178 	ifq->ifq_ops = &priq_ops;
179 	ifq->ifq_q = priq_ops.ifqop_alloc(NULL);
180 
181 	ifq->ifq_len = 0;
182 
183 	mtx_init(&ifq->ifq_task_mtx, IPL_NET);
184 	TAILQ_INIT(&ifq->ifq_task_list);
185 	ifq->ifq_serializer = NULL;
186 
187 	task_set(&ifq->ifq_start, ifq_start_task, ifq);
188 	task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
189 
190 	if (ifq->ifq_maxlen == 0)
191 		ifq_set_maxlen(ifq, IFQ_MAXLEN);
192 }
193 
/*
 * Swap the queueing discipline on a live ifqueue.  Packets held by the
 * old discipline are drained and re-enqueued onto the new one; whatever
 * the new discipline refuses is counted as a drop and freed once the
 * mutex has been released.
 */
void
ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf_list free_ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	const struct ifq_ops *oldops;
	void *newq, *oldq;

	/* allocate the new discipline state before taking the mutex */
	newq = newops->ifqop_alloc(opsarg);

	mtx_enter(&ifq->ifq_mtx);
	/* drain every pending packet out of the old discipline */
	ifq->ifq_ops->ifqop_purge(ifq, &ml);
	ifq->ifq_len = 0;

	oldops = ifq->ifq_ops;
	oldq = ifq->ifq_q;

	ifq->ifq_ops = newops;
	ifq->ifq_q = newq;

	/* feed the drained packets to the new discipline */
	while ((m = ml_dequeue(&ml)) != NULL) {
		if (ifq->ifq_ops->ifqop_enq(ifq, m) != 0) {
			ifq->ifq_drops++;
			ml_enqueue(&free_ml, m);
		} else
			ifq->ifq_len++;
	}
	mtx_leave(&ifq->ifq_mtx);

	/* release the old state and rejected packets outside the mutex */
	oldops->ifqop_free(oldq);

	ml_purge(&free_ml);
}
228 
229 void
230 ifq_destroy(struct ifqueue *ifq)
231 {
232 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
233 
234 	/* don't need to lock because this is the last use of the ifq */
235 
236 	ifq->ifq_ops->ifqop_purge(ifq, &ml);
237 	ifq->ifq_ops->ifqop_free(ifq->ifq_q);
238 
239 	ml_purge(&ml);
240 }
241 
242 int
243 ifq_enqueue_try(struct ifqueue *ifq, struct mbuf *m)
244 {
245 	int rv;
246 
247 	mtx_enter(&ifq->ifq_mtx);
248 	rv = ifq->ifq_ops->ifqop_enq(ifq, m);
249 	if (rv == 0)
250 		ifq->ifq_len++;
251 	else
252 		ifq->ifq_drops++;
253 	mtx_leave(&ifq->ifq_mtx);
254 
255 	return (rv);
256 }
257 
/*
 * Like ifq_enqueue_try(), but consumes the mbuf on failure.
 */
int
ifq_enqueue(struct ifqueue *ifq, struct mbuf *m)
{
	int error = ifq_enqueue_try(ifq, m);

	if (error != 0)
		m_freem(m);

	return (error);
}
269 
/*
 * Begin a dequeue: peek at the next packet without removing it.
 * On success the ifq mutex is left HELD and must be released by a
 * matching ifq_deq_commit() or ifq_deq_rollback().  The discipline's
 * cookie is stashed in the pkthdr so commit can find the packet again.
 */
struct mbuf *
ifq_deq_begin(struct ifqueue *ifq)
{
	struct mbuf *m = NULL;
	void *cookie;

	mtx_enter(&ifq->ifq_mtx);
	if (ifq->ifq_len == 0 ||
	    (m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie)) == NULL) {
		/* nothing to hand out: drop the mutex again */
		mtx_leave(&ifq->ifq_mtx);
		return (NULL);
	}

	m->m_pkthdr.ph_cookie = cookie;

	return (m);
}
287 
/*
 * Commit a dequeue started by ifq_deq_begin(): remove the packet from
 * the discipline for real and release the mutex ifq_deq_begin() left
 * held.
 */
void
ifq_deq_commit(struct ifqueue *ifq, struct mbuf *m)
{
	void *cookie;

	KASSERT(m != NULL);
	cookie = m->m_pkthdr.ph_cookie;

	ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
	ifq->ifq_len--;
	mtx_leave(&ifq->ifq_mtx);
}
300 
301 void
302 ifq_deq_rollback(struct ifqueue *ifq, struct mbuf *m)
303 {
304 	KASSERT(m != NULL);
305 
306 	mtx_leave(&ifq->ifq_mtx);
307 }
308 
309 struct mbuf *
310 ifq_dequeue(struct ifqueue *ifq)
311 {
312 	struct mbuf *m;
313 
314 	m = ifq_deq_begin(ifq);
315 	if (m == NULL)
316 		return (NULL);
317 
318 	ifq_deq_commit(ifq, m);
319 
320 	return (m);
321 }
322 
323 unsigned int
324 ifq_purge(struct ifqueue *ifq)
325 {
326 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
327 	unsigned int rv;
328 
329 	mtx_enter(&ifq->ifq_mtx);
330 	ifq->ifq_ops->ifqop_purge(ifq, &ml);
331 	rv = ifq->ifq_len;
332 	ifq->ifq_len = 0;
333 	ifq->ifq_drops += rv;
334 	mtx_leave(&ifq->ifq_mtx);
335 
336 	KASSERT(rv == ml_len(&ml));
337 
338 	ml_purge(&ml);
339 
340 	return (rv);
341 }
342 
/*
 * Give a discipline implementation access to its private queue state.
 * Returns the state with the ifq mutex HELD if the queue is still
 * using the expected ops, otherwise NULL with the mutex released.
 * A non-NULL return must be paired with ifq_q_leave().
 */
void *
ifq_q_enter(struct ifqueue *ifq, const struct ifq_ops *ops)
{
	mtx_enter(&ifq->ifq_mtx);
	if (ifq->ifq_ops == ops)
		return (ifq->ifq_q);

	mtx_leave(&ifq->ifq_mtx);

	return (NULL);
}
354 
355 void
356 ifq_q_leave(struct ifqueue *ifq, void *q)
357 {
358 	KASSERT(q == ifq->ifq_q);
359 	mtx_leave(&ifq->ifq_mtx);
360 }
361 
362 /*
363  * priq implementation
364  */
365 
366 void *
367 priq_alloc(void *null)
368 {
369 	return (malloc(sizeof(struct priq), M_DEVBUF, M_WAITOK | M_ZERO));
370 }
371 
372 void
373 priq_free(void *pq)
374 {
375 	free(pq, M_DEVBUF, sizeof(struct priq));
376 }
377 
378 int
379 priq_enq(struct ifqueue *ifq, struct mbuf *m)
380 {
381 	struct priq *pq;
382 	struct priq_list *pl;
383 
384 	if (ifq_len(ifq) >= ifq->ifq_maxlen)
385 		return (ENOBUFS);
386 
387 	pq = ifq->ifq_q;
388 	KASSERT(m->m_pkthdr.pf.prio <= IFQ_MAXPRIO);
389 	pl = &pq->pq_lists[m->m_pkthdr.pf.prio];
390 
391 	m->m_nextpkt = NULL;
392 	if (pl->tail == NULL)
393 		pl->head = m;
394 	else
395 		pl->tail->m_nextpkt = m;
396 	pl->tail = m;
397 
398 	return (0);
399 }
400 
401 struct mbuf *
402 priq_deq_begin(struct ifqueue *ifq, void **cookiep)
403 {
404 	struct priq *pq = ifq->ifq_q;
405 	struct priq_list *pl;
406 	unsigned int prio = nitems(pq->pq_lists);
407 	struct mbuf *m;
408 
409 	do {
410 		pl = &pq->pq_lists[--prio];
411 		m = pl->head;
412 		if (m != NULL) {
413 			*cookiep = pl;
414 			return (m);
415 		}
416 	} while (prio > 0);
417 
418 	return (NULL);
419 }
420 
421 void
422 priq_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie)
423 {
424 	struct priq_list *pl = cookie;
425 
426 	KASSERT(pl->head == m);
427 
428 	pl->head = m->m_nextpkt;
429 	m->m_nextpkt = NULL;
430 
431 	if (pl->head == NULL)
432 		pl->tail = NULL;
433 }
434 
435 void
436 priq_purge(struct ifqueue *ifq, struct mbuf_list *ml)
437 {
438 	struct priq *pq = ifq->ifq_q;
439 	struct priq_list *pl;
440 	unsigned int prio = nitems(pq->pq_lists);
441 	struct mbuf *m, *n;
442 
443 	do {
444 		pl = &pq->pq_lists[--prio];
445 
446 		for (m = pl->head; m != NULL; m = n) {
447 			n = m->m_nextpkt;
448 			ml_enqueue(ml, m);
449 		}
450 
451 		pl->head = pl->tail = NULL;
452 	} while (prio > 0);
453 }
454