/*-
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define _NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"))

struct ifaltq;
struct ifaltq_subque;

/*
 * Subqueue watchdog
 */
typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, struct mbuf *,
		    int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_mask(struct ifaltq *, int);

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);
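
/*
 * Example (hypothetical driver, illustrative only): a consumer wires
 * the watchdog up at attach time and arms it whenever transmission is
 * pending.  The names xx_attach(), xx_watchdog() and sc are
 * assumptions, not part of this API.
 *
 *	static void
 *	xx_watchdog(struct ifaltq_subque *ifsq)
 *	{
 *		// wd_timer expired: reset the hardware, then restart.
 *	}
 *
 *	// In xx_attach():
 *	ifsq_watchdog_init(&sc->xx_watchdog,
 *	    ifq_get_subq_default(&ifp->if_snd), xx_watchdog);
 *
 *	// Arm after queueing a frame; disarm in the stop routine:
 *	ifsq_watchdog_start(&sc->xx_watchdog);
 *	...
 *	ifsq_watchdog_stop(&sc->xx_watchdog);
 */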

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
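
/*
 * Example (illustrative): an output path typically classifies the
 * packet for ALTQ and then hands it to ifq_dispatch(), which maps it
 * to a subqueue, enqueues it and starts the interface as needed.
 * The variables ifp, m and af are assumed to exist in the caller:
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, af, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */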

#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq, struct mbuf *_mpolled)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		KKASSERT(_ifsq->ifq_len > 0);
		_ifsq->ifq_len--;
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, _mpolled, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}
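
/*
 * Example (hypothetical xx_start(), illustrative only): a driver's
 * if_start method commonly loops, checking for transmit-ring space
 * before each dequeue so that nothing has to be pushed back.
 * xx_ring_full() and xx_encap() are assumed helpers:
 *
 *	static void
 *	xx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 *	{
 *		struct mbuf *m;
 *
 *		while (!ifsq_is_empty(ifsq)) {
 *			if (xx_ring_full(sc)) {
 *				ifsq_set_oactive(ifsq);
 *				break;
 *			}
 *			m = ifsq_dequeue(ifsq, NULL);
 *			if (m == NULL)
 *				break;
 *			xx_encap(sc, m);
 *		}
 *	}
 */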

/*
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, NULL, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, NULL, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, NULL, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
		KKASSERT(_ifsq->ifq_len > 0);
		_ifsq->ifq_len--;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			/*
			 * Recheck with the subqueue locked; ALTQ may
			 * have been disabled while the lock was being
			 * acquired.
			 */
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	_ifsq->ifq_len++;
	ALTQ_SQ_UNLOCK(_ifsq);
}
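
/*
 * Example (illustrative): ifsq_prepend() is the push-back path for a
 * packet that was dequeued but could not be transmitted; it becomes
 * the next packet returned by ifsq_dequeue()/ifsq_poll().  xx_encap()
 * is an assumed helper that must not free the mbuf on failure:
 *
 *	m = ifsq_dequeue(ifsq, NULL);
 *	if (m != NULL && xx_encap(sc, m) != 0) {
 *		ifsq_prepend(ifsq, m);
 *		ifsq_set_oactive(ifsq);
 *	}
 */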

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}
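
/*
 * Example (illustrative): the oactive flag is simple flow control
 * between the queue and the hardware.  The start path sets it when
 * the transmit ring fills; the TX-completion interrupt clears it and
 * kicks the queue again.  xx_ring_has_room() is an assumed helper:
 *
 *	// In the TX completion handler, hardware serializer held:
 *	if (ifsq_is_oactive(ifsq) && xx_ring_has_room(sc)) {
 *		ifsq_clr_oactive(ifsq);
 *		ifsq_devstart(ifsq);
 *	}
 */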

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue's hardware serializer must be held.  If the
 * serializer is not already held, ifq_dispatch() should be used
 * instead for better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */
	/*
	 * Snapshot the statistics before the enqueue; once the mbuf
	 * is queued it may be dequeued and freed by another CPU at
	 * any time.
	 */
	int _len = _m->m_pkthdr.len;
	int _mcast = (_m->m_flags & M_MCAST) != 0;

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _len);
		if (_mcast)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	}
	return(_error);
}
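
/*
 * Note (illustrative): ifq_handoff() suits paths that already run
 * inside the default subqueue's hardware serializer; everything else
 * should prefer ifq_dispatch(), which spares the caller from taking
 * the serializer just to hand a packet off.
 */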

static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifq_len == 0);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}
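
/*
 * Example (illustrative): a driver registers its per-ring serializer
 * once at attach time and then brackets hardware access with the
 * enter/exit pair; sc->xx_tx_slz is an assumed field:
 *
 *	// In attach:
 *	ifsq_set_hw_serialize(ifsq, &sc->xx_tx_slz);
 *
 *	ifsq_serialize_hw(ifsq);
 *	// ... touch TX registers / descriptor ring ...
 *	ifsq_deserialize_hw(ifsq);
 *
 * ifsq_tryserialize_hw() is the non-blocking variant; it returns
 * non-zero when the serializer was acquired.
 */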

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);

	return ifq_get_subq(_ifq, _idx);
}

static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

static __inline void
ifq_set_subq_mask(struct ifaltq *_ifq, uint32_t _mask)
{
	KASSERT(((_mask + 1) & _mask) == 0, ("invalid mask %08x", _mask));
	_ifq->altq_subq_mask = _mask;
}
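
/*
 * Example (hypothetical multi-TX-queue driver, illustrative only): an
 * attach routine sizes the subqueues and installs the mask-based
 * CPU-to-subqueue mapping.  XX_TXRING_CNT is an assumed power-of-two
 * constant, as required by the KASSERT in ifq_set_subq_mask():
 *
 *	ifq_set_subq_cnt(&ifp->if_snd, XX_TXRING_CNT);
 *	ifq_set_subq_mask(&ifp->if_snd, XX_TXRING_CNT - 1);
 *	ifq_set_methods(&ifp->if_snd, ifq_mapsubq_mask,
 *	    ifsq_classic_enqueue, ifsq_classic_dequeue,
 *	    ifsq_classic_request);
 */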

/* COMPAT */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq, struct mbuf *_mpolled)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq), _mpolled);
}

/* COMPAT */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

#endif	/* _NET_IFQ_VAR_H_ */