Lines matching defs:ifq in sys/net/ifq.c (the leading number on each line is the line in that source file)

1 /*	$OpenBSD: ifq.c,v 1.55 2024/11/20 02:18:45 dlg Exp $ */
82 ifq_run_start(struct ifqueue *ifq)
84 ifq_serialize(ifq, &ifq->ifq_start);
88 ifq_serialize(struct ifqueue *ifq, struct task *t)
95 mtx_enter(&ifq->ifq_task_mtx);
98 TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
101 if (ifq->ifq_serializer == NULL) {
102 ifq->ifq_serializer = curcpu();
104 while ((t = TAILQ_FIRST(&ifq->ifq_task_list)) != NULL) {
105 TAILQ_REMOVE(&ifq->ifq_task_list, t, t_entry);
109 mtx_leave(&ifq->ifq_task_mtx);
113 mtx_enter(&ifq->ifq_task_mtx);
116 ifq->ifq_serializer = NULL;
118 mtx_leave(&ifq->ifq_task_mtx);
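
Lines 88-118 are the ifq serializer: any CPU may queue a task, but only the first CPU to find ifq_serializer NULL runs the task list, dropping the mutex around each task body so the work itself runs unlocked. A hedged reconstruction of the elided control flow; the TASK_ONQUEUE bookkeeping and the task copy are recalled from memory of this file, so verify against the tree:

    void
    ifq_serialize(struct ifqueue *ifq, struct task *t)
    {
        struct task work;

        mtx_enter(&ifq->ifq_task_mtx);
        if (!ISSET(t->t_flags, TASK_ONQUEUE)) {
            SET(t->t_flags, TASK_ONQUEUE);
            TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
        }

        if (ifq->ifq_serializer == NULL) {
            ifq->ifq_serializer = curcpu();    /* we run everyone's work */

            while ((t = TAILQ_FIRST(&ifq->ifq_task_list)) != NULL) {
                TAILQ_REMOVE(&ifq->ifq_task_list, t, t_entry);
                CLR(t->t_flags, TASK_ONQUEUE);

                work = *t;    /* t may be reused once off the list */

                mtx_leave(&ifq->ifq_task_mtx);
                (*work.t_func)(work.t_arg);    /* run without the mutex */
                mtx_enter(&ifq->ifq_task_mtx);
            }

            ifq->ifq_serializer = NULL;        /* line 116 */
        }
        mtx_leave(&ifq->ifq_task_mtx);
    }
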
122 ifq_start(struct ifqueue *ifq)
124 if (ifq_len(ifq) >= min(ifq->ifq_if->if_txmit, ifq->ifq_maxlen)) {
125 task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
126 ifq_run_start(ifq);
128 task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
134 struct ifqueue *ifq = p;
135 struct ifnet *ifp = ifq->ifq_if;
138 ifq_empty(ifq) || ifq_is_oactive(ifq))
141 ifp->if_qstart(ifq);
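
Lines 122-128 decide between dispatching the hardware immediately and batching: once the queue holds at least min(ifp->if_txmit, ifq_maxlen) packets it is worth starting now, otherwise the ifq_bundle task is left pending on the softnet taskq so a few more packets can accumulate. A hedged reconstruction with the else branch restored:

    void
    ifq_start(struct ifqueue *ifq)
    {
        if (ifq_len(ifq) >= min(ifq->ifq_if->if_txmit, ifq->ifq_maxlen)) {
            task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
            ifq_run_start(ifq);    /* a full batch: dispatch from here */
        } else {
            /* a small batch: let softnet coalesce more packets first */
            task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
        }
    }
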
145 ifq_set_oactive(struct ifqueue *ifq)
147 if (ifq->ifq_oactive)
150 mtx_enter(&ifq->ifq_mtx);
151 if (!ifq->ifq_oactive) {
152 ifq->ifq_oactive = 1;
153 ifq->ifq_oactives++;
155 mtx_leave(&ifq->ifq_mtx);
159 ifq_deq_set_oactive(struct ifqueue *ifq)
161 MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);
163 if (!ifq->ifq_oactive) {
164 ifq->ifq_oactive = 1;
165 ifq->ifq_oactives++;
172 struct ifqueue *ifq = p;
173 struct ifnet *ifp = ifq->ifq_if;
175 ifq_clr_oactive(ifq);
176 ifp->if_qstart(ifq);
182 struct ifqueue *ifq = p;
184 ifq_run_start(ifq);
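
ifq_restart_task (lines 172-176) clears the oactive flag and calls the driver's start routine again, under the serializer. Its usual consumer is a transmit-completion path; a sketch, where drv_txeof, struct drv_softc, and sc_if are hypothetical names:

    void
    drv_txeof(struct drv_softc *sc)
    {
        struct ifqueue *ifq = &sc->sc_if.if_snd;

        /* ... reclaim completed descriptors, making ring space ... */

        if (ifq_is_oactive(ifq))
            ifq_restart(ifq);    /* serializes ifq_restart_task, lines 172-176 */
    }
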
188 ifq_barrier(struct ifqueue *ifq)
193 task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
195 if (ifq->ifq_serializer == NULL)
198 ifq_serialize(ifq, &t);
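
ifq_barrier (lines 188-198) guarantees that any start routine running inside the serializer has finished: if nobody holds the serializer it returns at once (line 195), otherwise it enqueues a one-shot task and sleeps until the serializer runs it. As best I recall, the elided lines use cond_wait; verify the details against the tree:

    void
    ifq_barrier(struct ifqueue *ifq)
    {
        struct cond c = COND_INITIALIZER();
        struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);

        task_del(ifq->ifq_softnet, &ifq->ifq_bundle);

        if (ifq->ifq_serializer == NULL)
            return;

        ifq_serialize(ifq, &t);
        cond_wait(&c, "ifqbar");    /* woken by cond_signal() in the task */
    }
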
247 struct ifqueue *ifq = ks->ks_softc;
251 kstat_kv_u64(&kd->kd_packets) = ifq->ifq_packets;
252 kstat_kv_u64(&kd->kd_bytes) = ifq->ifq_bytes;
253 kstat_kv_u64(&kd->kd_qdrops) = ifq->ifq_qdrops;
254 kstat_kv_u64(&kd->kd_errors) = ifq->ifq_errors;
255 kstat_kv_u32(&kd->kd_qlen) = ifq->ifq_len;
256 kstat_kv_u32(&kd->kd_maxqlen) = ifq->ifq_maxlen;
257 kstat_kv_bool(&kd->kd_oactive) = ifq->ifq_oactive;
258 kstat_kv_u32(&kd->kd_oactives) = ifq->ifq_oactives;
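
Lines 247-258 copy the queue counters into a kstat record; because ifq_init later points the kstat's mutex at ifq_mtx (line 304), the copy runs under the same lock as the datapath and readers get a consistent snapshot. The destination struct plausibly looks like this, with field names taken from the kd_* accesses above (the exact template in ifq.c may differ):

    struct ifq_kstat_data {
        struct kstat_kv kd_packets;
        struct kstat_kv kd_bytes;
        struct kstat_kv kd_qdrops;
        struct kstat_kv kd_errors;
        struct kstat_kv kd_qlen;
        struct kstat_kv kd_maxqlen;
        struct kstat_kv kd_oactive;
        struct kstat_kv kd_oactives;
    };
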
265 ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
267 ifq->ifq_if = ifp;
268 ifq->ifq_softnet = net_tq(idx);
269 ifq->ifq_softc = NULL;
271 mtx_init(&ifq->ifq_mtx, IPL_NET);
274 ifq->ifq_ops = &priq_ops;
275 ifq->ifq_q = priq_ops.ifqop_alloc(idx, NULL);
277 ml_init(&ifq->ifq_free);
278 ifq->ifq_len = 0;
280 ifq->ifq_packets = 0;
281 ifq->ifq_bytes = 0;
282 ifq->ifq_qdrops = 0;
283 ifq->ifq_errors = 0;
284 ifq->ifq_mcasts = 0;
286 mtx_init(&ifq->ifq_task_mtx, IPL_NET);
287 TAILQ_INIT(&ifq->ifq_task_list);
288 ifq->ifq_serializer = NULL;
289 task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);
291 task_set(&ifq->ifq_start, ifq_start_task, ifq);
292 task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
294 if (ifq->ifq_maxlen == 0)
295 ifq_init_maxlen(ifq, IFQ_MAXLEN);
297 ifq->ifq_idx = idx;
301 ifq->ifq_kstat = kstat_create(ifp->if_xname, 0,
302 "txq", ifq->ifq_idx, KSTAT_T_KV, 0);
303 KASSERT(ifq->ifq_kstat != NULL);
304 kstat_set_mutex(ifq->ifq_kstat, &ifq->ifq_mtx);
305 ifq->ifq_kstat->ks_softc = ifq;
306 ifq->ifq_kstat->ks_datalen = sizeof(ifq_kstat_tpl);
307 ifq->ifq_kstat->ks_copy = ifq_kstat_copy;
308 kstat_install(ifq->ifq_kstat);
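
ifq_init (lines 265-308) runs for each transmit queue during if_attach, installing the priq discipline by default and falling back to IFQ_MAXLEN only if the driver has not sized the queue already (lines 294-295). That check is why an MPSAFE driver sets its queue depth before attaching; a sketch, with drv_start and DRV_TX_SLOTS as hypothetical names:

    ifp->if_xflags = IFXF_MPSAFE;
    ifp->if_qstart = drv_start;                   /* called as ifp->if_qstart(ifq) */
    ifq_init_maxlen(&ifp->if_snd, DRV_TX_SLOTS);  /* survives ifq_init(), line 294 */
    if_attach(ifp);                               /* ifq_init() runs in here */
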
313 ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg)
321 newq = newops->ifqop_alloc(ifq->ifq_idx, opsarg);
323 mtx_enter(&ifq->ifq_mtx);
324 ifq->ifq_ops->ifqop_purge(ifq, &ml);
325 ifq->ifq_len = 0;
327 oldops = ifq->ifq_ops;
328 oldq = ifq->ifq_q;
330 ifq->ifq_ops = newops;
331 ifq->ifq_q = newq;
334 m = ifq->ifq_ops->ifqop_enq(ifq, m);
336 ifq->ifq_qdrops++;
339 ifq->ifq_len++;
341 mtx_leave(&ifq->ifq_mtx);
343 oldops->ifqop_free(ifq->ifq_idx, oldq);
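
ifq_attach (lines 313-343) swaps one queueing discipline for another while traffic flows: it allocates the new discipline's state, purges the old queue under ifq_mtx, installs the new ops, then re-enqueues the purged packets, dropping whatever the new discipline refuses (line 336). The ops vector being swapped has roughly this shape, reconstructed from the ifqop_* calls in this listing plus memory of ifq.h (the ifqop_idx member is recalled, not shown above):

    struct ifq_ops {
        unsigned int     (*ifqop_idx)(unsigned int, const struct mbuf *);
        struct mbuf     *(*ifqop_enq)(struct ifqueue *, struct mbuf *);
        struct mbuf     *(*ifqop_deq_begin)(struct ifqueue *, void **);
        void             (*ifqop_deq_commit)(struct ifqueue *,
                             struct mbuf *, void *);
        void             (*ifqop_purge)(struct ifqueue *,
                             struct mbuf_list *);
        void            *(*ifqop_alloc)(unsigned int, void *);
        void             (*ifqop_free)(unsigned int, void *);
    };
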
349 ifq_destroy(struct ifqueue *ifq)
354 kstat_destroy(ifq->ifq_kstat);
358 if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle))
359 taskq_barrier(ifq->ifq_softnet);
361 /* don't need to lock because this is the last use of the ifq */
363 ifq->ifq_ops->ifqop_purge(ifq, &ml);
364 ifq->ifq_ops->ifqop_free(ifq->ifq_idx, ifq->ifq_q);
370 ifq_add_data(struct ifqueue *ifq, struct if_data *data)
372 mtx_enter(&ifq->ifq_mtx);
373 data->ifi_opackets += ifq->ifq_packets;
374 data->ifi_obytes += ifq->ifq_bytes;
375 data->ifi_oqdrops += ifq->ifq_qdrops;
376 data->ifi_omcasts += ifq->ifq_mcasts;
378 mtx_leave(&ifq->ifq_mtx);
382 ifq_enqueue(struct ifqueue *ifq, struct mbuf *m)
386 mtx_enter(&ifq->ifq_mtx);
387 dm = ifq->ifq_ops->ifqop_enq(ifq, m);
389 ifq->ifq_packets++;
390 ifq->ifq_bytes += m->m_pkthdr.len;
392 ifq->ifq_mcasts++;
396 ifq->ifq_len++;
398 ifq->ifq_qdrops++;
399 mtx_leave(&ifq->ifq_mtx);
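
ifq_enqueue (lines 382-399) hands the mbuf to the discipline under ifq_mtx; the discipline may hand back a different mbuf to drop (priq evicts a lower-priority packet when full), and that drop is freed after the mutex is released. Callers therefore treat the mbuf as consumed whether or not the call succeeds. A sketch of the usual producer, roughly what if_enqueue() boils down to for a single-queue driver:

    int
    drv_output_path(struct ifnet *ifp, struct mbuf *m)
    {
        struct ifqueue *ifq = &ifp->if_snd;
        int error;

        error = ifq_enqueue(ifq, m);    /* m is consumed even on error */
        if (error != 0)
            return (error);             /* the drop shows up in ifq_qdrops */

        ifq_start(ifq);                 /* lines 122-128: now or bundled */
        return (0);
    }
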
408 ifq_deq_enter(struct ifqueue *ifq)
410 mtx_enter(&ifq->ifq_mtx);
414 ifq_deq_leave(struct ifqueue *ifq)
418 ml = ifq->ifq_free;
419 ml_init(&ifq->ifq_free);
421 mtx_leave(&ifq->ifq_mtx);
428 ifq_deq_begin(struct ifqueue *ifq)
433 ifq_deq_enter(ifq);
434 if (ifq->ifq_len == 0 ||
435 (m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie)) == NULL) {
436 ifq_deq_leave(ifq);
446 ifq_deq_commit(struct ifqueue *ifq, struct mbuf *m)
453 ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
454 ifq->ifq_len--;
455 ifq_deq_leave(ifq);
459 ifq_deq_rollback(struct ifqueue *ifq, struct mbuf *m)
463 ifq_deq_leave(ifq);
467 ifq_dequeue(struct ifqueue *ifq)
471 m = ifq_deq_begin(ifq);
475 ifq_deq_commit(ifq, m);
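
ifq_deq_begin/commit/rollback (lines 428-463) let a driver peek at the head packet and only commit once it is sure the packet fits in its ring; ifq_dequeue (lines 467-475) is the simple begin-then-commit composition. A sketch of a start routine using the two-phase form, with all drv_* names hypothetical:

    void
    drv_start(struct ifqueue *ifq)
    {
        struct drv_softc *sc = ifq->ifq_if->if_softc;
        struct mbuf *m;

        for (;;) {
            m = ifq_deq_begin(ifq);
            if (m == NULL)
                break;

            if (drv_tx_slots_free(sc) < DRV_SLOTS_PER_PKT) {
                /* leave the packet queued and retry after txeof */
                ifq_deq_rollback(ifq, m);
                ifq_set_oactive(ifq);
                break;
            }

            ifq_deq_commit(ifq, m);    /* we own m from here on */
            drv_encap(sc, m);
        }

        drv_kick_hw(sc);
    }
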
481 ifq_deq_sleep(struct ifqueue *ifq, struct mbuf **mp, int nbio, int priority,
489 ifq_deq_enter(ifq);
490 if (ifq->ifq_len == 0 && nbio)
494 m = ifq->ifq_ops->ifqop_deq_begin(ifq, &cookie);
496 ifq->ifq_ops->ifqop_deq_commit(ifq, m, cookie);
497 ifq->ifq_len--;
503 error = msleep_nsec(ifq, &ifq->ifq_mtx,
514 ifq_deq_leave(ifq);
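
ifq_deq_sleep (lines 481-514) is a blocking dequeue for pseudo-drivers that read packets out of their own transmit queue in process context, tun(4)-style. The core is an msleep loop on the ifq itself; a paraphrase of lines 489-503, where try_deq_locked stands in for the inline begin-and-commit done while ifq_mtx is held, and m, error, nbio, priority, and wmesg come from the function's context:

    for (;;) {
        m = try_deq_locked(ifq);    /* deq_begin + deq_commit under ifq_mtx */
        if (m != NULL)
            break;
        if (nbio) {                 /* non-blocking I/O: give up now */
            error = EWOULDBLOCK;
            break;
        }
        error = msleep_nsec(ifq, &ifq->ifq_mtx, priority, wmesg, INFSLP);
        if (error != 0)
            break;
    }
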
520 ifq_hdatalen(struct ifqueue *ifq)
525 if (ifq_empty(ifq))
528 m = ifq_deq_begin(ifq);
531 ifq_deq_rollback(ifq, m);
538 ifq_init_maxlen(struct ifqueue *ifq, unsigned int maxlen)
541 ifq->ifq_maxlen = maxlen;
545 ifq_purge(struct ifqueue *ifq)
550 mtx_enter(&ifq->ifq_mtx);
551 ifq->ifq_ops->ifqop_purge(ifq, &ml);
552 rv = ifq->ifq_len;
553 ifq->ifq_len = 0;
554 ifq->ifq_qdrops += rv;
555 mtx_leave(&ifq->ifq_mtx);
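
ifq_purge (lines 545-555) empties the queue under ifq_mtx, accounts every discarded packet as a qdrop, and (in the elided lines) frees them once the mutex is dropped. Together with ifq_barrier this gives the usual ordering in a driver's down path; a sketch, with the sc_ih name hypothetical:

    CLR(ifp->if_flags, IFF_RUNNING);    /* stop new work first */
    ifq_barrier(&ifp->if_snd);          /* wait out any running if_qstart */
    intr_barrier(sc->sc_ih);            /* and the interrupt handler */
    ifq_purge(&ifp->if_snd);            /* drop whatever is still queued */
    ifq_clr_oactive(&ifp->if_snd);
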
565 ifq_q_enter(struct ifqueue *ifq, const struct ifq_ops *ops)
567 mtx_enter(&ifq->ifq_mtx);
568 if (ifq->ifq_ops == ops)
569 return (ifq->ifq_q);
571 mtx_leave(&ifq->ifq_mtx);
577 ifq_q_leave(struct ifqueue *ifq, void *q)
579 KASSERT(q == ifq->ifq_q);
580 mtx_leave(&ifq->ifq_mtx);
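
ifq_q_enter/ifq_q_leave (lines 565-580) give discipline code locked access to its own state, returning NULL if the ifq has since been re-attached with different ops; this is what makes the ifq_attach swap above safe against concurrent discipline-specific callers. A sketch with a hypothetical myq discipline (struct myq, myq_ops, and mq_len are invented for illustration):

    int
    myq_get_qlen(struct ifqueue *ifq)
    {
        struct myq *mq;
        int qlen;

        mq = ifq_q_enter(ifq, &myq_ops);    /* holds ifq_mtx on success */
        if (mq == NULL)
            return (-1);                    /* ops changed under us */
        qlen = mq->mq_len;
        ifq_q_leave(ifq, mq);               /* drops ifq_mtx (line 580) */
        return (qlen);
    }
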
584 ifq_mfreem(struct ifqueue *ifq, struct mbuf *m)
586 MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);
588 ifq->ifq_len--;
589 ifq->ifq_qdrops++;
590 ml_enqueue(&ifq->ifq_free, m);
594 ifq_mfreeml(struct ifqueue *ifq, struct mbuf_list *ml)
596 MUTEX_ASSERT_LOCKED(&ifq->ifq_mtx);
598 ifq->ifq_len -= ml_len(ml);
599 ifq->ifq_qdrops += ml_len(ml);
600 ml_enlist(&ifq->ifq_free, ml);
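
ifq_mfreem/ifq_mfreeml (lines 584-600) exist because freeing an mbuf can take further locks (pool, counters, external free callbacks), which is unsafe while ifq_mtx is held; drops made under the mutex are parked on ifq_free, and the real frees happen in ifq_deq_leave after the mutex is released. A paraphrase of that dance (lines 414-421 plus the elided ml_purge):

    static void
    ifq_deq_leave_paraphrase(struct ifqueue *ifq)
    {
        struct mbuf_list ml;

        ml = ifq->ifq_free;         /* steal the deferred-free list */
        ml_init(&ifq->ifq_free);
        mtx_leave(&ifq->ifq_mtx);

        if (!ml_empty(&ml))
            ml_purge(&ml);          /* safe to m_freem() out here */
    }
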
929 priq_enq(struct ifqueue *ifq, struct mbuf *m)
936 pq = ifq->ifq_q;
940 if (ifq_len(ifq) >= ifq->ifq_maxlen) {
963 priq_deq_begin(struct ifqueue *ifq, void **cookiep)
965 struct priq *pq = ifq->ifq_q;
983 priq_deq_commit(struct ifqueue *ifq, struct mbuf *m, void *cookie)
993 priq_purge(struct ifqueue *ifq, struct mbuf_list *ml)
995 struct priq *pq = ifq->ifq_q;
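
priq (lines 929 onward) is the default discipline installed by ifq_init: an array of IFQ_NQUEUES mbuf lists indexed by packet priority. When the queue is full, priq_enq evicts a packet from a strictly lower-priority list, or refuses the new packet if no such victim exists; dequeue scans from the highest priority down. A sketch of that scan, assuming struct priq is just the pq_lists array and that MBUF_LIST_FIRST is the head-peek macro I recall from mbuf.h:

    struct mbuf *
    priq_deq_begin_sketch(struct ifqueue *ifq, void **cookiep)
    {
        struct priq *pq = ifq->ifq_q;
        struct mbuf_list *pl;
        struct mbuf *m;
        int prio;

        for (prio = IFQ_NQUEUES - 1; prio >= 0; prio--) {
            pl = &pq->pq_lists[prio];
            m = MBUF_LIST_FIRST(pl);    /* peek, don't remove yet */
            if (m != NULL) {
                *cookiep = pl;          /* deq_commit removes from here */
                return (m);
            }
        }

        return (NULL);
    }
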