Lines matching defs: lc (FreeBSD sys/netinet/tcp_lro.c)

83 static void	tcp_lro_rx_done(struct lro_ctrl *lc);
84 static int	tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m,
86 static void	tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le);
92 int (*tcp_lro_flush_tcphpts)(struct lro_ctrl *lc, struct lro_entry *le);
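tcp_lro_flush_tcphpts (line 92) is a global hook that a TCP stack module (e.g. an HPTS-based stack) can install to claim an entry at flush time; tcp_lro_flush() (line 1115 below) only runs the default condense-and-input path when the hook is NULL or returns non-zero. A minimal installation sketch; my_hpts_flush and my_stack_load are hypothetical names, not the in-tree handler:

#include <sys/errno.h>
#include <netinet/tcp_lro.h>

static int
my_hpts_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	/* Return 0 once this handler has consumed the entry; any
	 * non-zero value sends tcp_lro_flush() down its default
	 * condense-and-input path. */
	return (ENOTSUP);
}

static void
my_stack_load(void)
{
	tcp_lro_flush_tcphpts = my_hpts_flush;
}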
151 tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
155 LIST_INSERT_HEAD(&lc->lro_active, le, next);
168 tcp_lro_init(struct lro_ctrl *lc)
170 return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
174 tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
181 lc->lro_bad_csum = 0;
182 lc->lro_queued = 0;
183 lc->lro_flushed = 0;
184 lc->lro_mbuf_count = 0;
185 lc->lro_mbuf_max = lro_mbufs;
186 lc->lro_cnt = lro_entries;
187 lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
188 lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
189 lc->ifp = ifp;
190 LIST_INIT(&lc->lro_free);
191 LIST_INIT(&lc->lro_active);
194 lc->lro_hash = phashinit_flags(lro_entries, M_LRO, &lc->lro_hashsz,
196 if (lc->lro_hash == NULL) {
197 memset(lc, 0, sizeof(*lc));
204 lc->lro_mbuf_data = (struct lro_mbuf_sort *)
208 if (lc->lro_mbuf_data == NULL) {
209 free(lc->lro_hash, M_LRO);
210 memset(lc, 0, sizeof(*lc));
215 (lc->lro_mbuf_data + lro_mbufs);
219 LIST_INSERT_HEAD(&lc->lro_free, le + i, next);
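Lines 168-219 cover initialization: tcp_lro_init() (line 170) is a thin wrapper around tcp_lro_init_args(), which resets the statistics, sizes the hash table with phashinit_flags(), allocates the sort array for the queued-mbuf path, and threads the lro_entry slots onto lro_free. A hedged sketch of a driver calling it at attach time; the my_softc layout and the entry/queue sizes are illustrative:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/tcp_lro.h>

/* Illustrative softc fragment; real drivers carry much more state. */
struct my_softc {
	struct ifnet	*ifp;
	struct lro_ctrl	 lro;
};

static int
my_attach_lro(struct my_softc *sc)
{
	/*
	 * 8 concurrent LRO entries, 128-slot mbuf sort queue.
	 * Passing 0 for lro_mbufs (as tcp_lro_init() does) leaves
	 * lro_mbuf_max at 0 and disables the tcp_lro_queue_mbuf()
	 * path (see the check at lines 1464-1465).
	 */
	return (tcp_lro_init_args(&sc->lro, sc->ifp, 8, 128));
}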
492 tcp_lro_free(struct lro_ctrl *lc)
498 LIST_INIT(&lc->lro_free);
501 while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
507 free(lc->lro_hash, M_LRO);
508 lc->lro_hash = NULL;
509 lc->lro_hashsz = 0;
512 for (x = 0; x != lc->lro_mbuf_count; x++)
513 m_freem(lc->lro_mbuf_data[x].mb);
514 lc->lro_mbuf_count = 0;
517 free(lc->lro_mbuf_data, M_LRO);
518 lc->lro_mbuf_data = NULL;
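Lines 492-518 are the mirror-image teardown: active entries are drained, any mbufs still sitting in the sort queue are freed (lines 512-513), and the hash table and sort array pointers are freed and cleared so a repeated call is harmless. The matching detach sketch, reusing my_softc from above:

static void
my_detach_lro(struct my_softc *sc)
{
	/* Safe even after a failed init, since tcp_lro_init_args()
	 * zeroes the context on its error paths (lines 197, 210). */
	tcp_lro_free(&sc->lro);
}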
586 tcp_lro_rx_done(struct lro_ctrl *lc)
590 while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
592 tcp_lro_flush(lc, le);
597 tcp_lro_flush_active(struct lro_ctrl *lc)
611 LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
614 tcp_lro_flush(lc, le);
620 tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
627 if (LIST_EMPTY(&lc->lro_active))
634 LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
637 tcp_lro_flush(lc, le);
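tcp_lro_flush_inactive() (lines 620-637) bounds the extra latency LRO adds: it walks lro_active and flushes every entry older than the supplied timeout. A sketch of calling it from a periodic driver tick; my_lro_tick and the 100 us budget are illustrative:

#include <sys/time.h>

static void
my_lro_tick(void *arg)
{
	struct my_softc *sc = arg;
	const struct timeval tv = { .tv_sec = 0, .tv_usec = 100 };

	/* Flush entries that have been held longer than the budget. */
	tcp_lro_flush_inactive(&sc->lro, &tv);
}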
644 tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4)
651 lc->lro_bad_csum++;
657 lc->lro_bad_csum++;
790 tcp_flush_out_entry(struct lro_ctrl *lc, struct lro_entry *le)
860 lc->lro_queued += le->m_head->m_pkthdr.lro_nsegs;
861 (*lc->ifp->if_input)(lc->ifp, le->m_head);
865 tcp_set_entry_to_mbuf(struct lro_ctrl *lc, struct lro_entry *le,
903 tcp_push_and_replace(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
918 tcp_flush_out_entry(lc, le);
930 tcp_set_entry_to_mbuf(lc, le, m, pa->tcp);
963 tcp_lro_condense(struct lro_ctrl *lc, struct lro_entry *le)
1003 tcp_push_and_replace(lc, le, m);
1012 tcp_push_and_replace(lc, le, m);
1031 if (tcp_data_seg_total >= lc->lro_ackcnt_lim ||
1032 tcp_data_len_total >= lc->lro_length_lim) {
1034 tcp_push_and_replace(lc, le, m);
1047 tcp_push_and_replace(lc, le, m);
1051 tcp_push_and_replace(lc, le, m);
1058 tcp_push_and_replace(lc, le, m);
1072 tcp_push_and_replace(lc, le, m);
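tcp_lro_condense() (lines 963-1072) merges the mbufs queued on an entry and calls tcp_push_and_replace() whenever aggregation must restart: header or sequence mismatches, or the per-context ceilings checked at lines 1031-1032. Those ceilings default to TCP_LRO_ACKCNT_MAX and TCP_LRO_LENGTH_MAX (lines 187-188) but are plain fields, so a driver could tighten them after a successful init; an illustrative sketch:

static void
my_tune_lro(struct my_softc *sc)
{
	/* Illustrative values: cap each LRO entry at 32 aggregated
	 * pure ACKs or 16 kB of aggregated payload before it is
	 * pushed up the stack. */
	sc->lro.lro_ackcnt_lim = 32;
	sc->lro.lro_length_lim = 16 * 1024;
}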
1109 tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
1115 tcp_lro_flush_tcphpts(lc, le) != 0) {
1116 tcp_lro_condense(lc, le);
1117 tcp_flush_out_entry(lc, le);
1119 lc->lro_flushed++;
1121 LIST_INSERT_HEAD(&lc->lro_free, le, next);
1199 tcp_lro_flush_all(struct lro_ctrl *lc)
1207 if (lc->lro_mbuf_count == 0)
1209 if (lc->lro_cpu_is_set == 0) {
1210 if (lc->lro_last_cpu == curcpu) {
1211 lc->lro_cnt_of_same_cpu++;
1213 if (lc->lro_cnt_of_same_cpu > tcp_lro_cpu_set_thresh)
1214 lc->lro_cpu_is_set = 1;
1216 lc->lro_last_cpu = curcpu;
1217 lc->lro_cnt_of_same_cpu = 0;
1220 CURVNET_SET(lc->ifp->if_vnet);
1223 binuptime(&lc->lro_last_queue_time);
1226 tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);
1230 for (x = 0; x != lc->lro_mbuf_count; x++) {
1234 mb = lc->lro_mbuf_data[x].mb;
1237 nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);
1244 tcp_lro_rx_done(lc);
1248 if (tcp_lro_rx_common(lc, mb, 0, false) != 0) {
1250 tcp_lro_flush_active(lc);
1252 (*lc->ifp->if_input)(lc->ifp, mb);
1253 lc->lro_queued++;
1254 lc->lro_flushed++;
1260 tcp_lro_rx_done(lc);
1262 lc->lro_mbuf_count = 0;
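tcp_lro_flush_all() (lines 1199-1262) sorts the queued mbufs by the 64-bit key packed in tcp_lro_queue_mbuf() (lines 1489-1495), so packets of one flow become adjacent; the mask at line 1237 strips the low 24 queue-index bits, and a change in the masked value marks a flow boundary that triggers tcp_lro_rx_done(). The key's upper bits carry the packet hash; the exact split below (hash type above flowid) follows the in-tree code but is not visible in this listing, so treat it as an assumption. A userland-compilable sketch with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>

/* Assumed key layout: hash type in bits 63-56, flowid in bits
 * 55-24, queue index in bits 23-0 (line 1492 shows the index). */
static uint64_t
lro_sort_key(uint8_t hashtype, uint32_t flowid, uint32_t index)
{
	return (((uint64_t)hashtype << 56) |
	    ((uint64_t)flowid << 24) |
	    (uint64_t)(index & 0xffffffU));
}

/* Two keys belong to the same flow iff they match after masking
 * away the index, exactly the (-1ULL << 24) test at line 1237. */
static bool
lro_same_flow(uint64_t a, uint64_t b)
{
	return ((a & (-1ULL << 24)) == (b & (-1ULL << 24)));
}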
1266 tcp_lro_rx_get_bucket(struct lro_ctrl *lc, struct mbuf *m, struct lro_parser *parser)
1276 return (&lc->lro_hash[hash % lc->lro_hashsz]);
1280 tcp_lro_rx_common(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, bool use_hash)
1327 error = tcp_lro_rx_ipv4(lc, m, pa->ip4);
1337 m->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
1368 m->m_pkthdr.rcvif = lc->ifp;
1376 bucket = &lc->lro_hash[0];
1378 bucket = tcp_lro_rx_get_bucket(lc, m, pa);
1403 if (LIST_EMPTY(&lc->lro_free))
1407 le = LIST_FIRST(&lc->lro_free);
1409 tcp_lro_active_insert(lc, bucket, le);
1416 le->alloc_time = lc->lro_last_queue_time;
1418 tcp_set_entry_to_mbuf(lc, le, m, th);
1427 tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
1443 binuptime(&lc->lro_last_queue_time);
1444 CURVNET_SET(lc->ifp->if_vnet);
1445 error = tcp_lro_rx_common(lc, m, csum, true);
1452 tcp_lro_flush_active(lc);
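tcp_lro_rx() (lines 1427-1452) is the immediate, per-packet entry point: it stamps lro_last_queue_time, enters the interface's vnet, and runs tcp_lro_rx_common(); on error the caller still owns the mbuf. The usual RX-loop shape, sketched with a hypothetical my_rx_next_mbuf() standing in for descriptor-ring handling; the caller is expected to be inside the net epoch:

/* Hypothetical helper: pops the next received mbuf, or NULL. */
static struct mbuf *my_rx_next_mbuf(struct my_softc *sc);

static void
my_rx_intr(struct my_softc *sc)
{
	struct mbuf *m;

	while ((m = my_rx_next_mbuf(sc)) != NULL) {
		/* Try to aggregate; on failure, input the packet as-is. */
		if (tcp_lro_rx(&sc->lro, m, 0) != 0)
			(*sc->ifp->if_input)(sc->ifp, m);
	}
	/* Hand any held entries to the stack before returning;
	 * tcp_lro_flush_all() ends in tcp_lro_rx_done() (line 1260). */
	tcp_lro_flush_all(&sc->lro);
}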
1460 tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
1464 if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
1465 lc->lro_mbuf_max == 0)) {
1472 if (__predict_false((lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
1474 (*lc->ifp->if_input) (lc->ifp, mb);
1483 binuptime(&lc->lro_last_queue_time);
1484 mb->m_pkthdr.rcv_tstmp = bintime2ns(&lc->lro_last_queue_time);
1489 lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
1492 ((uint64_t)lc->lro_mbuf_count);
1495 lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;
1498 if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
1499 tcp_lro_flush_all(lc);
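tcp_lro_queue_mbuf() (lines 1460-1499) is the deferred alternative: it refuses to run without a configured queue (lines 1464-1465), bypasses LRO entirely when IFCAP_LRO is off (lines 1472-1474), stamps the mbuf, packs the sort key, and self-flushes when the array fills (lines 1498-1499). A sketch of a driver using this path instead of per-packet tcp_lro_rx(), again with the hypothetical my_rx_next_mbuf():

static void
my_rx_intr_queued(struct my_softc *sc)
{
	struct mbuf *m;

	while ((m = my_rx_next_mbuf(sc)) != NULL) {
		/* Queue for sorted, batched LRO; needs lro_mbufs > 0
		 * in tcp_lro_init_args(), or the mbuf is rejected. */
		tcp_lro_queue_mbuf(&sc->lro, m);
	}
	/* Sort, aggregate, and input everything queued so far. */
	tcp_lro_flush_all(&sc->lro);
}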