/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 */
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
tx_credits = sc->params.ofldq_wr_cred;
tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
refcount_init(&toep->refcount, 1);
toep->td = sc->tom_softc;
toep->incarnation = sc->incarnation;
toep->vi = vi;
toep->tid = -1;
toep->tx_total = tx_credits;
toep->tx_credits = tx_credits;
mbufq_init(&toep->ulp_pduq, INT_MAX);
mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
toep->txsd_total = txsd_total;
toep->txsd_avail = txsd_total;
toep->txsd_pidx = 0;
toep->txsd_cidx = 0;
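
/*
 * Illustrative sketch (not driver code): offload tx credits are counted in
 * 16-byte units and howmany() rounds a byte length up to whole units. The
 * numbers below (64 credits granted, a 32-byte abort CPL) are assumptions
 * chosen only to show the arithmetic of the reservation above.
 */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	unsigned int tx_credits = 64;	/* stand-in for sc->params.ofldq_wr_cred */
	size_t abort_len = 32;		/* stand-in for sizeof(struct cpl_abort_req) */

	/* Keep enough credits in reserve to always be able to send an abort. */
	tx_credits -= howmany(abort_len, 16);
	printf("usable tx credits: %u\n", tx_credits);	/* 64 - 2 = 62 */
	return (0);
}
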
struct conn_params *cp = &toep->params;
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
if (cp->tc_idx >= 0 && cp->tc_idx < sc->params.nsched_cls) {
tc = &pi->sched_params->cl_rl[cp->tc_idx];
mtx_lock(&sc->tc_lock);
if (tc->state != CS_HW_CONFIGURED) {
toep->tid, cp->tc_idx, tc->state);
cp->tc_idx = -1;
tc->refcount++;
mtx_unlock(&sc->tc_lock);
toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
toep->flags |= TPF_INITIALIZED;
refcount_acquire(&toep->refcount);
if (refcount_release(&toep->refcount) == 0)
KASSERT(!(toep->flags & TPF_ATTACHED),
KASSERT(!(toep->flags & TPF_CPL_PENDING),
if (toep->flags & TPF_INITIALIZED) {
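
/*
 * A minimal userspace sketch of the hold/release pattern used by
 * hold_toepcb()/free_toepcb() above, with a C11 atomic standing in for the
 * kernel's refcount(9). The struct and function names here are invented
 * for illustration.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;	/* starts at 1, as with refcount_init(..., 1) */
};

static struct obj *
obj_hold(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
	return (o);
}

static void
obj_release(struct obj *o)
{
	/* Tear down only when the last reference goes away. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}
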
struct tom_data *td = toep->td;
sb = &so->so_snd;
sb->sb_flags |= SB_NOCOALESCE;
sb = &so->so_rcv;
sb->sb_flags |= SB_NOCOALESCE;
if (inp->inp_vflag & INP_IPV6)
so->so_proto = &toe6_protosw;
else
so->so_proto = &toe_protosw;
tp->tod = &td->tod;
tp->t_toe = toep;
tp->t_flags |= TF_TOE;
toep->inp = inp;
toep->flags |= TPF_ATTACHED;
so->so_proto = &tcp6_protosw;
so->so_proto = &tcp_protosw;
struct toepcb *toep = tp->t_toe;
sb = &so->so_snd;
sb->sb_flags &= ~SB_NOCOALESCE;
sb = &so->so_rcv;
sb->sb_flags &= ~SB_NOCOALESCE;
restore_so_proto(so, inp->inp_vflag & INP_IPV6);
tp->tod = NULL;
tp->t_toe = NULL;
tp->t_flags &= ~TF_TOE;
toep->inp = NULL;
toep->flags &= ~TPF_ATTACHED;
struct tom_data *td = toep->td;
int tid = toep->tid;
KASSERT(!(toep->flags & TPF_CPL_PENDING),
__func__, toep, tid, toep->l2te, toep->ce);
if (toep->l2te) {
t4_l2t_release(toep->l2te);
toep->l2te = NULL;
remove_tid(sc, tid, toep->ce ? 2 : 1);
release_tid(sc, tid, toep->ctrlq);
toep->tid = -1;
mtx_lock(&td->toep_list_lock);
if (toep->flags & TPF_IN_TOEP_LIST) {
toep->flags &= ~TPF_IN_TOEP_LIST;
TAILQ_REMOVE(&td->toep_list, toep, link);
mtx_unlock(&td->toep_list_lock);
if (toep->ce) {
t4_release_clip_entry(sc, toep->ce);
toep->ce = NULL;
if (toep->params.tc_idx != -1)
t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);
KASSERT(!(toep->flags & TPF_CPL_PENDING),
KASSERT(!(toep->flags & TPF_ATTACHED),
CTR(KTR_CXGBE, "%s: toep %p (0x%x)", __func__, toep, toep->flags);
MPASS(mbufq_empty(&toep->ulp_pduq));
MPASS(mbufq_empty(&toep->ulp_pdu_reclaimq));
MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));
MPASS(toep->tid == -1);
MPASS(toep->l2te == NULL);
MPASS(toep->ce == NULL);
MPASS((toep->flags & TPF_IN_TOEP_LIST) == 0);
struct toepcb *toep = tp->t_toe;
KASSERT(toep->flags & TPF_ATTACHED,
if (tp->t_state == TCPS_SYN_SENT) {
__func__, toep->tid, toep, toep->flags, inp,
inp->inp_flags);
toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
inp->inp_flags);
tp->tod = NULL;
tp->t_toe = NULL;
tp->t_flags &= ~TF_TOE;
toep->flags &= ~TPF_ATTACHED;
if (!(toep->flags & TPF_CPL_PENDING))
struct adapter *sc = tod->tod_softc;
struct toepcb *toep = tp->t_toe;
if (tp->t_state != TCPS_ESTABLISHED)
toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
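
/*
 * Sketch of the mask/value convention behind t4_set_tcb_field() above:
 * V_TF_NAGLE(1) selects the Nagle bit as the mask and V_TF_NAGLE(nagle)
 * supplies its new value. The bit position used below is an assumption
 * for illustration, not the hardware layout.
 */
#include <stdint.h>

#define V_TF_NAGLE(x)	((uint64_t)(x) << 7)	/* assumed shift */

static uint64_t
set_field(uint64_t word, uint64_t mask, uint64_t val)
{
	return ((word & ~mask) | (val & mask));
}
/*
 * Enable:  tflags = set_field(tflags, V_TF_NAGLE(1), V_TF_NAGLE(1));
 * Disable: tflags = set_field(tflags, V_TF_NAGLE(1), V_TF_NAGLE(0));
 */
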
#define LAST_WORD ((TCB_SIZE / 4) - 1)
flit_idx = (LAST_WORD - word) / 2;
if (fls(mask) > 64 - shift) {
t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
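
/*
 * Standalone sketch of the flit arithmetic above, under these assumptions:
 * TCB_SIZE is 128 bytes, the TCB is an array of big-endian 64-bit flits
 * holding two 32-bit words each in reverse word order, and shift/mask
 * describe a right-justified field within the selected flit. Any word-
 * parity adjustment of shift that the driver may do elsewhere is elided.
 */
#include <stdint.h>
#include <strings.h>	/* flsll() */

#define TCB_SIZE	128
#define LAST_WORD	((TCB_SIZE / 4) - 1)

static uint64_t
load_be64(const void *p)	/* portable stand-in for be64toh() */
{
	const uint8_t *b = p;
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return (v);
}

static uint64_t
tcb_field_sketch(const uint64_t *tcb, unsigned int word, unsigned int shift,
    uint64_t mask)
{
	unsigned int flit_idx = (LAST_WORD - word) / 2;
	uint64_t v = load_be64(&tcb[flit_idx]) >> shift;

	/* Field straddles a flit boundary: splice in the overflow bits. */
	if (flsll(mask) > (int)(64 - shift))
		v |= load_be64(&tcb[flit_idx - 1]) << (64 - shift);
	return (v & mask);
}
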
MPASS(tid >= sc->tids.tid_base);
MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
cpl->cookie = 0xff;
commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
struct tcb_histent *te;
te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
if (te == NULL)
mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
callout_init_mtx(&te->te_callout, &te->te_lock, 0);
te->te_adapter = sc;
te->te_tid = tid;
return (te);
free_tcb_histent(struct tcb_histent *te)
mtx_destroy(&te->te_lock);
free(te, M_CXGBE);
struct tcb_histent *te = NULL;
struct tom_data *td = sc->tom_softc;
MPASS(tid >= sc->tids.tid_base);
MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
if (td->tcb_history == NULL)
rw_wlock(&td->tcb_history_lock);
if (td->tcb_history[tid] != NULL) {
te = alloc_tcb_histent(sc, tid, M_NOWAIT);
if (te == NULL) {
mtx_lock(&te->te_lock);
te->te_flags |= TE_RPL_PENDING;
td->tcb_history[tid] = te;
free(te, M_CXGBE);
mtx_unlock(&te->te_lock);
rw_wunlock(&td->tcb_history_lock);
remove_tcb_histent(struct tcb_histent *te)
struct adapter *sc = te->te_adapter;
struct tom_data *td = sc->tom_softc;
rw_assert(&td->tcb_history_lock, RA_WLOCKED);
mtx_assert(&te->te_lock, MA_OWNED);
MPASS(td->tcb_history[te->te_tid] == te);
td->tcb_history[te->te_tid] = NULL;
free_tcb_histent(te);
rw_wunlock(&td->tcb_history_lock);
struct tcb_histent *te;
struct tom_data *td = sc->tom_softc;
MPASS(tid >= sc->tids.tid_base);
MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
if (td->tcb_history == NULL)
rw_wlock(&td->tcb_history_lock);
rw_rlock(&td->tcb_history_lock);
te = td->tcb_history[tid];
if (te != NULL) {
mtx_lock(&te->te_lock);
return (te); /* with both locks held */
rw_wunlock(&td->tcb_history_lock);
rw_runlock(&td->tcb_history_lock);
return (te);
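
/*
 * Userspace sketch of the two-level locking above, assuming pthreads: a
 * plain lookup takes the table rwlock shared, a lookup-for-removal takes
 * it exclusive, and the entry's own mutex is taken before returning, so
 * the caller ends up exactly as the "with both locks held" comment says.
 * All names below are invented for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct entry {
	pthread_mutex_t lock;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry *table[1024];	/* caller guarantees tid < 1024 here */

static struct entry *
lookup(unsigned int tid, bool remove)
{
	struct entry *e;

	if (remove)
		pthread_rwlock_wrlock(&table_lock);
	else
		pthread_rwlock_rdlock(&table_lock);
	e = table[tid];
	if (e != NULL)
		pthread_mutex_lock(&e->lock);	/* returned with both locks held */
	else
		pthread_rwlock_unlock(&table_lock);
	return (e);
}
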
release_tcb_histent(struct tcb_histent *te)
struct adapter *sc = te->te_adapter;
struct tom_data *td = sc->tom_softc;
mtx_assert(&te->te_lock, MA_OWNED);
mtx_unlock(&te->te_lock);
rw_assert(&td->tcb_history_lock, RA_RLOCKED);
rw_runlock(&td->tcb_history_lock);
struct tcb_histent *te = arg;
mtx_assert(&te->te_lock, MA_OWNED);
MPASS(!(te->te_flags & TE_RPL_PENDING));
if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
te->te_flags |= TE_RPL_PENDING;
callout_schedule(&te->te_callout, hz / 100);
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
struct tom_data *td = te->te_adapter->tom_softc;
if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
te->te_sample[te->te_pidx] = sample;
if (++te->te_pidx == nitems(te->te_sample))
te->te_pidx = 0;
memcpy(te->te_tcb, tcb, TCB_SIZE);
te->te_flags |= TE_ACTIVE;
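
/*
 * Standalone sketch of the sample ring above: te_pidx always names the
 * slot that will be written next, so once the ring has wrapped, the
 * oldest sample sits at te_pidx and a chronological walk starts there.
 * The ring size below is arbitrary.
 */
#include <stdio.h>

#define NSAMPLES	10

static unsigned int ring[NSAMPLES];
static unsigned int pidx;

static void
ring_put(unsigned int sample)
{
	ring[pidx] = sample;
	if (++pidx == NSAMPLES)
		pidx = 0;
}

static void
ring_dump_oldest_first(void)
{
	unsigned int i, j;

	for (i = 0, j = pidx; i < NSAMPLES; i++) {
		printf("%u ", ring[j]);
		if (++j == NSAMPLES)
			j = 0;
	}
	printf("\n");
}
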
struct adapter *sc = iq->adapter;
struct tcb_histent *te;
te = lookup_tcb_histent(sc, tid, remove);
if (te == NULL) {
device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
MPASS(te->te_flags & TE_RPL_PENDING);
te->te_flags &= ~TE_RPL_PENDING;
remove_tcb_histent(te);
update_tcb_histent(te, tcb);
callout_reset(&te->te_callout, hz / 10, request_tcb, te);
release_tcb_histent(te);
ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);
ti->tcpi_rtt = tcp_ticks_to_us(sc, v);
ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);
ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);
ti->tcpi_rcv_adv = GET_TCB_FIELD(tcb, RCV_ADV);
ti->tcpi_dupacks = GET_TCB_FIELD(tcb, T_DUPACKS);
ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);
ti->tcpi_snd_una = v - GET_TCB_FIELD(tcb, SND_UNA_RAW);
ti->tcpi_snd_max = v - GET_TCB_FIELD(tcb, SND_MAX_RAW);
ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE); /* Yes, SND. */
ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);
ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE); /* Yes, RCV. */
ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
ti->tcpi_snd_wscale = 0;
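
/*
 * Worked example of the shift above, with made-up numbers: the TCB keeps
 * the advertised window unscaled, so a raw value of 1024 under a
 * negotiated window scale of 7 yields 1024 << 7 = 131072 bytes of send
 * window. (The "Yes, SND/RCV" comments flag that the TCB's field names
 * are crossed relative to the host stack's view.)
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t raw_wnd = 1024;	/* window as carried in the TCB */
	unsigned int wscale = 7;	/* negotiated shift count */

	printf("snd_wnd = %u bytes\n", raw_wnd << wscale);	/* 131072 */
	return (0);
}
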
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
MPASS(tid >= sc->tids.tid_base);
MPASS(tid - sc->tids.tid_base < sc->tids.ntids);
for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
struct tcb_histent *te;
ti->tcpi_toe_tid = tid;
te = lookup_tcb_histent(sc, tid, false);
if (te != NULL) {
fill_tcp_info_from_history(sc, te, ti);
release_tcb_histent(te);
if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
struct adapter *sc = tod->tod_softc;
struct toepcb *toep = tp->t_toe;
fill_tcp_info(sc, toep->tid, ti);
struct toepcb *toep = tp->t_toe;
if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0) {
toep->tid, toep->tx_credits, toep->txsd_avail);
flowc = start_wrq_wr(&toep->ofld_txq->wrq, flowclen16, &cookie);
CH_ERR(sc, "ENOMEM in %s for tid %u.\n", __func__, toep->tid);
flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
V_FW_WR_FLOWID(toep->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[0].val = htobe32(toep->params.emss);
txsd = &toep->txsd[toep->txsd_pidx];
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
toep->txsd_pidx = 0;
toep->txsd_avail--;
commit_wrq_wr(&toep->ofld_txq->wrq, flowc, &cookie);
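
/*
 * Sketch of how a work request length becomes a credit count, as in the
 * flowc above: header plus per-parameter entries, rounded up to 16-byte
 * credits. The struct sizes are assumptions for illustration; only
 * howmany(len, 16) mirrors the driver.
 */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	size_t hdr = 16;	/* assumed sizeof(struct fw_flowc_wr) */
	size_t mnemval = 8;	/* assumed size of one mnemonic/value pair */
	size_t nparams = 1;	/* just FW_FLOWC_MNEM_MSS here */
	size_t flowclen = hdr + nparams * mnemval;

	printf("flowclen16 = %zu credits\n", howmany(flowclen, 16));	/* 2 */
	return (0);
}
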
struct toepcb *toep = tp->t_toe;
struct adapter *sc = td_adapter(toep->td);
unsigned short *mtus = &sc->params.mtus[0];
/* tp->snd_una and snd_max are in host byte order too. */
__func__, toep->tid, seq, mtu, toep->params.mtu_idx,
mtus[toep->params.mtu_idx]);
(SEQ_LT(seq, tp->snd_una) || SEQ_GEQ(seq, tp->snd_max))) {
__func__, toep->tid, seq, tp->snd_una, tp->snd_max);
for (idx = 0; idx < NMTUS - 1 && mtus[idx + 1] <= mtu; idx++)
if (idx >= toep->params.mtu_idx)
* We'll send a compound work request with 2 SET_TCB_FIELDs -- the first
wrh = start_wrq_wr(toep->ctrlq, howmany(len, 16), &cookie);
CH_ERR(sc, "failed to change mtu_idx of tid %d (%u -> %u).\n",
toep->tid, toep->params.mtu_idx, idx);
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_MAXSEG,
ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TIMESTAMP,
commit_wrq_wr(toep->ctrlq, wrh, &cookie);
toep->params.mtu_idx = idx;
tp->t_maxseg = mtus[toep->params.mtu_idx];
if (inp->inp_inc.inc_flags & INC_ISIPV6)
tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
else
tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
toep->params.emss = tp->t_maxseg;
if (tp->t_flags & TF_RCVD_TSTMP)
toep->params.emss -= TCPOLEN_TSTAMP_APPA;
if (sc->tt.update_hc_on_pmtu_change != 0) {
inc.inc_fibnum = inp->inp_inc.inc_fibnum;
if (inp->inp_inc.inc_flags & INC_ISIPV6) {
inc.inc6_faddr = inp->inp_inc.inc6_faddr;
} else
inc.inc_faddr = inp->inp_inc.inc_faddr;
__func__, toep->tid, toep->params.mtu_idx,
mtus[toep->params.mtu_idx], tp->t_maxseg, toep->params.emss);
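
/*
 * Worked example of the MSS arithmetic above, for a 1500-byte path MTU
 * over IPv4: 20 bytes of IP header plus 20 of TCP header leave
 * t_maxseg = 1460, and if timestamps were negotiated the padded option
 * (TCPOLEN_TSTAMP_APPA, 12 bytes) drops the effective MSS to 1448.
 */
#include <stdio.h>

int
main(void)
{
	int mtu = 1500;
	int t_maxseg = mtu - 20 - 20;	/* sizeof(struct ip) + sizeof(struct tcphdr) */
	int emss = t_maxseg - 12;	/* TCPOLEN_TSTAMP_APPA */

	printf("t_maxseg %d, emss %d\n", t_maxseg, emss);	/* 1460, 1448 */
	return (0);
}
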
struct inpcb *inp = toep->inp;
KASSERT(toep->flags & TPF_CPL_PENDING,
__func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);
toep->inp = NULL;
need_wakeup = (toep->flags & TPF_WAITING_FOR_FINAL) != 0;
toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
mbufq_drain(&toep->ulp_pduq);
mbufq_drain(&toep->ulp_pdu_reclaimq);
if (!(toep->flags & TPF_ATTACHED))
struct tid_info *t = &sc->tids;
MPASS(tid >= t->tid_base);
MPASS(tid - t->tid_base < t->ntids);
t->tid_tab[tid - t->tid_base] = ctx;
atomic_add_int(&t->tids_in_use, ntids);
struct tid_info *t = &sc->tids;
return (t->tid_tab[tid - t->tid_base]);
struct tid_info *t = &sc->tids;
t->tid_tab[tid - t->tid_base] = ctx;
struct tid_info *t = &sc->tids;
t->tid_tab[tid - t->tid_base] = NULL;
atomic_subtract_int(&t->tids_in_use, ntids);
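
/*
 * Standalone sketch of the tid table helpers above: a flat array indexed
 * by tid - tid_base plus a count of tids in use (an IPv6 connection with
 * a CLIP entry consumes 2, as the remove_tid() calls elsewhere in this
 * file show). Sizes are made up, and the kernel's atomics and assertions
 * are reduced to plain C here.
 */
#include <assert.h>
#include <stddef.h>

#define TID_BASE	64
#define NTIDS		128

static void *tid_tab[NTIDS];
static int tids_in_use;

static void
insert_tid_sketch(unsigned int tid, void *ctx, int ntids)
{
	assert(tid >= TID_BASE && tid - TID_BASE < NTIDS);
	tid_tab[tid - TID_BASE] = ctx;
	tids_in_use += ntids;
}

static void *
lookup_tid_sketch(unsigned int tid)
{
	return (tid_tab[tid - TID_BASE]);
}

static void
remove_tid_sketch(unsigned int tid, int ntids)
{
	tid_tab[tid - TID_BASE] = NULL;
	tids_in_use -= ntids;
}
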
 * What mtu_idx to use, given a 4-tuple. Note that both s->mss and tcp_mssopt
unsigned short *mtus = &sc->params.mtus[0];
mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
if (inc->inc_flags & INC_ISIPV6)
mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
else
mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);
for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
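
/*
 * Worked example of the search above with an invented MTU table (the real
 * one lives in sc->params.mtus): the loop leaves i at the last index whose
 * entry does not exceed the target, i.e. the best usable hardware MTU.
 */
#include <stdio.h>

int
main(void)
{
	unsigned short mtus[] = { 88, 576, 1280, 1460, 1500, 4096, 9000 };
	int nmtus = sizeof(mtus) / sizeof(mtus[0]);
	int mtu = 1500, i;

	for (i = 0; i < nmtus - 1 && mtus[i + 1] <= mtu; i++)
		continue;
	printf("mtu_idx %d -> %u\n", i, mtus[i]);	/* 4 -> 1500 */
	return (0);
}
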
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
wnd = sbspace(&so->so_rcv);
MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
opt0 |= V_WND_SCALE(cp->wscale);
MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
opt0 |= V_MSS_IDX(cp->mtu_idx);
MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
opt0 |= V_ULP_MODE(cp->ulp_mode);
MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);
MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
opt0 |= V_L2T_IDX(cp->l2t_idx);
opt0 |= V_SMAC_SEL(vi->smt_idx);
opt0 |= V_TX_CHAN(vi->pi->tx_chan);
MPASS(cp->keepalive == 0 || cp->keepalive == 1);
opt0 |= V_KEEP_ALIVE(cp->keepalive);
MPASS(cp->nagle == 0 || cp->nagle == 1);
opt0 |= V_NAGLE(cp->nagle);
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
MPASS(cp->sack == 0 || cp->sack == 1);
opt2 |= V_SACK_EN(cp->sack);
MPASS(cp->tstamp == 0 || cp->tstamp == 1);
opt2 |= V_TSTAMPS_EN(cp->tstamp);
if (cp->wscale > 0)
MPASS(cp->ecn == 0 || cp->ecn == 1);
opt2 |= V_CCTRL_ECN(cp->ecn);
opt2 |= V_TX_QUEUE(TX_MODQ(pi->tx_chan));
opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);
MPASS(pi->rx_chan == 0 || pi->rx_chan == 1);
opt2 |= V_RX_CHANNEL(pi->rx_chan);
MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
opt2 |= V_CONG_CNTRL(cp->cong_algo);
MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
if (cp->rx_coalesce == 1)
MPASS(cp->ulp_mode != ULP_MODE_TCPDDP);
struct adapter *sc = vi->adapter;
struct tp_params *tp = &sc->params.tp;
if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
ntuple |= (uint64_t)e->lport << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) {
ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
tp->vnic_shift;
struct port_info *pi = vi->pi;
struct adapter *sc = pi->adapter;
struct tom_tunables *tt = &sc->tt;
MPASS(s->offload != 0);
if (s->cong_algo >= 0)
cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
else if (sc->tt.cong_algorithm >= 0)
cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
if (strcasecmp(cc->name, "reno") == 0)
cp->cong_algo = CONG_ALG_RENO;
else if (strcasecmp(cc->name, "tahoe") == 0)
cp->cong_algo = CONG_ALG_TAHOE;
else if (strcasecmp(cc->name, "newreno") == 0)
cp->cong_algo = CONG_ALG_NEWRENO;
else if (strcasecmp(cc->name, "highspeed") == 0)
cp->cong_algo = CONG_ALG_HIGHSPEED;
else
cp->cong_algo = CONG_ALG_NEWRENO;
if (s->sched_class >= 0 && s->sched_class < sc->params.nsched_cls)
cp->tc_idx = s->sched_class;
cp->tc_idx = -1;
if (s->nagle >= 0)
cp->nagle = s->nagle > 0 ? 1 : 0;
cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
cp->keepalive = 1;
cp->keepalive = 0;
if (tt->tx_align >= 0)
cp->tx_align = tt->tx_align > 0 ? 1 : 0;
(port_top_speed(pi) > 10 || sc->params.nports > 2))
cp->tx_align = 1;
cp->tx_align = 0;
cp->ulp_mode = ULP_MODE_NONE;
if (s->rx_coalesce >= 0)
cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
else if (tt->rx_coalesce >= 0)
cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
cp->rx_coalesce = 1; /* default */
cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
if (s->txq == QUEUE_RANDOM)
else if (s->txq == QUEUE_ROUNDROBIN)
q_idx = atomic_fetchadd_int(&vi->txq_rr, 1);
q_idx = s->txq;
cp->txq_idx = vi->first_ofld_txq + q_idx % vi->nofldtxq;
if (s->rxq == QUEUE_RANDOM)
else if (s->rxq == QUEUE_ROUNDROBIN)
q_idx = atomic_fetchadd_int(&vi->rxq_rr, 1);
q_idx = s->rxq;
cp->rxq_idx = vi->first_ofld_rxq + q_idx % vi->nofldrxq;
if (tcpopt->tstamp &&
(s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
cp->tstamp = 1;
cp->tstamp = 0;
if (tcpopt->sack &&
(s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
cp->sack = 1;
cp->sack = 0;
if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
cp->wscale = select_rcv_wscale();
cp->wscale = 0;
if (tcpopt->ecn && /* XXX: review. */
(s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
cp->ecn = 1;
cp->ecn = 0;
wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);
if (tt->sndbuf > 0)
cp->sndbuf = tt->sndbuf;
else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
cp->sndbuf = 256 * 1024;
cp->sndbuf = so->sol_sbsnd_hiwat;
if (s->tstamp > 0 ||
(s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
cp->tstamp = 1;
cp->tstamp = 0;
if (s->sack > 0 ||
(s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
cp->sack = 1;
cp->sack = 0;
if (tp->t_flags & TF_REQ_SCALE)
cp->wscale = select_rcv_wscale();
cp->wscale = 0;
if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
cp->ecn = 1;
cp->ecn = 0;
SOCKBUF_LOCK(&so->so_rcv);
wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
SOCKBUF_UNLOCK(&so->so_rcv);
cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);
if (tt->sndbuf > 0)
cp->sndbuf = tt->sndbuf;
SOCKBUF_LOCK(&so->so_snd);
if (so->so_snd.sb_flags & SB_AUTOSIZE &&
cp->sndbuf = 256 * 1024;
cp->sndbuf = so->so_snd.sb_hiwat;
SOCKBUF_UNLOCK(&so->so_snd);
cp->l2t_idx = l2t_idx;
cp->emss = 0;
struct tid_info *t = &sc->tids;
MPASS(t->ntids > 0);
MPASS(t->tid_tab == NULL);
t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
if (t->tid_tab == NULL)
atomic_store_rel_int(&t->tids_in_use, 0);
struct tid_info *t = &sc->tids;
KASSERT(t->tids_in_use == 0,
("%s: %d tids still in use.", __func__, t->tids_in_use));
free(t->tid_tab, M_CXGBE);
t->tid_tab = NULL;
if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
rw_init(&td->tcb_history_lock, "TCB history");
td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
if (td->tcb_history != NULL) {
for (i = 0; i < sc->tids.ntids; i++) {
MPASS(td->tcb_history[i] == NULL);
free(td->tcb_history, M_CXGBE);
if (rw_initialized(&td->tcb_history_lock))
rw_destroy(&td->tcb_history_lock);
KASSERT(TAILQ_EMPTY(&td->toep_list),
KASSERT(td->lctx_count == 0,
t4_free_ppod_region(&td->pr);
if (td->listen_mask != 0)
hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
if (mtx_initialized(&td->unsent_wr_lock))
mtx_destroy(&td->unsent_wr_lock);
if (mtx_initialized(&td->lctx_hash_lock))
mtx_destroy(&td->lctx_hash_lock);
if (mtx_initialized(&td->toep_list_lock))
mtx_destroy(&td->toep_list_lock);
ipv6 = inp->inp_vflag & INP_IPV6;
eh->ether_type = htons(ETHERTYPE_IPV6);
eh->ether_type = htons(ETHERTYPE_IP);
evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
evh->evl_tag = htons(vtag);
evh->evl_proto = htons(ETHERTYPE_IPV6);
evh->evl_proto = htons(ETHERTYPE_IP);
ip6->ip6_vfc = IPV6_VERSION;
ip6->ip6_plen = htons(sizeof(struct tcphdr));
ip6->ip6_nxt = IPPROTO_TCP;
ip6->ip6_src = inp->in6p_laddr;
ip6->ip6_dst = inp->in6p_faddr;
ip6->ip6_src = inp->in6p_laddr;
ip6->ip6_dst = ip6->ip6_src;
ip->ip_v = IPVERSION;
ip->ip_hl = sizeof(*ip) >> 2;
ip->ip_tos = inp->inp_ip_tos;
ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
ip->ip_ttl = inp->inp_ip_ttl;
ip->ip_p = IPPROTO_TCP;
ip->ip_src = inp->inp_laddr;
ip->ip_dst = inp->inp_faddr;
ip->ip_src = inp->inp_laddr;
ip->ip_dst = ip->ip_src;
th->th_sport = inp->inp_lport; /* network byte order already */
th->th_dport = inp->inp_fport; /* ditto */
th->th_sport = inp->inp_lport; /* network byte order already */
th->th_dport = th->th_sport;
.rx_coalesce = -1,
.cong_algo = -1,
.sched_class = -1,
.tstamp = -1,
.sack = -1,
.nagle = -1,
.ecn = -1,
.ddp = -1,
.tls = -1,
.mss = -1,
rw_assert(&sc->policy_lock, RA_LOCKED);
op = sc->policy;
if (sc->tt.cop_managed_offloading)
pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
r = &op->rule[0];
for (i = 0; i < op->nrules; i++, r++) {
if (r->open_type != open_type &&
r->open_type != OPEN_TYPE_DONTCARE) {
matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
return (matched ? &r->settings : &disallow_offloading_settings);
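
/*
 * Sketch of the rule walk above with a stand-in matcher: the driver runs
 * each rule's classic-BPF program over a frame synthesized from the
 * connection's addresses; here a plain predicate takes bpf_filter()'s
 * place. First matching rule wins, and the fallback disallows offload.
 * All types and names below are invented for illustration.
 */
#include <stdbool.h>
#include <stddef.h>

struct settings {
	bool offload;
};

struct rule {
	bool (*match)(const unsigned char *pkt, size_t len);
	struct settings settings;
};

static const struct settings disallow = { .offload = false };

static const struct settings *
lookup_policy_sketch(const struct rule *rules, int nrules,
    const unsigned char *pkt, size_t len)
{
	int i;

	for (i = 0; i < nrules; i++) {
		if (rules[i].match(pkt, len))
			return (&rules[i].settings);
	}
	return (&disallow);
}
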
mtx_lock(&td->unsent_wr_lock);
STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
mtx_unlock(&td->unsent_wr_lock);
"opcode %x\n", __func__, wr, wr->wr_len, opcode);
MPASS(!(toep->flags & TPF_SYNQE));
inp = toep->inp;
CURVNET_SET(toep->vnet);
toep->flags |= TPF_ABORT_SHUTDOWN;
if ((inp->inp_flags & INP_DROPPED) == 0) {
struct socket *so = inp->inp_socket;
INP_WLOCK(inp); /* re-acquire */
mtx_lock(&td->toep_list_lock);
TAILQ_SWAP(&td->stranded_synqe, &slist, synq_entry, link);
mtx_unlock(&td->toep_list_lock);
MPASS(synqe->tid >= 0); /* stale, was kept around for debug */
synqe->tid = -1;
/* Clean up in-flight active opens. */
mtx_lock(&td->toep_list_lock);
TAILQ_SWAP(&td->stranded_atids, &tlist, toepcb, link);
mtx_unlock(&td->toep_list_lock);
MPASS(toep->tid >= 0); /* stale, was kept around for debug */
toep->tid = -1;
mtx_lock(&td->toep_list_lock);
TAILQ_SWAP(&td->stranded_tids, &tlist, toepcb, link);
mtx_unlock(&td->toep_list_lock);
MPASS(toep->tid >= 0); /* stale, was kept around for debug */
toep->tid = -1;
/* per-adapter softc for TOM */
mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
TAILQ_INIT(&td->toep_list);
TAILQ_INIT(&td->synqe_list);
TAILQ_INIT(&td->stranded_atids);
TAILQ_INIT(&td->stranded_tids);
TASK_INIT(&td->cleanup_stranded_tids, 0, cleanup_stranded_tids, td);
mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
&td->listen_mask, HASH_NOWAIT);
mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
STAILQ_INIT(&td->unsent_wr_list);
TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);
tod = &td->tod;
tod->tod_softc = sc;
tod->tod_connect = t4_connect;
tod->tod_listen_start = t4_listen_start;
tod->tod_listen_stop = t4_listen_stop;
tod->tod_rcvd = t4_rcvd;
tod->tod_output = t4_tod_output;
tod->tod_send_rst = t4_send_rst;
tod->tod_send_fin = t4_send_fin;
tod->tod_pcb_detach = t4_pcb_detach;
tod->tod_l2_update = t4_l2_update;
tod->tod_syncache_added = t4_syncache_added;
tod->tod_syncache_removed = t4_syncache_removed;
tod->tod_syncache_respond = t4_syncache_respond;
tod->tod_offload_socket = t4_offload_socket;
tod->tod_ctloutput = t4_ctloutput;
tod->tod_tcp_info = t4_tcp_info;
tod->tod_alloc_tls_session = t4_alloc_tls_session;
tod->tod_pmtu_update = t4_pmtu_update;
for_each_vi(sc->port[i], v, vi) {
SETTOEDEV(vi->ifp, &td->tod);
sc->tom_softc = td;
register_toedev(sc->tom_softc);
struct tom_data *td = sc->tom_softc;
if (sc->offload_map != 0) {
for_each_vi(sc->port[i], v, vi) {
if_setcapenablebit(vi->ifp, 0, IFCAP_TOE);
SETTOEDEV(vi->ifp, NULL);
MPASS(sc->offload_map == 0);
mtx_lock(&td->toep_list_lock);
if (!TAILQ_EMPTY(&td->toep_list))
MPASS(TAILQ_EMPTY(&td->synqe_list));
MPASS(TAILQ_EMPTY(&td->stranded_tids));
mtx_unlock(&td->toep_list_lock);
mtx_lock(&td->lctx_hash_lock);
if (td->lctx_count > 0)
mtx_unlock(&td->lctx_hash_lock);
taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
taskqueue_drain(taskqueue_thread, &td->cleanup_stranded_tids);
mtx_lock(&td->unsent_wr_lock);
if (!STAILQ_EMPTY(&td->unsent_wr_list))
mtx_unlock(&td->unsent_wr_lock);
unregister_toedev(sc->tom_softc);
sc->tom_softc = NULL;
struct tom_data *td = sc->tom_softc;
struct tid_info *t = &sc->tids;
* Hashfilters and T6-KTLS are the only other users of atids but they're
MPASS(!(sc->flags & KERN_TLS_ON));
mtx_lock(&t->atid_lock);
MPASS(t->atid_alloc_stopped == true);
mtx_unlock(&t->atid_lock);
* In-use atids fall in one of these two categories:
for (atid = 0; atid < t->natids; atid++) {
if ((uintptr_t)toep >= (uintptr_t)&t->atid_tab[0] &&
(uintptr_t)toep < (uintptr_t)&t->atid_tab[t->natids])
MPASS(toep->tid == atid);
MPASS(toep->incarnation == sc->incarnation);
* Take the atid out of the lookup table. toep->tid is stale
__func__, atid, toep->incarnation);
free_atid(sc, toep->tid);
toep->tid = -1;
mtx_lock(&td->toep_list_lock);
toep->flags &= ~TPF_IN_TOEP_LIST;
TAILQ_REMOVE(&td->toep_list, toep, link);
TAILQ_INSERT_TAIL(&td->stranded_atids, toep, link);
mtx_unlock(&td->toep_list_lock);
MPASS(atomic_load_int(&t->atids_in_use) == 0);
struct tom_data *td = sc->tom_softc;
struct tid_info *t = &sc->tids;
mtx_lock(&td->toep_list_lock);
TAILQ_FOREACH(toep, &td->toep_list, link) {
MPASS(sc->incarnation == toep->incarnation);
MPASS(toep->tid >= 0);
MPASS(toep == lookup_tid(sc, toep->tid));
__func__, toep->tid, toep->incarnation);
remove_tid(sc, toep->tid, toep->ce ? 2 : 1);
/* toep->tid is stale now but left alone for debug. */
toep->tid = -1;
toep->flags &= ~TPF_IN_TOEP_LIST;
MPASS(TAILQ_EMPTY(&td->stranded_tids));
TAILQ_CONCAT(&td->stranded_tids, &td->toep_list, link);
MPASS(TAILQ_EMPTY(&td->toep_list));
mtx_unlock(&td->toep_list_lock);
MPASS(atomic_load_int(&t->tids_in_use) == 0);
* queue or to l2t_entry->wr_list.
struct l2t_data *d = sc->l2t;
struct tom_data *td = sc->tom_softc;
taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
for (i = 0; i < d->l2t_size; i++) {
e = &d->l2tab[i];
mtx_lock(&e->lock);
if (e->state == L2T_STATE_VALID || e->state == L2T_STATE_STALE)
e->state = L2T_STATE_RESOLVING;
while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
STAILQ_REMOVE_HEAD(&e->wr_list, link);
mtx_unlock(&e->lock);
struct tid_info *t = &sc->tids;
struct tom_data *td = sc->tom_softc;
if (atomic_load_int(&t->atids_in_use) > 0)
if (atomic_load_int(&t->stids_in_use) > 0)
if (atomic_load_int(&t->tids_in_use) > 0)
taskqueue_enqueue(taskqueue_thread, &td->cleanup_stranded_tids);
MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
struct toepcb *toep = tp->t_toe;
if (sopt->sopt_level == IPPROTO_TCP && sopt->sopt_name == TCP_USE_DDP) {
if (sopt->sopt_dir != SOPT_SET)
if (sopt->sopt_td != NULL) {
struct toepcb *toep = tp->t_toe;