Lines Matching defs:rsm

489    struct rack_sendmap *rsm, uint8_t quality);
492 struct rack_sendmap *rsm);
494 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
523 struct rack_sendmap *rsm, uint32_t cts);
524 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
539 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint32_t add_flag, int segsiz);
542 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz);
545 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
589 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
753 uint32_t send_end_t, int line, uint8_t mode, struct rack_sendmap *rsm)
775 if (rsm != NULL) {
776 log.u_bbr.applimited = rsm->r_start;
777 log.u_bbr.delivered = rsm->r_end;
778 log.u_bbr.epoch = rsm->r_flags;
2496 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm)
2500 } else if (rack->in_probe_rtt && (rsm == NULL))
2504 if (rsm) {
2624 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped)
2632 gain = (uint64_t)rack_get_output_gain(rack, rsm);
2685 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod)
2706 log.u_bbr.flex3 = rsm->r_flags;
2707 log.u_bbr.flex4 = rsm->r_dupack;
2708 log.u_bbr.flex5 = rsm->r_start;
2709 log.u_bbr.flex6 = rsm->r_end;
2771 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm)
2782 if (rsm == NULL)
2785 log.u_bbr.flex3 = rsm->r_end - rsm->r_start;
2809 struct rack_sendmap *rsm,
2821 log.u_bbr.delRate = (uintptr_t)rsm;
2829 if (rsm) {
2830 log.u_bbr.flex3 = rsm->r_start;
2831 log.u_bbr.flex4 = rsm->r_end;
2860 struct rack_sendmap *rsm, int conf)
2880 if (rsm) {
2881 log.u_bbr.pkt_epoch = rsm->r_start;
2882 log.u_bbr.lost = rsm->r_end;
2883 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt;
2885 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags;
2916 if (rsm)
2917 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
3401 struct rack_sendmap *rsm;
3405 * theory is the "hottest" rsm we have,
3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3413 return (rsm);
3420 rsm = uma_zalloc(rack_zone, M_NOWAIT);
3421 if (rsm) {
3424 return (rsm);
3427 * Dig in to our aux rsm's (the last two) since
3432 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
3433 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3435 return (rsm);
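
The rack_alloc() matches above show a three-tier fallback: the free list first (its head being the cache-warm entry), then the UMA zone, then the reserved aux entries. A minimal sketch reconstructed from the matched lines; the function name and the reserve guard (rc_free_cnt > 2) are illustrative assumptions, not the verbatim code:

static struct rack_sendmap *
rack_alloc_sketch(struct tcp_rack *rack)
{
	struct rack_sendmap *rsm;

	/* 1) Head of rc_free is the "hottest" (most recently freed) entry;
	 *    a small reserve is held back (this guard is hypothetical). */
	if (rack->r_ctl.rc_free_cnt > 2) {
		rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
		return (rsm);
	}
	/* 2) Otherwise allocate from the zone; M_NOWAIT may fail. */
	rsm = uma_zalloc(rack_zone, M_NOWAIT);
	if (rsm != NULL)
		return (rsm);
	/* 3) Zone exhausted: dig into the reserved aux entries (the last two). */
	rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
	if (rsm != NULL)
		TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
	return (rsm);
}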
3459 struct rack_sendmap *rsm;
3475 rsm = rack_alloc(rack);
3476 if (rsm != NULL && limit_type) {
3477 rsm->r_limit_type = limit_type;
3480 return (rsm);
3486 struct rack_sendmap *rsm;
3493 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
3494 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
3497 uma_zfree(rack_zone, rsm);
3502 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm)
3504 if (rsm->r_flags & RACK_APP_LIMITED) {
3509 if (rsm->r_limit_type) {
3513 if (rsm == rack->r_ctl.rc_first_appl) {
3514 rack->r_ctl.cleared_app_ack_seq = rsm->r_start + (rsm->r_end - rsm->r_start);
3519 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl);
3521 if (rsm == rack->r_ctl.rc_resend)
3523 if (rsm == rack->r_ctl.rc_end_appl)
3525 if (rack->r_ctl.rc_tlpsend == rsm)
3527 if (rack->r_ctl.rc_sacklast == rsm)
3529 memset(rsm, 0, sizeof(struct rack_sendmap));
3534 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);
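
rack_free() (matches above) must break every cached alias before recycling an entry. A condensed sketch of that pointer-clearing, taken from the matched lines with the app-limited bookkeeping elided:

	/* Clear any cached pointer that may still alias rsm, then zero it
	 * and push it on the head of rc_free so rack_alloc() reuses it warm. */
	if (rsm == rack->r_ctl.rc_resend)
		rack->r_ctl.rc_resend = NULL;
	if (rsm == rack->r_ctl.rc_end_appl)
		rack->r_ctl.rc_end_appl = NULL;
	if (rack->r_ctl.rc_tlpsend == rsm)
		rack->r_ctl.rc_tlpsend = NULL;
	if (rack->r_ctl.rc_sacklast == rsm)
		rack->r_ctl.rc_sacklast = NULL;
	memset(rsm, 0, sizeof(struct rack_sendmap));
	TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext);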
4262 struct rack_sendmap *rsm;
4296 rsm = tqhash_max(rack->r_ctl.tqh);
4297 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
4299 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
4307 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
4308 rack->r_ctl.rc_end_appl = rsm;
4310 rsm->r_flags |= RACK_APP_LIMITED;
4760 rack_in_gp_window(struct tcpcb *tp, struct rack_sendmap *rsm)
4762 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4763 SEQ_LEQ(rsm->r_end, tp->gput_ack)) {
4774 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) &&
4775 SEQ_GT(rsm->r_end, tp->gput_seq)){
4782 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) &&
4783 SEQ_LT(rsm->r_start, tp->gput_ack) &&
4784 SEQ_GEQ(rsm->r_end, tp->gput_ack)) {
4797 rack_mark_in_gp_win(struct tcpcb *tp, struct rack_sendmap *rsm)
4807 if (rack_in_gp_window(tp, rsm))
4808 rsm->r_flags |= RACK_IN_GP_WIN;
4810 rsm->r_flags &= ~RACK_IN_GP_WIN;
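
The three SEQ tests at 4762-4784 classify how an rsm overlaps the goodput measurement window; a worked illustration with hypothetical sequence numbers (gput_seq = 1000, gput_ack = 2000):

	/* rsm [1200,1500): r_start >= 1000 && r_end <= 2000  -> fully inside
	 * rsm [ 900,1100): r_start <  1000 && r_end >  1000  -> straddles the start
	 * rsm [1800,2100): r_start >= 1000 && r_start < 2000
	 *                  && r_end >= 2000                  -> straddles the end
	 * rack_mark_in_gp_win() sets RACK_IN_GP_WIN for any of the three
	 * cases and clears it otherwise. */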
4817 struct rack_sendmap *rsm = NULL;
4819 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4820 if (rsm == NULL) {
4821 rsm = tqhash_min(rack->r_ctl.tqh);
4824 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){
4825 rsm->r_flags &= ~RACK_IN_GP_WIN;
4826 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4834 struct rack_sendmap *rsm = NULL;
4844 * to gp_seq marking so that no rsm is set incorrectly
4847 rsm = tqhash_min(rack->r_ctl.tqh);
4848 while (rsm != NULL) {
4849 rack_mark_in_gp_win(tp, rsm);
4850 if (SEQ_GEQ(rsm->r_end, tp->gput_seq))
4852 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4855 if (rsm == NULL) {
4857 * Need to find the GP seq, if rsm is
4860 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
4861 if (rsm == NULL)
4863 rack_mark_in_gp_win(tp, rsm);
4866 * Now we may need to mark already sent rsm, ahead of
4868 * *before* we started our measurement. The rsm, if non-null
4869 * has been marked (note if rsm would have been NULL we would have
4873 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
4874 while (rsm) {
4875 rack_mark_in_gp_win(tp, rsm);
4876 if (SEQ_GT(rsm->r_end, tp->gput_ack))
4878 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
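
The marking walk at 4847-4878 is effectively two passes; a simplified summary of the control flow visible in the matches:

	/* Pass 1: from tqhash_min() forward, mark each rsm until one whose
	 * r_end reaches gput_seq is found. If the walk ran off the end,
	 * re-seek gput_seq directly with tqhash_find() and mark that entry.
	 * Pass 2: continue from tqhash_next() marking forward until an
	 * rsm's r_end passes gput_ack. */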
5378 struct rack_sendmap *rsm;
5423 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq);
5424 if (rsm) {
5426 if (SEQ_LT(rsm->r_start, tp->gput_seq)) {
5433 tp->gput_seq = rsm->r_start;
5435 if (rsm->r_flags & RACK_ACKED) {
5438 tp->gput_ts = (uint32_t)rsm->r_ack_arrival;
5439 tp->gput_seq = rsm->r_end;
5440 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
5442 rsm = nrsm;
5449 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
5452 * If we don't find the rsm due to some
5465 (uintptr_t)rsm,
5469 __LINE__, rsm, quality);
5878 struct rack_sendmap *rsm;
5881 * Walk the time-order transmitted list looking for an rsm that is
5885 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
5886 if (rsm->r_flags & RACK_ACKED) {
5892 return (rsm);
5896 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5905 prsm = rsm;
6006 struct rack_sendmap *rsm, uint32_t srtt)
6021 len = rsm->r_end - rsm->r_start;
6035 prsm = TAILQ_PREV(rsm, rack_head, r_tnext);
6044 idx = rsm->r_rtr_cnt - 1;
6046 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) {
6048 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx];
6115 struct rack_sendmap *rsm;
6123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6124 if (rsm == NULL)
6128 if (rsm->r_flags & RACK_ACKED) {
6129 rsm = rack_find_lowest_rsm(rack);
6130 if (rsm == NULL)
6133 idx = rsm->r_rtr_cnt - 1;
6136 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) {
6139 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) {
6144 return (rsm);
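
The two guards at 6136-6139 implement the RACK time threshold; a worked example with hypothetical microsecond values (lastsent = 1000000, thresh = 40000):

	/* tsused =  990000 -> TSTMP_LT(tsused, lastsent): stale clock, bail
	 * tsused = 1030000 -> delta 30000 <  thresh: not yet lost, bail
	 * tsused = 1050000 -> delta 50000 >= thresh: return rsm for recovery */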
6175 struct rack_sendmap *rsm;
6190 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6191 if ((rsm == NULL) || sup_rack) {
6195 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
6196 if (rsm) {
6207 idx = rsm->r_rtr_cnt - 1;
6208 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx])))
6211 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6228 rsm) {
6236 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) {
6237 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]);
6251 if (rsm->r_flags & RACK_ACKED) {
6252 rsm = rack_find_lowest_rsm(rack);
6253 if (rsm == NULL) {
6259 if ((rsm->r_flags & RACK_SACK_PASSED) ||
6260 (rsm->r_flags & RACK_RWND_COLLAPSED) ||
6261 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
6264 (rsm->r_flags & RACK_HAS_FIN)) {
6287 idx = rsm->r_rtr_cnt - 1;
6288 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh;
6312 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
6313 if (rsm == NULL) {
6314 /* We found no rsm to TLP with. */
6317 if (rsm->r_flags & RACK_HAS_FIN) {
6319 rsm = NULL;
6322 idx = rsm->r_rtr_cnt - 1;
6324 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time))
6325 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx];
6355 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt);
6365 (uint32_t)rsm->r_tim_lastsent[idx],
6873 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts)
6879 nrsm = rsm;
6886 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
6916 struct rack_sendmap *rsm;
6922 rsm = rack_check_recovery_mode(tp, cts);
6923 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm);
6924 if (rsm) {
6926 rack_mark_lost(tp, rack, rsm, cts);
6927 rack->r_ctl.rc_resend = rsm;
6942 if (rsm == NULL) {
6954 rack_adjust_orig_mlen(struct rack_sendmap *rsm)
6957 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) {
6964 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)),
6965 ("mbuf:%p rsm:%p trailing_space:%jd ots:%u oml:%u mlen:%u\n",
6966 rsm->m,
6967 rsm,
6968 (intmax_t)M_TRAILINGROOM(rsm->m),
6969 rsm->orig_t_space,
6970 rsm->orig_m_len,
6971 rsm->m->m_len));
6972 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m));
6973 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
6975 if (rsm->m->m_len < rsm->orig_m_len) {
6980 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)),
6981 ("mbuf:%p len:%u rsm:%p oml:%u soff:%u\n",
6982 rsm->m, rsm->m->m_len,
6983 rsm, rsm->orig_m_len,
6984 rsm->soff));
6985 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len))
6986 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len);
6988 rsm->soff = 0;
6989 rsm->orig_m_len = rsm->m->m_len;
6991 } else if (rsm->m->m_len > rsm->orig_m_len) {
6992 panic("rsm:%p m:%p m_len grew outside of t_space compensation",
6993 rsm, rsm->m);
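
rack_adjust_orig_mlen() (6957-6993) compensates for sbcompress() growing the mbuf into its trailing room and for head trims; a worked example with hypothetical sizes:

	/* Snapshot at send time: orig_m_len = 1000, orig_t_space = 512.
	 * Later M_TRAILINGROOM(m) == 256: 256 bytes were appended, so
	 *   orig_m_len += (512 - 256) -> 1256 and orig_t_space = 256.
	 * If instead m_len shrank 1000 -> 700 (300 bytes gone from the
	 * front), soff -= 300 keeps rsm->soff on the same payload byte.
	 * m_len growing outside the trailing-room compensation panics. */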
6999 rack_setup_offset_for_rsm(struct tcp_rack *rack, struct rack_sendmap *src_rsm, struct rack_sendmap *rsm)
7017 ("rsm:%p nrsm:%p hit at soff:%u null m",
7018 src_rsm, rsm, soff));
7026 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
7027 (rsm->r_start - rack->rc_tp->snd_una),
7028 &rsm->soff);
7029 rsm->orig_m_len = rsm->m->m_len;
7030 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7034 rsm->m = m;
7035 rsm->soff = soff;
7036 rsm->orig_m_len = m->m_len;
7037 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
7042 struct rack_sendmap *rsm, uint32_t start)
7047 nrsm->r_end = rsm->r_end;
7048 nrsm->r_rtr_cnt = rsm->r_rtr_cnt;
7049 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt;
7050 nrsm->r_flags = rsm->r_flags;
7051 nrsm->r_dupack = rsm->r_dupack;
7052 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed;
7054 nrsm->r_fas = rsm->r_fas;
7055 nrsm->r_bas = rsm->r_bas;
7056 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start);
7057 nrsm->r_just_ret = rsm->r_just_ret;
7059 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx];
7065 if (rsm->r_flags & RACK_HAS_FIN)
7066 rsm->r_flags &= ~RACK_HAS_FIN;
7068 if (rsm->r_flags & RACK_HAD_PUSH)
7069 rsm->r_flags &= ~RACK_HAD_PUSH;
7071 nrsm->r_hw_tls = rsm->r_hw_tls;
7075 * how much is left in original rsm. Then we walk out the mbuf
7079 KASSERT(((rsm->m != NULL) ||
7080 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))),
7081 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack));
7082 if (rsm->m)
7083 rack_setup_offset_for_rsm(rack, rsm, nrsm);
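
rack_clone_rsm() (7047-7083) splits one map entry in two at `start`; an illustration with hypothetical sequence numbers:

	/* rsm [100,300), split at start = 200:
	 *   before: rsm  [100 ---------------- 300)
	 *   after:  rsm  [100 --- 200)   nrsm [200 --- 300)
	 * nrsm inherits the counters, flags and timestamps;
	 * tqhash_update_end() shrinks rsm to end at nrsm->r_start; FIN and
	 * PUSH describe the tail, so they stay on nrsm and are cleared from
	 * rsm; last, nrsm gets its own mbuf offset via
	 * rack_setup_offset_for_rsm(). */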
7184 struct rack_sendmap *rsm = NULL;
7288 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
7290 rsm = tqhash_max(rack->r_ctl.tqh);
7291 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) {
7292 rsm = rack_find_high_nonack(rack, rsm);
7295 if (rsm == NULL) {
7307 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1));
7309 rsm = tqhash_min(rack->r_ctl.tqh);
7311 if (rsm == NULL) {
7316 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) {
7330 rack_clone_rsm(rack, nrsm, rsm,
7331 (rsm->r_end - ctf_fixed_maxseg(tp)));
7332 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
7337 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
7338 nrsm, insret, rack, rsm);
7341 if (rsm->r_in_tmap) {
7342 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
7345 rsm = nrsm;
7347 rack->r_ctl.rc_tlpsend = rsm;
7536 struct rack_sendmap *rsm, *trsm = NULL;
7575 * walk through and place every rsm in the tail queue
7580 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
7581 rsm->r_dupack = 0;
7583 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
7586 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
7588 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext);
7590 rsm->r_in_tmap = 1;
7591 trsm = rsm;
7592 if (rsm->r_flags & RACK_ACKED)
7593 rsm->r_flags |= RACK_WAS_ACKED;
7594 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_WAS_LOST);
7595 rsm->r_flags |= RACK_MUST_RXT;
7745 struct rack_sendmap *rsm;
7747 rsm = tqhash_min(rack->r_ctl.tqh);
7748 if (rsm) {
7750 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) &&
7751 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) {
8138 struct rack_sendmap *rsm, uint64_t ts, uint32_t add_flag, int segsiz)
8142 rsm->r_rtr_cnt++;
8143 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) {
8144 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS;
8145 rsm->r_flags |= RACK_OVERMAX;
8147 rsm->r_act_rxt_cnt++;
8149 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8150 rsm->r_dupack = 0;
8151 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) {
8152 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start);
8153 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start);
8155 if (rsm->r_flags & RACK_WAS_LOST) {
8161 rsm->r_flags &= ~RACK_WAS_LOST;
8162 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
8163 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
8164 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
8165 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
8169 idx = rsm->r_rtr_cnt - 1;
8170 rsm->r_tim_lastsent[idx] = ts;
8175 rsm->r_fas = ctf_flight_size(rack->rc_tp,
8177 if (rsm->r_flags & RACK_ACKED) {
8179 rsm->r_flags &= ~RACK_ACKED;
8180 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
8182 if (rsm->r_in_tmap) {
8183 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8184 rsm->r_in_tmap = 0;
8187 rack_mark_in_gp_win(tp, rsm);
8188 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8189 rsm->r_in_tmap = 1;
8190 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz);
8192 if (rsm->r_flags & RACK_MUST_RXT) {
8194 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
8195 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
8203 rsm->r_flags &= ~RACK_MUST_RXT;
8206 rsm->r_flags &= ~RACK_RWND_COLLAPSED;
8207 if (rsm->r_flags & RACK_SACK_PASSED) {
8209 rsm->r_flags &= ~RACK_SACK_PASSED;
8210 rsm->r_flags |= RACK_WAS_SACKPASS;
8216 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint32_t add_flag, int segsiz)
8219 * We (re-)transmitted starting at rsm->r_start for some length
8228 c_end = rsm->r_start + len;
8229 if (SEQ_GEQ(c_end, rsm->r_end)) {
8232 * slopping into the next rsm.
8234 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8235 if (c_end == rsm->r_end) {
8242 act_len = rsm->r_end - rsm->r_start;
8244 return (rsm->r_end);
8261 * So here we are going to take the original rsm and make it what we
8267 rack_clone_rsm(rack, nrsm, rsm, c_end);
8274 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8275 nrsm, insret, rack, rsm);
8278 if (rsm->r_in_tmap) {
8279 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8282 rsm->r_flags &= (~RACK_HAS_FIN);
8283 rack_update_rsm(tp, rack, rsm, ts, add_flag, segsiz);
8284 /* Log a split of rsm into rsm and nrsm */
8285 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8297 struct rack_sendmap *rsm, *nrsm;
8380 rsm = rack_alloc(rack);
8381 if (rsm == NULL) {
8389 rsm->r_flags = RACK_HAS_FIN|add_flag;
8391 rsm->r_flags = add_flag;
8394 rsm->r_hw_tls = 1;
8395 rsm->r_tim_lastsent[0] = cts;
8396 rsm->r_rtr_cnt = 1;
8397 rsm->r_act_rxt_cnt = 0;
8398 rsm->r_rtr_bytes = 0;
8401 rsm->r_flags |= RACK_HAS_SYN;
8403 rsm->r_start = seq_out;
8404 rsm->r_end = rsm->r_start + len;
8405 rack_mark_in_gp_win(tp, rsm);
8406 rsm->r_dupack = 0;
8412 rsm->m = s_mb;
8413 rsm->soff = s_moff;
8418 rsm->r_fas = (ctf_flight_size(rack->rc_tp,
8420 (rsm->r_end - rsm->r_start));
8422 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) {
8423 rack->r_ctl.ss_hi_fs = rsm->r_fas;
8425 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */
8426 if (rsm->m) {
8427 if (rsm->m->m_len <= rsm->soff) {
8433 * within rsm->m. But if the sbsndptr was
8439 lm = rsm->m;
8440 while (lm->m_len <= rsm->soff) {
8441 rsm->soff -= lm->m_len;
8443 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u",
8444 __func__, rack, s_moff, s_mb, rsm->soff));
8446 rsm->m = lm;
8448 rsm->orig_m_len = rsm->m->m_len;
8449 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
8451 rsm->orig_m_len = 0;
8452 rsm->orig_t_space = 0;
8454 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz);
8455 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
8456 /* Log a new rsm */
8457 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__);
8459 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
8461 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
8462 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8463 nrsm, insret, rack, rsm);
8466 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
8467 rsm->r_in_tmap = 1;
8468 if (rsm->r_flags & RACK_IS_PCM) {
8470 rack->r_ctl.pcm_i.eseq = rsm->r_end;
8473 rack->r_ctl.pcm_i.sseq = rsm->r_start;
8485 prsm = tqhash_prev(rack->r_ctl.tqh, rsm);
8496 rsm = hintrsm;
8500 rsm = NULL;
8502 if ((rsm) && (rsm->r_start == seq_out)) {
8503 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8512 rsm = tqhash_find(rack->r_ctl.tqh, seq_out);
8513 if (rsm) {
8514 if (rsm->r_start == seq_out) {
8515 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag, segsiz);
8522 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) {
8530 rack_update_rsm(tp, rack, rsm, cts, add_flag, segsiz);
8534 * copy rsm to nrsm and then trim the front of rsm
8537 rack_clone_rsm(rack, nrsm, rsm, seq_out);
8538 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__);
8543 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
8544 nrsm, insret, rack, rsm);
8547 if (rsm->r_in_tmap) {
8548 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
8551 rsm->r_flags &= (~RACK_HAS_FIN);
8567 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n",
8570 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) {
8571 printf("rsm:%p start:%u end:%u\n",
8572 rsm, rsm->r_start, rsm->r_end);
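
The while loop at 8440-8446 steps past mbufs that lie entirely before this send's offset; a worked example with a hypothetical chain:

	/* chain m1(len 100) -> m2(len 200) -> m3(len 500), rsm->soff = 250:
	 *   250 >= 100: soff = 150, lm = m2
	 *   150 <  200: stop -> rsm->m = m2, rsm->soff = 150
	 * This re-anchors rsm->m when the cached send pointer (s_mb) lagged
	 * behind the data this rsm actually covers. */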
8597 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
8614 ((rsm == NULL) ||
8615 (rsm->r_just_ret) ||
8616 (rsm->r_one_out_nr &&
8619 * If the rsm had a just return
8654 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
8869 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack)
8875 if ((rsm->r_flags & RACK_ACKED) ||
8876 (rsm->r_flags & RACK_WAS_ACKED))
8879 if (rsm->r_no_rtt_allowed) {
8884 if (SEQ_GT(th_ack, rsm->r_end)) {
8885 len_acked = rsm->r_end - rsm->r_start;
8888 len_acked = th_ack - rsm->r_start;
8892 len_acked = rsm->r_end - rsm->r_start;
8895 if (rsm->r_rtr_cnt == 1) {
8897 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8909 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]))
8910 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8912 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8917 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
8921 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1);
8922 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt);
8928 * If the rsm was app limited and it is
8936 * the rsm is being included in the current
8945 * between rsm's. We could do that by saving off the
8946 * pacing delay of each rsm (in an rsm) and then
8952 if (rsm->r_flags & RACK_APP_LIMITED) {
8962 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2);
8964 calc_conf, rsm, rsm->r_rtr_cnt);
8966 if ((rsm->r_flags & RACK_TLP) &&
8975 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
8977 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
8996 ((rsm->r_flags & RACK_OVERMAX) == 0)) {
9001 for (i = 0; i < rsm->r_rtr_cnt; i++) {
9002 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) {
9003 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
9014 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i]))
9015 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i];
9017 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i];
9018 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas);
9020 if ((i + 1) < rsm->r_rtr_cnt) {
9042 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) {
9044 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
9049 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
9050 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
9051 rsm->r_rtr_cnt);
9057 for (i = 0; i < rsm->r_rtr_cnt; i++) {
9058 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr);
9070 i = rsm->r_rtr_cnt - 1;
9071 t = cts - (uint32_t)rsm->r_tim_lastsent[i];
9099 (uint32_t)rsm->r_tim_lastsent[i]))) {
9101 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
9113 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
9117 struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t cts)
9124 /* Now start looking at rsm's */
9125 nrsm = rsm;
9128 if (nrsm == rsm) {
9151 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh;
9174 struct rack_sendmap *rsm,
9182 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9192 if (rsm->r_rtr_cnt > 1) {
9211 SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
9214 * up some of the rsm, we set RACK_USE_BEG
9219 tp->gput_seq = rsm->r_start;
9222 SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
9226 * deleting the rsm passed so basically
9234 tp->gput_seq = rsm->r_end;
9243 if (SEQ_GT(th_ack, rsm->r_end))
9246 tp->gput_seq = rsm->r_end;
9253 * Pick up the correct send time if we can; the rsm passed in
9266 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0];
9322 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm);
9327 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm)
9329 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) {
9333 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) {
9348 struct rack_sendmap *rsm, *nrsm, *prev, *next;
9355 rsm = *prsm;
9358 if ((rsm == NULL) ||
9359 (SEQ_LT(end, rsm->r_start)) ||
9360 (SEQ_GEQ(start, rsm->r_end)) ||
9361 (SEQ_LT(start, rsm->r_start))) {
9367 rsm = tqhash_find(rack->r_ctl.tqh, start);
9369 if (rsm == NULL) {
9373 /* Ok we have an ACK for some piece of this rsm */
9374 if (rsm->r_start != start) {
9375 if ((rsm->r_flags & RACK_ACKED) == 0) {
9380 if ((rsm->r_flags & RACK_TLP) &&
9381 (rsm->r_rtr_cnt > 1)) {
9387 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9394 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9398 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9399 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9403 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9404 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9409 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9410 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9413 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9420 * rsm |--------------|
9422 * rsm will become
9423 * rsm |---|
9437 next = tqhash_next(rack->r_ctl.tqh, rsm);
9439 (rsm->bindex == next->bindex) &&
9440 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9442 ((rsm->r_flags & RACK_IS_PCM) == 0) &&
9444 (rsm->r_flags & RACK_IN_GP_WIN) &&
9458 * rsm |------------| (not-acked)
9462 * rsm |------| (not-acked)
9470 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9472 tqhash_update_end(rack->r_ctl.tqh, rsm, start);
9474 rsm->r_flags |= RACK_SHUFFLED;
9477 rack_setup_offset_for_rsm(rack, rsm, next);
9507 /* We don't need to adjust rsm, it did not change */
9509 rsm->r_dupack = 0;
9510 rsm->r_just_ret = 0;
9511 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9520 if (rsm->r_flags & RACK_WAS_LOST) {
9525 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9537 * Now we want to go up from rsm (the
9541 * sack-passed on rsm (The one passed in
9546 if (rsm->r_in_tmap) {
9547 nrsm = TAILQ_NEXT(rsm, r_tnext);
9561 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
9565 rsm = tqhash_next(rack->r_ctl.tqh, next);
9566 if (rsm == NULL)
9573 * rsm |--------|
9577 * fall through after resetting rsm. So we
9579 * rsm |----|
9583 * rsm to nrsm, so the next block
9595 rack_clone_rsm(rack, nrsm, rsm, start);
9596 rsm->r_just_ret = 0;
9601 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
9602 nrsm, insret, rack, rsm);
9605 if (rsm->r_in_tmap) {
9606 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9609 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__);
9610 rsm->r_flags &= (~RACK_HAS_FIN);
9612 rsm = nrsm;
9617 if (end == rsm->r_end) {
9619 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9621 } else if (SEQ_LT(end, rsm->r_end)) {
9623 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9631 start = rsm->r_end;
9632 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
9633 if (rsm == NULL)
9638 if (SEQ_GEQ(end, rsm->r_end)) {
9642 * rsm --- |-----|
9647 if ((rsm->r_flags & RACK_ACKED) == 0) {
9651 if ((rsm->r_flags & RACK_TLP) &&
9652 (rsm->r_rtr_cnt > 1)) {
9658 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9664 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9668 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9669 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9673 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9674 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9679 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9680 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9683 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9686 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9687 changed += (rsm->r_end - rsm->r_start);
9689 if (rsm->r_flags & RACK_WAS_LOST) {
9692 my_chg = (rsm->r_end - rsm->r_start);
9693 rsm->r_flags &= ~RACK_WAS_LOST;
9695 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9701 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9702 if (rsm->r_in_tmap) /* should be true */
9703 rack_log_sack_passed(tp, rack, rsm, cts);
9705 if (rsm->r_flags & RACK_SACK_PASSED) {
9706 rsm->r_flags &= ~RACK_SACK_PASSED;
9712 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
9713 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
9714 rsm->r_flags |= RACK_ACKED;
9715 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
9716 if (rsm->r_in_tmap) {
9717 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
9718 rsm->r_in_tmap = 0;
9720 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__);
9724 if (end == rsm->r_end) {
9729 * There is more not covered by this rsm, move on
9732 nrsm = tqhash_next(rack->r_ctl.tqh, rsm);
9733 start = rsm->r_end;
9734 rsm = nrsm;
9735 if (rsm == NULL)
9741 * our rsm i.e.:
9742 * rsm --- |-----|
9745 if ((rsm->r_flags & RACK_ACKED) == 0) {
9749 if ((rsm->r_flags & RACK_TLP) &&
9750 (rsm->r_rtr_cnt > 1)) {
9756 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9762 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9766 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9767 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9771 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9772 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9777 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9778 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9781 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9789 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
9791 (rsm->bindex == prev->bindex) &&
9792 ((rsm->r_flags & RACK_STRADDLE) == 0) &&
9794 ((rsm->r_flags & RACK_IS_PCM) == 0) &&
9796 (rsm->r_flags & RACK_IN_GP_WIN) &&
9804 * Goal, we want the right remainder of rsm to shrink
9805 * in place and span from (rsm->r_start = end) to rsm->r_end.
9810 * rsm |-------| (non-acked)
9814 * rsm |-----| (non-acked)
9817 * Note if either prev/rsm is a TLP we don't
9821 memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
9823 rsm->r_start = end;
9824 rsm->r_flags |= RACK_SHUFFLED;
9831 rsm->r_dupack = 0;
9852 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
9862 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
9864 * Now that the rsm has had its start moved forward
9867 rack_setup_offset_for_rsm(rack, prev, rsm);
9879 if (rsm->r_flags & RACK_WAS_LOST) {
9884 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9895 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__);
9896 rsm = prev;
9909 if ((rsm->r_flags & RACK_TLP) &&
9910 (rsm->r_rtr_cnt > 1)) {
9916 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
9922 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
9926 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
9927 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9931 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
9932 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9937 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
9938 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
9941 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
9947 * nrsm->r_end = rsm->r_end;
9950 * rsm->r_end = nrsm->r_start;
9956 * rsm |----------| (not acked)
9959 * rsm |---| (acked)
9963 rack_clone_rsm(rack, nrsm, rsm, end);
9964 rsm->r_flags &= (~RACK_HAS_FIN);
9965 rsm->r_just_ret = 0;
9970 panic("Insert in tailq_hash of %p fails ret:% rack:%p rsm:%p",
9971 nrsm, insret, rack, rsm);
9974 if (rsm->r_in_tmap) {
9975 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
9980 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0);
9981 changed += (rsm->r_end - rsm->r_start);
9982 if (rsm->r_flags & RACK_WAS_LOST) {
9985 my_chg = (rsm->r_end - rsm->r_start);
9986 rsm->r_flags &= ~RACK_WAS_LOST;
9988 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
9994 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start);
9996 if (rsm->r_in_tmap) /* should be true */
9997 rack_log_sack_passed(tp, rack, rsm, cts);
9999 if (rsm->r_flags & RACK_SACK_PASSED) {
10000 rsm->r_flags &= ~RACK_SACK_PASSED;
10006 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END);
10007 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10008 rsm->r_flags |= RACK_ACKED;
10009 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end);
10010 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__);
10011 if (rsm->r_in_tmap) {
10012 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10013 rsm->r_in_tmap = 0;
10023 if (rsm &&
10024 ((rsm->r_flags & RACK_TLP) == 0) &&
10025 (rsm->r_flags & RACK_ACKED)) {
10031 next = tqhash_next(rack->r_ctl.tqh, rsm);
10037 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10040 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10044 if (rsm->bindex != next->bindex)
10046 if (rsm->r_flags & RACK_STRADDLE)
10048 if (rsm->r_flags & RACK_IS_PCM)
10056 rsm = rack_merge_rsm(rack, rsm, next);
10057 next = tqhash_next(rack->r_ctl.tqh, rsm);
10062 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10068 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) {
10071 if ((rsm->r_flags & RACK_IN_GP_WIN) &&
10075 if (rsm->bindex != prev->bindex)
10077 if (rsm->r_flags & RACK_STRADDLE)
10079 if (rsm->r_flags & RACK_IS_PCM)
10087 rsm = rack_merge_rsm(rack, prev, rsm);
10088 prev = tqhash_prev(rack->r_ctl.tqh, rsm);
10105 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
10110 while (rsm && (rsm->r_flags & RACK_ACKED)) {
10112 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10114 if (rsm->r_in_tmap) {
10115 panic("rack:%p rsm:%p flags:0x%x in tmap?",
10116 rack, rsm, rsm->r_flags);
10119 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS);
10122 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10123 tmap = rsm;
10125 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext);
10126 tmap = rsm;
10129 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
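
rack_peer_reneges() (10110-10129) undoes SACK state when the peer stops reporting previously SACKed data; a condensed summary of the loop in the matches:

	/* For each leading rsm still flagged RACK_ACKED:
	 *   rc_sacked -= (r_end - r_start);   // no longer counted as sacked
	 *   clear RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS;
	 *   re-insert into rc_tmap in send order (head first, then each one
	 *   after the previous), so the loss machinery can retransmit it. */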
10141 rack_rsm_sender_update(struct tcp_rack *rack, struct tcpcb *tp, struct rack_sendmap *rsm, uint8_t from)
10193 if (SEQ_GT(rsm->r_end, tp->gput_ack)) {
10194 tp->gput_ack = rsm->r_end;
10221 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <=
10224 if (rack_in_gp_window(tp, rsm)) {
10226 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end,
10227 __LINE__, from, rsm);
10234 struct rack_sendmap *rsm;
10305 rsm = tqhash_min(rack->r_ctl.tqh);
10306 if (rsm == NULL) {
10328 if (SEQ_LT(th_ack, rsm->r_start)) {
10332 rsm->r_start,
10337 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);
10340 if ((rsm->r_flags & RACK_TLP) &&
10341 (rsm->r_rtr_cnt > 1)) {
10343 * Yes, this rsm was a TLP and retransmitted, remember that
10352 (is_rsm_inside_declared_tlp_block(rack, rsm))) {
10358 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
10362 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
10363 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10367 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
10368 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10374 rack->r_ctl.last_tlp_acked_start = rsm->r_start;
10375 rack->r_ctl.last_tlp_acked_end = rsm->r_end;
10377 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
10381 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
10382 if (SEQ_GEQ(th_ack, rsm->r_end)) {
10387 if (rsm->r_flags & RACK_WAS_LOST) {
10393 rsm->r_flags &= ~RACK_WAS_LOST;
10394 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)),
10395 ("rsm:%p rack:%p rc_considered_lost goes negative", rsm, rack));
10396 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start))
10397 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start;
10401 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__);
10402 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes;
10403 rsm->r_rtr_bytes = 0;
10408 rack_rsm_sender_update(rack, tp, rsm, 4);
10409 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
10410 if (rsm->r_in_tmap) {
10411 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext);
10412 rsm->r_in_tmap = 0;
10415 if (rsm->r_flags & RACK_ACKED) {
10420 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start);
10422 } else if (rsm->r_flags & RACK_SACK_PASSED) {
10428 rsm->r_flags &= ~RACK_SACK_PASSED;
10429 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
10430 rsm->r_flags |= RACK_ACKED;
10441 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
10443 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end);
10445 if ((rsm->r_flags & RACK_TO_REXT) &&
10456 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) {
10461 left = th_ack - rsm->r_end;
10463 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK);
10465 rack_free(rack, rsm);
10470 rsm = tqhash_min(rack->r_ctl.tqh);
10471 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) {
10479 * given us snd_una up to (rsm->r_end).
10483 * our rsm->r_start in case we get an old ack
10486 rack_peer_reneges(rack, rsm, th_ack);
10490 if (rsm->r_flags & RACK_ACKED) {
10495 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
10497 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack);
10500 if (rsm->r_flags & RACK_WAS_LOST) {
10507 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)),
10508 ("rsm:%p rack:%p rc_considered_lost goes negative th_ack:%u", rsm, rack, th_ack));
10509 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start))
10510 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start;
10518 rsm->r_dupack = 0;
10519 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
10520 if (rsm->r_rtr_bytes) {
10527 ack_am = (th_ack - rsm->r_start);
10528 if (ack_am >= rsm->r_rtr_bytes) {
10530 rsm->r_rtr_bytes -= ack_am;
10538 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__);
10540 if (rsm->m &&
10541 ((rsm->orig_m_len != rsm->m->m_len) ||
10542 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
10544 rack_adjust_orig_mlen(rsm);
10546 rsm->soff += (th_ack - rsm->r_start);
10547 rack_rsm_sender_update(rack, tp, rsm, 5);
10555 m = rsm->m;
10556 soff = rsm->soff;
10561 (" rsm:%p off:%u soff:%u m:%p",
10562 rsm, rsm->soff, soff, m));
10569 * but tqhash_trim did update rsm->r_start so the offset calculation
10575 (rsm->r_start - tp->snd_una),
10583 rsm->m = m;
10584 rsm->soff = soff;
10585 rsm->orig_m_len = rsm->m->m_len;
10586 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
10591 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG);
10597 struct rack_sendmap *rsm;
10611 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
10612 if (rsm->r_flags & RACK_SACK_PASSED) {
10803 struct rack_sendmap *rsm;
10820 rsm = tqhash_min(rack->r_ctl.tqh);
10839 if (rsm && SEQ_GT(th_ack, rsm->r_start))
10840 changed = th_ack - rsm->r_start;
10969 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
10970 if (rsm &&
10971 SEQ_GT(sack_blocks[0].end, rsm->r_start) &&
10972 SEQ_LT(sack_blocks[0].start, rsm->r_end)) {
10977 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, segsiz);
11005 rsm = rack->r_ctl.rc_sacklast;
11007 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, segsiz);
11031 rsm = tcp_rack_output(tp, rack, tsused);
11033 rsm &&
11034 ((rsm->r_flags & RACK_MUST_RXT) == 0)) {
11050 rsm &&
11058 rack->r_ctl.rc_resend = rsm;
11064 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) &&
11081 struct rack_sendmap *rsm;
11083 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
11084 while (rsm) {
11089 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
11090 (rsm->r_flags & RACK_MUST_RXT)) {
11091 rsm = TAILQ_NEXT(rsm, r_tnext);
11096 if (rsm && (rsm->r_dupack < 0xff)) {
11097 rsm->r_dupack++;
11098 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) {
11104 * we will get a return of the rsm. For a non-sack
11105 * connection we will get the rsm returned if the
11117 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3);
11120 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3);
11614 struct rack_sendmap *rsm;
11623 rsm = tqhash_min(rack->r_ctl.tqh);
11624 if ((rsm == NULL) || (m == NULL)) {
11629 KASSERT((rsm->m == m),
11630 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
11631 rack, sb, rsm));
11632 while (rsm->m && (rsm->m == m)) {
11638 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
11639 if ((rsm->orig_m_len != m->m_len) ||
11640 (rsm->orig_t_space != M_TRAILINGROOM(m))){
11641 rack_adjust_orig_mlen(rsm);
11644 KASSERT((rsm->soff == 0),
11645 ("Rack:%p rsm:%p -- rsm at head but soff not zero",
11646 rack, rsm));
11649 if ((rsm->soff != soff) || (rsm->m != tm)) {
11658 rsm->m = tm;
11659 rsm->soff = soff;
11661 rsm->orig_m_len = rsm->m->m_len;
11662 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11664 rsm->orig_m_len = 0;
11665 rsm->orig_t_space = 0;
11668 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
11669 if (rsm->m) {
11670 rsm->orig_m_len = rsm->m->m_len;
11671 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
11673 rsm->orig_m_len = 0;
11674 rsm->orig_t_space = 0;
11677 rsm = tqhash_next(rack->r_ctl.tqh, rsm);
11678 if (rsm == NULL)
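
rack_adjust_sendmap_head() (11623-11678) re-synchronizes the leading map entries with the socket buffer after its head mbufs changed; a sketch of the invariant being restored (the restatement is ours, derived from the matches):

	/* Invariant: (rsm->m, rsm->soff) must name the same byte as
	 * sbsndmbuf(sb, rsm->r_start - snd_una, &soff).
	 * While the first rsm still points at the old head mbuf:
	 *   recompute (tm, soff); if they differ from (rsm->m, rsm->soff),
	 *   rewrite them and re-snapshot orig_m_len / orig_t_space. */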
12073 int dir, uint32_t flags, struct rack_sendmap *rsm)
12089 * 2 is log of a rsm being marked, 3 is a split.
12091 if (rsm == NULL)
12094 log.u_bbr.rttProp = (uintptr_t)rsm;
12128 struct rack_sendmap *nrsm, *rsm;
12135 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
12136 if (rsm == NULL) {
12142 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
12143 rack_log_collapse(rack, rsm->r_start, rsm->r_end,
12144 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
12147 /* We can't get a rsm, mark all? */
12148 nrsm = rsm;
12153 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
12158 panic("Insert in tailq_hash of %p fails ret:%d rack:%p rsm:%p",
12159 nrsm, insret, rack, rsm);
12162 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
12164 if (rsm->r_in_tmap) {
12165 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
12172 rsm = nrsm;
12176 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) {
12981 struct rack_sendmap *rsm;
12983 rsm = tqhash_min(rack->r_ctl.tqh);
12984 if (rsm) {
12985 if (rsm->r_flags & RACK_HAS_SYN) {
12986 rsm->r_flags &= ~RACK_HAS_SYN;
12987 rsm->r_start++;
12989 rack->r_ctl.rc_resend = rsm;
14267 struct rack_sendmap *rsm;
14279 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param);
14280 if (rsm == NULL) {
14284 reqr->sendmap_start = rsm->r_start;
14285 reqr->sendmap_end = rsm->r_end;
14286 reqr->sendmap_send_cnt = rsm->r_rtr_cnt;
14287 reqr->sendmap_fas = rsm->r_fas;
14291 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i];
14292 reqr->sendmap_ack_arrival = rsm->r_ack_arrival;
14293 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK;
14294 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes;
14295 reqr->sendmap_dupacks = rsm->r_dupack;
14297 rsm->r_start,
14298 rsm->r_end,
14299 rsm->r_flags);
14410 struct rack_sendmap *rsm, *ersm;
14423 rsm = rack_alloc(rack);
14424 if (rsm == NULL) {
14428 rsm->r_no_rtt_allowed = 1;
14429 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time);
14430 rsm->r_rtr_cnt = 1;
14431 rsm->r_rtr_bytes = 0;
14433 rsm->r_flags |= RACK_HAS_FIN;
14434 rsm->r_end = tp->snd_max;
14437 rsm->r_flags |= RACK_HAS_SYN;
14438 rsm->r_start = tp->iss;
14439 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una);
14441 rsm->r_start = tp->snd_una;
14442 rsm->r_dupack = 0;
14444 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff);
14445 if (rsm->m) {
14446 rsm->orig_m_len = rsm->m->m_len;
14447 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14449 rsm->orig_m_len = 0;
14450 rsm->orig_t_space = 0;
14457 rsm->m = NULL;
14458 rsm->orig_m_len = 0;
14459 rsm->orig_t_space = 0;
14460 rsm->soff = 0;
14463 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14464 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
14465 insret, rack, rsm);
14468 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14470 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14471 rsm->r_in_tmap = 1;
14488 rsm = rack_alloc(rack);
14489 if (rsm == NULL) {
14493 memset(rsm, 0, sizeof(struct rack_sendmap));
14494 /* Now configure the rsm and insert it */
14495 rsm->r_dupack = qr.sendmap_dupacks;
14496 rsm->r_start = qr.sendmap_start;
14497 rsm->r_end = qr.sendmap_end;
14499 rsm->r_fas = qr.sendmap_end;
14501 rsm->r_fas = rsm->r_start - tp->snd_una;
14507 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK;
14508 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes;
14509 rsm->r_rtr_cnt = qr.sendmap_send_cnt;
14510 rsm->r_ack_arrival = qr.sendmap_ack_arrival;
14511 for (i=0 ; i<rsm->r_rtr_cnt; i++)
14512 rsm->r_tim_lastsent[i] = qr.sendmap_time[i];
14513 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd,
14514 (rsm->r_start - tp->snd_una), &rsm->soff);
14515 if (rsm->m) {
14516 rsm->orig_m_len = rsm->m->m_len;
14517 rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
14519 rsm->orig_m_len = 0;
14520 rsm->orig_t_space = 0;
14523 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) {
14524 panic("Insert in tailq_hash fails ret:%d rack:%p rsm:%p",
14525 insret, rack, rsm);
14528 (void)tqhash_insert(rack->r_ctl.tqh, rsm);
14530 if ((rsm->r_flags & RACK_ACKED) == 0) {
14533 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) {
14540 rsm->r_in_tmap = 1;
14541 TAILQ_INSERT_BEFORE(ersm, rsm, r_tnext);
14545 if (rsm->r_in_tmap == 0) {
14549 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
14550 rsm->r_in_tmap = 1;
14554 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) {
14555 rack->r_ctl.rc_sacklast = rsm;
14559 rsm->r_start,
14560 rsm->r_end,
14561 rsm->r_flags);
15060 struct rack_sendmap *rsm;
15116 rsm = tqhash_min(rack->r_ctl.tqh);
15117 while (rsm) {
15118 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK);
15120 uma_zfree(rack_zone, rsm);
15121 rsm = tqhash_min(rack->r_ctl.tqh);
15123 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15124 while (rsm) {
15125 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
15129 uma_zfree(rack_zone, rsm);
15130 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15149 rsm = tqhash_min(rack->r_ctl.tqh);
15150 log.u_bbr.delRate = (uintptr_t)rsm;
15151 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
15152 log.u_bbr.cur_del_rate = (uintptr_t)rsm;
15235 struct rack_sendmap *rsm;
15251 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
15252 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) &&
15257 if (rsm == NULL) {
16393 struct rack_sendmap *rsm;
16867 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL)
16868 kern_prefetch(rsm, &prev_state);
17059 struct rack_sendmap *rsm = NULL;
17072 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
17073 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) {
17074 return (rsm);
17076 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) {
17079 rsm = rack_find_lowest_rsm(rack);
17080 if (rsm == NULL) {
17085 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) {
17090 return (rsm);
17092 if (rsm->r_flags & RACK_ACKED) {
17095 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) &&
17096 (rsm->r_dupack < DUP_ACK_THRESHOLD)) {
17101 idx = rsm->r_rtr_cnt - 1;
17102 ts_low = (uint32_t)rsm->r_tim_lastsent[idx];
17113 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
17114 ((rsm->r_flags & RACK_SACK_PASSED))) {
17123 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1);
17125 return (rsm);
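
Taken together, the checks at 17085-17125 reduce to a two-part test; simplified sketch:

	/* eligible  = (rsm->r_dupack >= DUP_ACK_THRESHOLD) ||
	 *             (rsm->r_flags & RACK_SACK_PASSED);
	 * timed_out = TSTMP_GEQ(tsused, ts_low + thresh);  // thresh from srtt
	 * return (eligible && timed_out) ? rsm : NULL;     // else arm a timer */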
17133 int line, struct rack_sendmap *rsm, uint8_t quality)
17188 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
17455 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz, int line)
17534 if ((rack->r_rr_config == 1) && rsm) {
17545 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped);
17817 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0);
17947 * We want to get to the rsm that is either
18480 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
18486 * at the entry is in the ts_val. The concept here is that the rsm
18528 rsm->r_flags |= RACK_TLP;
18531 rsm->r_flags &= ~RACK_TLP;
18533 startseq = rsm->r_start;
18541 if (rsm->r_flags & RACK_HAS_FIN) {
18628 th->th_seq = htonl(rsm->r_start);
18637 if ((rsm->r_flags & RACK_HAD_PUSH) &&
18638 (len == (rsm->r_end - rsm->r_start)))
18646 if (rsm->r_flags & RACK_TLP) {
18662 if (rsm->m == NULL)
18664 if (rsm->m &&
18665 ((rsm->orig_m_len != rsm->m->m_len) ||
18666 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) {
18668 rack_adjust_orig_mlen(rsm);
18670 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls);
18841 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
18842 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
18844 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
18869 if (rsm->r_rtr_cnt > 0) {
18874 log.u_bbr.flex5 = rsm->r_fas;
18875 log.u_bbr.bbr_substate = rsm->r_bas;
18886 log.u_bbr.rttProp = (uintptr_t)rsm;
18887 log.u_bbr.delRate = rsm->r_flags;
18932 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv),
18933 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz);
18940 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls);
18944 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
18945 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
18954 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
19622 struct rack_sendmap *rsm = NULL;
19626 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
19627 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
19633 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
19640 if (rsm->r_flags & RACK_ACKED) {
19645 rack->r_ctl.last_collapse_point = rsm->r_end;
19656 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
19657 rack_log_collapse(rack, rsm->r_start,
19658 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19659 thresh, __LINE__, 6, rsm->r_flags, rsm);
19660 return (rsm);
19663 rack_log_collapse(rack, rsm->r_start,
19664 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
19665 thresh, __LINE__, 7, rsm->r_flags, rsm);
19734 struct rack_sendmap *rsm = NULL;
20088 rsm = rack_alloc(rack);
20089 if (rsm == NULL) {
20097 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
20099 rsm = NULL;
20103 rsm = NULL;
20112 rsm = rack->r_ctl.rc_resend;
20114 len = rsm->r_end - rsm->r_start;
20117 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20118 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20120 rsm->r_start, tp->snd_una, tp, rack, rsm));
20121 sb_offset = rsm->r_start - tp->snd_una;
20124 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) {
20128 * since this rsm has its chance to retransmit now.
20131 rack->r_ctl.last_collapse_point = rsm->r_end;
20139 len = rsm->r_end - rsm->r_start;
20140 sb_offset = rsm->r_start - tp->snd_una;
20143 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) {
20146 ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
20152 if (SEQ_LT(rsm->r_start, tp->snd_una)) {
20153 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
20154 tp, rack, rsm, rsm->r_start, tp->snd_una);
20157 len = rsm->r_end - rsm->r_start;
20158 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20159 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20161 rsm->r_start, tp->snd_una, tp, rack, rsm));
20162 sb_offset = rsm->r_start - tp->snd_una;
20182 rsm = rack->r_ctl.rc_tlpsend;
20184 rsm->r_flags |= RACK_TLP;
20187 tlen = rsm->r_end - rsm->r_start;
20190 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
20191 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
20193 rsm->r_start, tp->snd_una, tp, rack, rsm));
20194 sb_offset = rsm->r_start - tp->snd_una;
20201 (rsm == NULL)) {
20234 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
20235 if (rsm == NULL) {
20243 if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
20250 rsm = NULL;
20254 len = rsm->r_end - rsm->r_start;
20255 sb_offset = rsm->r_start - tp->snd_una;
20273 * work with this rsm.
20281 if ((rsm == NULL) &&
20293 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
20304 if (rsm && rack->r_fsb_inited &&
20306 ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
20309 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp);
20570 } else if ((rsm == NULL) &&
20717 if ((rsm->r_flags & RACK_HAS_FIN) == 0)
20929 rsm = tqhash_max(rack->r_ctl.tqh);
20930 if (rsm) {
20935 rsm->r_just_ret = 1;
21049 rsm = tqhash_max(rack->r_ctl.tqh);
21050 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
21052 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
21060 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
21061 rack->r_ctl.rc_end_appl = rsm;
21063 rsm->r_flags |= RACK_APP_LIMITED;
21118 (rsm == NULL) &&
21137 if (rsm || sack_rxmit)
21173 if ((rsm == NULL) &&
21208 * no rsm to use, then we look at various bits,
21213 if (rsm == NULL) {
21222 rack_seq = rsm->r_start;
21559 if (rsm == NULL)
21572 if (rsm == NULL)
21579 ((rsm == NULL) ? hw_tls : 0)
21602 if (rsm && (rsm->r_flags & RACK_TLP)) {
21945 if (rsm) {
21946 if (rsm->r_flags & RACK_RWND_COLLAPSED) {
21947 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
21949 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
21959 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm);
21966 if (rsm && (rsm->r_rtr_cnt > 0)) {
21971 log.u_bbr.flex5 = rsm->r_fas;
21972 log.u_bbr.bbr_substate = rsm->r_bas;
21984 log.u_bbr.rttProp = (uintptr_t)rsm;
21986 if (rsm) {
21987 log.u_bbr.delRate = rsm->r_flags;
22041 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0),
22090 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
22108 rsm, add_flag, s_mb, s_moff, hw_tls, segsiz);
22117 if (rsm == NULL) {
22140 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls);
22141 if (rsm && doing_tlp) {
22144 rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
22145 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
22154 if (rsm && (doing_tlp == 0)) {
22195 } else if (rsm) {
22197 rsm->r_flags &= ~RACK_TLP;
22214 if (rsm && (doing_tlp == 0))
22215 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
22220 if (rsm == NULL) {
22464 if (IN_FASTRECOVERY(tp->t_flags) && rsm)
22505 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz, __LINE__);
22510 if (rsm) {
22511 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
22512 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
22542 (rsm == NULL) &&
22561 (rsm == NULL), optlen, __LINE__, 2);
22569 (rsm == NULL) &&
22646 struct rack_sendmap *rsm;
22664 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
22665 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);