1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 #include "opt_inet6.h" 32 #include "opt_ipsec.h" 33 #include "opt_tcpdebug.h" 34 #include "opt_ratelimit.h" 35 #include "opt_kern_tls.h" 36 #include <sys/param.h> 37 #include <sys/arb.h> 38 #include <sys/module.h> 39 #include <sys/kernel.h> 40 #ifdef TCP_HHOOK 41 #include <sys/hhook.h> 42 #endif 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/mbuf.h> 48 #include <sys/proc.h> /* for proc0 declaration */ 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sysctl.h> 52 #include <sys/systm.h> 53 #ifdef STATS 54 #include <sys/qmath.h> 55 #include <sys/tree.h> 56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 57 #else 58 #include <sys/tree.h> 59 #endif 60 #include <sys/refcount.h> 61 #include <sys/queue.h> 62 #include <sys/tim_filter.h> 63 #include <sys/smp.h> 64 #include <sys/kthread.h> 65 #include <sys/kern_prefetch.h> 66 #include <sys/protosw.h> 67 #ifdef TCP_ACCOUNTING 68 #include <sys/sched.h> 69 #include <machine/cpu.h> 70 #endif 71 #include <vm/uma.h> 72 73 #include <net/route.h> 74 #include <net/route/nhop.h> 75 #include <net/vnet.h> 76 77 #define TCPSTATES /* for logging */ 78 79 #include <netinet/in.h> 80 #include <netinet/in_kdtrace.h> 81 #include <netinet/in_pcb.h> 82 #include <netinet/ip.h> 83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 85 #include <netinet/ip_var.h> 86 #include <netinet/ip6.h> 87 #include <netinet6/in6_pcb.h> 88 #include <netinet6/ip6_var.h> 89 #include <netinet/tcp.h> 90 #define TCPOUTFLAGS 91 #include <netinet/tcp_fsm.h> 92 #include <netinet/tcp_log_buf.h> 93 #include <netinet/tcp_seq.h> 94 #include <netinet/tcp_timer.h> 95 #include <netinet/tcp_var.h> 96 #include <netinet/tcp_hpts.h> 97 #include <netinet/tcp_ratelimit.h> 98 #include <netinet/tcp_accounting.h> 99 #include <netinet/tcpip.h> 100 #include <netinet/cc/cc.h> 101 #include <netinet/cc/cc_newreno.h> 102 #include <netinet/tcp_fastopen.h> 103 #include <netinet/tcp_lro.h> 104 #ifdef 
NETFLIX_SHARED_CWND 105 #include <netinet/tcp_shared_cwnd.h> 106 #endif 107 #ifdef TCPDEBUG 108 #include <netinet/tcp_debug.h> 109 #endif /* TCPDEBUG */ 110 #ifdef TCP_OFFLOAD 111 #include <netinet/tcp_offload.h> 112 #endif 113 #ifdef INET6 114 #include <netinet6/tcp6_var.h> 115 #endif 116 117 #include <netipsec/ipsec_support.h> 118 119 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 120 #include <netipsec/ipsec.h> 121 #include <netipsec/ipsec6.h> 122 #endif /* IPSEC */ 123 124 #include <netinet/udp.h> 125 #include <netinet/udp_var.h> 126 #include <machine/in_cksum.h> 127 128 #ifdef MAC 129 #include <security/mac/mac_framework.h> 130 #endif 131 #include "sack_filter.h" 132 #include "tcp_rack.h" 133 #include "rack_bbr_common.h" 134 135 uma_zone_t rack_zone; 136 uma_zone_t rack_pcb_zone; 137 138 #ifndef TICKS2SBT 139 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 140 #endif 141 142 VNET_DECLARE(uint32_t, newreno_beta); 143 VNET_DECLARE(uint32_t, newreno_beta_ecn); 144 #define V_newreno_beta VNET(newreno_beta) 145 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 146 147 148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block"); 149 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options"); 150 151 struct sysctl_ctx_list rack_sysctl_ctx; 152 struct sysctl_oid *rack_sysctl_root; 153 154 #define CUM_ACKED 1 155 #define SACKED 2 156 157 /* 158 * The RACK module incorporates a number of 159 * TCP ideas that have been put out into the IETF 160 * over the last few years: 161 * - Matt Mathis's Rate Halving which slowly drops 162 * the congestion window so that the ack clock can 163 * be maintained during a recovery. 164 * - Yuchung Cheng's RACK TCP (for which it is named) that 165 * will stop us using the number of dup acks and instead 166 * use time as the gauge of when we retransmit. 167 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 168 * of Dukkipati et al. 169 * RACK depends on SACK, so if an endpoint arrives that 170 * cannot do SACK the state machine below will shuttle the 171 * connection back to using the "default" TCP stack that is 172 * in FreeBSD. 173 * 174 * To implement RACK the original TCP stack was first decomposed 175 * into a functional state machine with individual states 176 * for each of the possible TCP connection states. The do_segment 177 * function's role in life is to mandate that the connection supports SACK 178 * initially and then assure that the RACK state matches the connection 179 * state before calling the state's do_segment function. Each 180 * state is simplified due to the fact that the original do_segment 181 * has been decomposed and we *know* what state we are in (no 182 * switches on the state) and all tests for SACK are gone. This 183 * greatly simplifies what each state does. 184 * 185 * TCP output is also overwritten with a new version since it 186 * must maintain the new rack scoreboard.
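 *
 * Concretely, rack_do_segment() performs that validation and then
 * dispatches to the per-state handlers (rack_do_syn_sent(),
 * rack_do_established(), rack_do_fin_wait_1() and friends), while
 * rack_output() provides the rewritten output path.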
187 * 188 */ 189 static int32_t rack_tlp_thresh = 1; 190 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 191 static int32_t rack_tlp_use_greater = 1; 192 static int32_t rack_reorder_thresh = 2; 193 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 194 * - 60 seconds */ 195 static uint8_t rack_req_measurements = 1; 196 /* Attack threshold detections */ 197 static uint32_t rack_highest_sack_thresh_seen = 0; 198 static uint32_t rack_highest_move_thresh_seen = 0; 199 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 200 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */ 201 static int32_t rack_hw_rate_caps = 1; /* 1; */ 202 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 203 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 204 static int32_t rack_hw_up_only = 1; 205 static int32_t rack_stats_gets_ms_rtt = 1; 206 static int32_t rack_prr_addbackmax = 2; 207 static int32_t rack_do_hystart = 0; 208 209 static int32_t rack_pkt_delay = 1000; 210 static int32_t rack_send_a_lot_in_prr = 1; 211 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 212 static int32_t rack_verbose_logging = 0; 213 static int32_t rack_ignore_data_after_close = 1; 214 static int32_t rack_enable_shared_cwnd = 1; 215 static int32_t rack_use_cmp_acks = 1; 216 static int32_t rack_use_fsb = 1; 217 static int32_t rack_use_rfo = 1; 218 static int32_t rack_use_rsm_rfo = 1; 219 static int32_t rack_max_abc_post_recovery = 2; 220 static int32_t rack_client_low_buf = 0; 221 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 222 #ifdef TCP_ACCOUNTING 223 static int32_t rack_tcp_accounting = 0; 224 #endif 225 static int32_t rack_limits_scwnd = 1; 226 static int32_t rack_enable_mqueue_for_nonpaced = 0; 227 static int32_t rack_disable_prr = 0; 228 static int32_t use_rack_rr = 1; 229 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 230 static int32_t rack_persist_min = 250000; /* 250usec */ 231 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 232 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 233 static int32_t rack_default_init_window = 0; /* Use system default */ 234 static int32_t rack_limit_time_with_srtt = 0; 235 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 236 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */ 237 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 238 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 239 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 240 /* 241 * Currently regular tcp has a rto_min of 30ms 242 * the backoff goes 12 times so that ends up 243 * being a total of 122.850 seconds before a 244 * connection is killed. 
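 *
 * That is, 30ms doubled on each backoff sums to
 * 30ms * (1 + 2 + 4 + ... + 2^11) = 30ms * 4095 = 122.850 seconds.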
245 */ 246 static uint32_t rack_def_data_window = 20; 247 static uint32_t rack_goal_bdp = 2; 248 static uint32_t rack_min_srtts = 1; 249 static uint32_t rack_min_measure_usec = 0; 250 static int32_t rack_tlp_min = 10000; /* 10ms */ 251 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 252 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 253 static const int32_t rack_free_cache = 2; 254 static int32_t rack_hptsi_segments = 40; 255 static int32_t rack_rate_sample_method = USE_RTT_LOW; 256 static int32_t rack_pace_every_seg = 0; 257 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 258 static int32_t rack_slot_reduction = 4; 259 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 260 static int32_t rack_cwnd_block_ends_measure = 0; 261 static int32_t rack_rwnd_block_ends_measure = 0; 262 static int32_t rack_def_profile = 0; 263 264 static int32_t rack_lower_cwnd_at_tlp = 0; 265 static int32_t rack_limited_retran = 0; 266 static int32_t rack_always_send_oldest = 0; 267 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 268 269 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 270 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 271 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 272 273 /* Probertt */ 274 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 275 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 276 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 277 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 278 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 279 280 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 281 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 282 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 283 static uint32_t rack_probertt_use_min_rtt_exit = 0; 284 static uint32_t rack_probe_rtt_sets_cwnd = 0; 285 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 286 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 287 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 288 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 289 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 290 static uint32_t rack_probertt_filter_life = 10000000; 291 static uint32_t rack_probertt_lower_within = 10; 292 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 293 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 294 static int32_t rack_probertt_clear_is = 1; 295 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 296 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 297 298 /* Part of pacing */ 299 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 300 301 /* Timely information */ 302 /* Combine these two gives the range of 'no change' to bw */ 303 /* ie the up/down provide the upper and lower bound */ 304 static int32_t 
rack_gp_per_bw_mul_up = 2; /* 2% */ 305 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 306 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 307 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 308 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 309 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multipler */ 310 static int32_t rack_gp_increase_per = 2; /* 2% increase in multipler */ 311 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 312 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 313 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 314 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 315 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 316 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 317 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 318 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 319 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 320 static int32_t rack_use_max_for_nobackoff = 0; 321 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 322 static int32_t rack_timely_no_stopping = 0; 323 static int32_t rack_down_raise_thresh = 100; 324 static int32_t rack_req_segs = 1; 325 static uint64_t rack_bw_rate_cap = 0; 326 327 /* Weird delayed ack mode */ 328 static int32_t rack_use_imac_dack = 0; 329 /* Rack specific counters */ 330 counter_u64_t rack_badfr; 331 counter_u64_t rack_badfr_bytes; 332 counter_u64_t rack_rtm_prr_retran; 333 counter_u64_t rack_rtm_prr_newdata; 334 counter_u64_t rack_timestamp_mismatch; 335 counter_u64_t rack_reorder_seen; 336 counter_u64_t rack_paced_segments; 337 counter_u64_t rack_unpaced_segments; 338 counter_u64_t rack_calc_zero; 339 counter_u64_t rack_calc_nonzero; 340 counter_u64_t rack_saw_enobuf; 341 counter_u64_t rack_saw_enobuf_hw; 342 counter_u64_t rack_saw_enetunreach; 343 counter_u64_t rack_per_timer_hole; 344 counter_u64_t rack_large_ackcmp; 345 counter_u64_t rack_small_ackcmp; 346 #ifdef INVARIANTS 347 counter_u64_t rack_adjust_map_bw; 348 #endif 349 /* Tail loss probe counters */ 350 counter_u64_t rack_tlp_tot; 351 counter_u64_t rack_tlp_newdata; 352 counter_u64_t rack_tlp_retran; 353 counter_u64_t rack_tlp_retran_bytes; 354 counter_u64_t rack_tlp_retran_fail; 355 counter_u64_t rack_to_tot; 356 counter_u64_t rack_to_arm_rack; 357 counter_u64_t rack_to_arm_tlp; 358 counter_u64_t rack_hot_alloc; 359 counter_u64_t rack_to_alloc; 360 counter_u64_t rack_to_alloc_hard; 361 counter_u64_t rack_to_alloc_emerg; 362 counter_u64_t rack_to_alloc_limited; 363 counter_u64_t rack_alloc_limited_conns; 364 counter_u64_t rack_split_limited; 365 366 #define MAX_NUM_OF_CNTS 13 367 counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS]; 368 counter_u64_t rack_multi_single_eq; 369 counter_u64_t rack_proc_non_comp_ack; 370 371 counter_u64_t rack_fto_send; 372 counter_u64_t rack_fto_rsm_send; 373 counter_u64_t rack_nfto_resend; 374 counter_u64_t rack_non_fto_send; 375 counter_u64_t rack_extended_rfo; 376 377 counter_u64_t rack_sack_proc_all; 378 counter_u64_t rack_sack_proc_short; 379 counter_u64_t rack_sack_proc_restart; 380 counter_u64_t rack_sack_attacks_detected; 381 counter_u64_t rack_sack_attacks_reversed; 
382 counter_u64_t rack_sack_used_next_merge; 383 counter_u64_t rack_sack_splits; 384 counter_u64_t rack_sack_used_prev_merge; 385 counter_u64_t rack_sack_skipped_acked; 386 counter_u64_t rack_ack_total; 387 counter_u64_t rack_express_sack; 388 counter_u64_t rack_sack_total; 389 counter_u64_t rack_move_none; 390 counter_u64_t rack_move_some; 391 392 counter_u64_t rack_used_tlpmethod; 393 counter_u64_t rack_used_tlpmethod2; 394 counter_u64_t rack_enter_tlp_calc; 395 counter_u64_t rack_input_idle_reduces; 396 counter_u64_t rack_collapsed_win; 397 counter_u64_t rack_tlp_does_nada; 398 counter_u64_t rack_try_scwnd; 399 counter_u64_t rack_hw_pace_init_fail; 400 counter_u64_t rack_hw_pace_lost; 401 counter_u64_t rack_sbsndptr_right; 402 counter_u64_t rack_sbsndptr_wrong; 403 404 /* Temp CPU counters */ 405 counter_u64_t rack_find_high; 406 407 counter_u64_t rack_progress_drops; 408 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 409 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 410 411 412 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 413 414 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 415 (tv) = (value) + slop; \ 416 if ((u_long)(tv) < (u_long)(tvmin)) \ 417 (tv) = (tvmin); \ 418 if ((u_long)(tv) > (u_long)(tvmax)) \ 419 (tv) = (tvmax); \ 420 } while (0) 421 422 static void 423 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 424 425 static int 426 rack_process_ack(struct mbuf *m, struct tcphdr *th, 427 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 428 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 429 static int 430 rack_process_data(struct mbuf *m, struct tcphdr *th, 431 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 432 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 433 static void 434 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 435 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 436 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 437 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 438 uint8_t limit_type); 439 static struct rack_sendmap * 440 rack_check_recovery_mode(struct tcpcb *tp, 441 uint32_t tsused); 442 static void 443 rack_cong_signal(struct tcpcb *tp, 444 uint32_t type, uint32_t ack); 445 static void rack_counter_destroy(void); 446 static int 447 rack_ctloutput(struct socket *so, struct sockopt *sopt, 448 struct inpcb *inp, struct tcpcb *tp); 449 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 450 static void 451 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 452 static void 453 rack_do_segment(struct mbuf *m, struct tcphdr *th, 454 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 455 uint8_t iptos); 456 static void rack_dtor(void *mem, int32_t size, void *arg); 457 static void 458 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 459 uint32_t flex1, uint32_t flex2, 460 uint32_t flex3, uint32_t flex4, 461 uint32_t flex5, uint32_t flex6, 462 uint16_t flex7, uint8_t mod); 463 464 static void 465 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 466 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 467 struct rack_sendmap *rsm, uint8_t quality); 468 static struct rack_sendmap * 469 rack_find_high_nonack(struct tcp_rack *rack, 470 struct rack_sendmap *rsm); 471 static struct 
rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 472 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 473 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 474 static int 475 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 476 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 477 static void 478 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 479 tcp_seq th_ack, int line, uint8_t quality); 480 static uint32_t 481 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 482 static int32_t rack_handoff_ok(struct tcpcb *tp); 483 static int32_t rack_init(struct tcpcb *tp); 484 static void rack_init_sysctls(void); 485 static void 486 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 487 struct tcphdr *th, int entered_rec, int dup_ack_struck); 488 static void 489 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 490 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts, 491 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 492 493 static void 494 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 495 struct rack_sendmap *rsm); 496 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 497 static int32_t rack_output(struct tcpcb *tp); 498 499 static uint32_t 500 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 501 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 502 uint32_t cts, int *moved_two); 503 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 504 static void rack_remxt_tmr(struct tcpcb *tp); 505 static int 506 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 507 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 508 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 509 static int32_t rack_stopall(struct tcpcb *tp); 510 static void 511 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 512 uint32_t delta); 513 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 514 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 515 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 516 static uint32_t 517 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 518 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 519 static void 520 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 521 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 522 static int 523 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 524 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 525 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 526 static int 527 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 528 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 529 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 530 static int 531 rack_do_closing(struct mbuf *m, struct tcphdr *th, 532 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 533 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 534 static int 535 rack_do_established(struct mbuf *m, struct tcphdr *th, 536 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 537 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, 
uint8_t iptos); 538 static int 539 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 540 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 541 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 542 static int 543 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 544 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 545 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 546 static int 547 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 548 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 549 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 550 static int 551 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 552 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 553 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 554 static int 555 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 556 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 557 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 558 static int 559 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 560 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 561 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 562 struct rack_sendmap * 563 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 564 uint32_t tsused); 565 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 566 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 567 static void 568 tcp_rack_partialack(struct tcpcb *tp); 569 static int 570 rack_set_profile(struct tcp_rack *rack, int prof); 571 static void 572 rack_apply_deferred_options(struct tcp_rack *rack); 573 574 int32_t rack_clear_counter=0; 575 576 static void 577 rack_set_cc_pacing(struct tcp_rack *rack) 578 { 579 struct sockopt sopt; 580 struct cc_newreno_opts opt; 581 struct newreno old, *ptr; 582 struct tcpcb *tp; 583 int error; 584 585 if (rack->rc_pacing_cc_set) 586 return; 587 588 tp = rack->rc_tp; 589 if (tp->cc_algo == NULL) { 590 /* Tcb is leaving */ 591 printf("No cc algorithm?\n"); 592 return; 593 } 594 rack->rc_pacing_cc_set = 1; 595 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 596 /* Not new-reno we can't play games with beta! */ 597 goto out; 598 } 599 ptr = ((struct newreno *)tp->ccv->cc_data); 600 if (CC_ALGO(tp)->ctl_output == NULL) { 601 /* Huh, why does new_reno no longer have a set function? */ 602 goto out; 603 } 604 if (ptr == NULL) { 605 /* Just the default values */ 606 old.beta = V_newreno_beta_ecn; 607 old.beta_ecn = V_newreno_beta_ecn; 608 old.newreno_flags = 0; 609 } else { 610 old.beta = ptr->beta; 611 old.beta_ecn = ptr->beta_ecn; 612 old.newreno_flags = ptr->newreno_flags; 613 } 614 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 615 sopt.sopt_dir = SOPT_SET; 616 opt.name = CC_NEWRENO_BETA; 617 opt.val = rack->r_ctl.rc_saved_beta.beta; 618 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 619 if (error) { 620 goto out; 621 } 622 /* 623 * Hack alert we need to set in our newreno_flags 624 * so that Abe behavior is also applied. 
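 * (ABE, Alternative Backoff with ECN from RFC 8511, applies the
 * separate beta_ecn backoff factor when congestion is signalled by
 * ECN rather than by loss.)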
625 */ 626 ((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 627 opt.name = CC_NEWRENO_BETA_ECN; 628 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 629 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 630 if (error) { 631 goto out; 632 } 633 /* Save off the original values for restoral */ 634 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 635 out: 636 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 637 union tcp_log_stackspecific log; 638 struct timeval tv; 639 640 ptr = ((struct newreno *)tp->ccv->cc_data); 641 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 642 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 643 if (ptr) { 644 log.u_bbr.flex1 = ptr->beta; 645 log.u_bbr.flex2 = ptr->beta_ecn; 646 log.u_bbr.flex3 = ptr->newreno_flags; 647 } 648 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 649 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 650 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 651 log.u_bbr.flex7 = rack->gp_ready; 652 log.u_bbr.flex7 <<= 1; 653 log.u_bbr.flex7 |= rack->use_fixed_rate; 654 log.u_bbr.flex7 <<= 1; 655 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 656 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 657 log.u_bbr.flex8 = 3; 658 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 659 0, &log, false, NULL, NULL, 0, &tv); 660 } 661 } 662 663 static void 664 rack_undo_cc_pacing(struct tcp_rack *rack) 665 { 666 struct newreno old, *ptr; 667 struct tcpcb *tp; 668 669 if (rack->rc_pacing_cc_set == 0) 670 return; 671 tp = rack->rc_tp; 672 rack->rc_pacing_cc_set = 0; 673 if (tp->cc_algo == NULL) 674 /* Tcb is leaving */ 675 return; 676 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 677 /* Not new-reno nothing to do! */ 678 return; 679 } 680 ptr = ((struct newreno *)tp->ccv->cc_data); 681 if (ptr == NULL) { 682 /* 683 * This happens at rack_fini() if the 684 * cc module gets freed on us. In that 685 * case we loose our "new" settings but 686 * thats ok, since the tcb is going away anyway. 687 */ 688 return; 689 } 690 /* Grab out our set values */ 691 memcpy(&old, ptr, sizeof(struct newreno)); 692 /* Copy back in the original values */ 693 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 694 /* Now save back the values we had set in (for when pacing is restored) */ 695 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 696 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 697 union tcp_log_stackspecific log; 698 struct timeval tv; 699 700 ptr = ((struct newreno *)tp->ccv->cc_data); 701 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 702 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 703 log.u_bbr.flex1 = ptr->beta; 704 log.u_bbr.flex2 = ptr->beta_ecn; 705 log.u_bbr.flex3 = ptr->newreno_flags; 706 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 707 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 708 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 709 log.u_bbr.flex7 = rack->gp_ready; 710 log.u_bbr.flex7 <<= 1; 711 log.u_bbr.flex7 |= rack->use_fixed_rate; 712 log.u_bbr.flex7 <<= 1; 713 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 714 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 715 log.u_bbr.flex8 = 4; 716 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 717 0, &log, false, NULL, NULL, 0, &tv); 718 } 719 } 720 721 #ifdef NETFLIX_PEAKRATE 722 static inline void 723 rack_update_peakrate_thr(struct tcpcb *tp) 724 { 725 /* Keep in mind that t_maxpeakrate is in B/s. 
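 * The threshold computed below is the larger of two maximum-sized
 * segments and the number of bytes t_maxpeakrate allows in one
 * smoothed RTT (the HPTS_USEC_IN_SEC divisor assumes t_srtt is kept
 * in microseconds here).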
*/ 726 uint64_t peak; 727 peak = uqmax((tp->t_maxseg * 2), 728 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 729 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 730 } 731 #endif 732 733 static int 734 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 735 { 736 uint32_t stat; 737 int32_t error; 738 int i; 739 740 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 741 if (error || req->newptr == NULL) 742 return error; 743 744 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 745 if (error) 746 return (error); 747 if (stat == 1) { 748 #ifdef INVARIANTS 749 printf("Clearing RACK counters\n"); 750 #endif 751 counter_u64_zero(rack_badfr); 752 counter_u64_zero(rack_badfr_bytes); 753 counter_u64_zero(rack_rtm_prr_retran); 754 counter_u64_zero(rack_rtm_prr_newdata); 755 counter_u64_zero(rack_timestamp_mismatch); 756 counter_u64_zero(rack_reorder_seen); 757 counter_u64_zero(rack_tlp_tot); 758 counter_u64_zero(rack_tlp_newdata); 759 counter_u64_zero(rack_tlp_retran); 760 counter_u64_zero(rack_tlp_retran_bytes); 761 counter_u64_zero(rack_tlp_retran_fail); 762 counter_u64_zero(rack_to_tot); 763 counter_u64_zero(rack_to_arm_rack); 764 counter_u64_zero(rack_to_arm_tlp); 765 counter_u64_zero(rack_paced_segments); 766 counter_u64_zero(rack_calc_zero); 767 counter_u64_zero(rack_calc_nonzero); 768 counter_u64_zero(rack_unpaced_segments); 769 counter_u64_zero(rack_saw_enobuf); 770 counter_u64_zero(rack_saw_enobuf_hw); 771 counter_u64_zero(rack_saw_enetunreach); 772 counter_u64_zero(rack_per_timer_hole); 773 counter_u64_zero(rack_large_ackcmp); 774 counter_u64_zero(rack_small_ackcmp); 775 #ifdef INVARIANTS 776 counter_u64_zero(rack_adjust_map_bw); 777 #endif 778 counter_u64_zero(rack_to_alloc_hard); 779 counter_u64_zero(rack_to_alloc_emerg); 780 counter_u64_zero(rack_sack_proc_all); 781 counter_u64_zero(rack_fto_send); 782 counter_u64_zero(rack_fto_rsm_send); 783 counter_u64_zero(rack_extended_rfo); 784 counter_u64_zero(rack_hw_pace_init_fail); 785 counter_u64_zero(rack_hw_pace_lost); 786 counter_u64_zero(rack_sbsndptr_wrong); 787 counter_u64_zero(rack_sbsndptr_right); 788 counter_u64_zero(rack_non_fto_send); 789 counter_u64_zero(rack_nfto_resend); 790 counter_u64_zero(rack_sack_proc_short); 791 counter_u64_zero(rack_sack_proc_restart); 792 counter_u64_zero(rack_to_alloc); 793 counter_u64_zero(rack_to_alloc_limited); 794 counter_u64_zero(rack_alloc_limited_conns); 795 counter_u64_zero(rack_split_limited); 796 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 797 counter_u64_zero(rack_proc_comp_ack[i]); 798 } 799 counter_u64_zero(rack_multi_single_eq); 800 counter_u64_zero(rack_proc_non_comp_ack); 801 counter_u64_zero(rack_find_high); 802 counter_u64_zero(rack_sack_attacks_detected); 803 counter_u64_zero(rack_sack_attacks_reversed); 804 counter_u64_zero(rack_sack_used_next_merge); 805 counter_u64_zero(rack_sack_used_prev_merge); 806 counter_u64_zero(rack_sack_splits); 807 counter_u64_zero(rack_sack_skipped_acked); 808 counter_u64_zero(rack_ack_total); 809 counter_u64_zero(rack_express_sack); 810 counter_u64_zero(rack_sack_total); 811 counter_u64_zero(rack_move_none); 812 counter_u64_zero(rack_move_some); 813 counter_u64_zero(rack_used_tlpmethod); 814 counter_u64_zero(rack_used_tlpmethod2); 815 counter_u64_zero(rack_enter_tlp_calc); 816 counter_u64_zero(rack_progress_drops); 817 counter_u64_zero(rack_tlp_does_nada); 818 counter_u64_zero(rack_try_scwnd); 819 counter_u64_zero(rack_collapsed_win); 820 } 821 rack_clear_counter = 0; 822 return (0); 823 } 824 825 static void 
826 rack_init_sysctls(void) 827 { 828 int i; 829 struct sysctl_oid *rack_counters; 830 struct sysctl_oid *rack_attack; 831 struct sysctl_oid *rack_pacing; 832 struct sysctl_oid *rack_timely; 833 struct sysctl_oid *rack_timers; 834 struct sysctl_oid *rack_tlp; 835 struct sysctl_oid *rack_misc; 836 struct sysctl_oid *rack_features; 837 struct sysctl_oid *rack_measure; 838 struct sysctl_oid *rack_probertt; 839 struct sysctl_oid *rack_hw_pacing; 840 841 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 842 SYSCTL_CHILDREN(rack_sysctl_root), 843 OID_AUTO, 844 "sack_attack", 845 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 846 "Rack Sack Attack Counters and Controls"); 847 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 848 SYSCTL_CHILDREN(rack_sysctl_root), 849 OID_AUTO, 850 "stats", 851 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 852 "Rack Counters"); 853 SYSCTL_ADD_S32(&rack_sysctl_ctx, 854 SYSCTL_CHILDREN(rack_sysctl_root), 855 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 856 &rack_rate_sample_method , USE_RTT_LOW, 857 "What method should we use for rate sampling 0=high, 1=low "); 858 /* Probe rtt related controls */ 859 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 860 SYSCTL_CHILDREN(rack_sysctl_root), 861 OID_AUTO, 862 "probertt", 863 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 864 "ProbeRTT related Controls"); 865 SYSCTL_ADD_U16(&rack_sysctl_ctx, 866 SYSCTL_CHILDREN(rack_probertt), 867 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 868 &rack_atexit_prtt_hbp, 130, 869 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 870 SYSCTL_ADD_U16(&rack_sysctl_ctx, 871 SYSCTL_CHILDREN(rack_probertt), 872 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 873 &rack_atexit_prtt, 130, 874 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 875 SYSCTL_ADD_U16(&rack_sysctl_ctx, 876 SYSCTL_CHILDREN(rack_probertt), 877 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 878 &rack_per_of_gp_probertt, 60, 879 "What percentage of goodput do we pace at in probertt"); 880 SYSCTL_ADD_U16(&rack_sysctl_ctx, 881 SYSCTL_CHILDREN(rack_probertt), 882 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 883 &rack_per_of_gp_probertt_reduce, 10, 884 "What percentage of goodput do we reduce every gp_srtt"); 885 SYSCTL_ADD_U16(&rack_sysctl_ctx, 886 SYSCTL_CHILDREN(rack_probertt), 887 OID_AUTO, "gp_per_low", CTLFLAG_RW, 888 &rack_per_of_gp_lowthresh, 40, 889 "What percentage of goodput do we allow the multiplier to fall to"); 890 SYSCTL_ADD_U32(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_probertt), 892 OID_AUTO, "time_between", CTLFLAG_RW, 893 & rack_time_between_probertt, 96000000, 894 "How many useconds between the lowest rtt falling must past before we enter probertt"); 895 SYSCTL_ADD_U32(&rack_sysctl_ctx, 896 SYSCTL_CHILDREN(rack_probertt), 897 OID_AUTO, "safety", CTLFLAG_RW, 898 &rack_probe_rtt_safety_val, 2000000, 899 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 900 SYSCTL_ADD_U32(&rack_sysctl_ctx, 901 SYSCTL_CHILDREN(rack_probertt), 902 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 903 &rack_probe_rtt_sets_cwnd, 0, 904 "Do we set the cwnd too (if always_lower is on)"); 905 SYSCTL_ADD_U32(&rack_sysctl_ctx, 906 SYSCTL_CHILDREN(rack_probertt), 907 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 908 &rack_max_drain_wait, 2, 909 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 910 SYSCTL_ADD_U32(&rack_sysctl_ctx, 911 SYSCTL_CHILDREN(rack_probertt), 912 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 913 &rack_must_drain, 1, 914 "We must drain this many gp_srtt's waiting for flight 
to reach goal"); 915 SYSCTL_ADD_U32(&rack_sysctl_ctx, 916 SYSCTL_CHILDREN(rack_probertt), 917 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 918 &rack_probertt_use_min_rtt_entry, 1, 919 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 920 SYSCTL_ADD_U32(&rack_sysctl_ctx, 921 SYSCTL_CHILDREN(rack_probertt), 922 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 923 &rack_probertt_use_min_rtt_exit, 0, 924 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 925 SYSCTL_ADD_U32(&rack_sysctl_ctx, 926 SYSCTL_CHILDREN(rack_probertt), 927 OID_AUTO, "length_div", CTLFLAG_RW, 928 &rack_probertt_gpsrtt_cnt_div, 0, 929 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 930 SYSCTL_ADD_U32(&rack_sysctl_ctx, 931 SYSCTL_CHILDREN(rack_probertt), 932 OID_AUTO, "length_mul", CTLFLAG_RW, 933 &rack_probertt_gpsrtt_cnt_mul, 0, 934 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 935 SYSCTL_ADD_U32(&rack_sysctl_ctx, 936 SYSCTL_CHILDREN(rack_probertt), 937 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 938 &rack_min_probertt_hold, 200000, 939 "What is the minimum time we hold probertt at target"); 940 SYSCTL_ADD_U32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_probertt), 942 OID_AUTO, "filter_life", CTLFLAG_RW, 943 &rack_probertt_filter_life, 10000000, 944 "What is the time for the filters life in useconds"); 945 SYSCTL_ADD_U32(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_probertt), 947 OID_AUTO, "lower_within", CTLFLAG_RW, 948 &rack_probertt_lower_within, 10, 949 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 950 SYSCTL_ADD_U32(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_probertt), 952 OID_AUTO, "must_move", CTLFLAG_RW, 953 &rack_min_rtt_movement, 250, 954 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 955 SYSCTL_ADD_U32(&rack_sysctl_ctx, 956 SYSCTL_CHILDREN(rack_probertt), 957 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 958 &rack_probertt_clear_is, 1, 959 "Do we clear I/S counts on exiting probe-rtt"); 960 SYSCTL_ADD_S32(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_probertt), 962 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 963 &rack_max_drain_hbp, 1, 964 "How many extra drain gpsrtt's do we get in highly buffered paths"); 965 SYSCTL_ADD_S32(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_probertt), 967 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 968 &rack_hbp_thresh, 3, 969 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 970 /* Pacing related sysctls */ 971 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 972 SYSCTL_CHILDREN(rack_sysctl_root), 973 OID_AUTO, 974 "pacing", 975 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 976 "Pacing related Controls"); 977 SYSCTL_ADD_S32(&rack_sysctl_ctx, 978 SYSCTL_CHILDREN(rack_pacing), 979 OID_AUTO, "max_pace_over", CTLFLAG_RW, 980 &rack_max_per_above, 30, 981 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 982 SYSCTL_ADD_S32(&rack_sysctl_ctx, 983 SYSCTL_CHILDREN(rack_pacing), 984 OID_AUTO, "pace_to_one", CTLFLAG_RW, 985 &rack_pace_one_seg, 0, 986 "Do we allow low b/w pacing of 1MSS instead of two"); 987 SYSCTL_ADD_S32(&rack_sysctl_ctx, 988 SYSCTL_CHILDREN(rack_pacing), 989 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 990 &rack_limit_time_with_srtt, 0, 991 "Do we limit pacing time based on srtt"); 992 SYSCTL_ADD_S32(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_pacing), 994 OID_AUTO, "init_win", CTLFLAG_RW, 995 &rack_default_init_window, 0, 
996 "Do we have a rack initial window 0 = system default"); 997 SYSCTL_ADD_U16(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_pacing), 999 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1000 &rack_per_of_gp_ss, 250, 1001 "If non zero, what percentage of goodput to pace at in slow start"); 1002 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_pacing), 1004 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1005 &rack_per_of_gp_ca, 150, 1006 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1007 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_pacing), 1009 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1010 &rack_per_of_gp_rec, 200, 1011 "If non zero, what percentage of goodput to pace at in recovery"); 1012 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_pacing), 1014 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1015 &rack_hptsi_segments, 40, 1016 "What size is the max for TSO segments in pacing and burst mitigation"); 1017 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_pacing), 1019 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1020 &rack_slot_reduction, 4, 1021 "When doing only burst mitigation what is the reduce divisor"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_sysctl_root), 1024 OID_AUTO, "use_pacing", CTLFLAG_RW, 1025 &rack_pace_every_seg, 0, 1026 "If set we use pacing, if clear we use only the original burst mitigation"); 1027 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_pacing), 1029 OID_AUTO, "rate_cap", CTLFLAG_RW, 1030 &rack_bw_rate_cap, 0, 1031 "If set we apply this value to the absolute rate cap used by pacing"); 1032 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_sysctl_root), 1034 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1035 &rack_req_measurements, 1, 1036 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1037 /* Hardware pacing */ 1038 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1039 SYSCTL_CHILDREN(rack_sysctl_root), 1040 OID_AUTO, 1041 "hdwr_pacing", 1042 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1043 "Pacing related Controls"); 1044 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1045 SYSCTL_CHILDREN(rack_hw_pacing), 1046 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1047 &rack_hw_rwnd_factor, 2, 1048 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1049 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_hw_pacing), 1051 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1052 &rack_enobuf_hw_boost_mult, 2, 1053 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1054 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_hw_pacing), 1056 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1057 &rack_enobuf_hw_max, 2, 1058 "What is the max boost the pacing time if we see a ENOBUFS?"); 1059 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_hw_pacing), 1061 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1062 &rack_enobuf_hw_min, 2, 1063 "What is the min boost the pacing time if we see a ENOBUFS?"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_hw_pacing), 1066 OID_AUTO, "enable", CTLFLAG_RW, 1067 &rack_enable_hw_pacing, 0, 1068 "Should RACK attempt to use hw pacing?"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_hw_pacing), 1071 OID_AUTO, "rate_cap", CTLFLAG_RW, 1072 &rack_hw_rate_caps, 1, 1073 "Does the highest hardware pacing rate cap the rate we will send at??"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_hw_pacing), 1076 OID_AUTO, "rate_min", CTLFLAG_RW, 
1077 &rack_hw_rate_min, 0, 1078 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1079 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_hw_pacing), 1081 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1082 &rack_hw_rate_to_low, 0, 1083 "If we fall below this rate, dis-engage hw pacing?"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_hw_pacing), 1086 OID_AUTO, "up_only", CTLFLAG_RW, 1087 &rack_hw_up_only, 1, 1088 "Do we allow hw pacing to lower the rate selected?"); 1089 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_hw_pacing), 1091 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1092 &rack_hw_pace_extra_slots, 2, 1093 "If the rates between software and hardware match precisely, how many extra time_betweens do we get?"); 1094 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_sysctl_root), 1096 OID_AUTO, 1097 "timely", 1098 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1099 "Rack Timely RTT Controls"); 1100 /* Timely based GP dynamics */ 1101 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1102 SYSCTL_CHILDREN(rack_timely), 1103 OID_AUTO, "upper", CTLFLAG_RW, 1104 &rack_gp_per_bw_mul_up, 2, 1105 "Rack timely upper range for equal b/w (in percentage)"); 1106 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1107 SYSCTL_CHILDREN(rack_timely), 1108 OID_AUTO, "lower", CTLFLAG_RW, 1109 &rack_gp_per_bw_mul_down, 4, 1110 "Rack timely lower range for equal b/w (in percentage)"); 1111 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1112 SYSCTL_CHILDREN(rack_timely), 1113 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1114 &rack_gp_rtt_maxmul, 3, 1115 "Rack timely multiplier of lowest rtt for rtt_max"); 1116 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1117 SYSCTL_CHILDREN(rack_timely), 1118 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1119 &rack_gp_rtt_mindiv, 4, 1120 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1121 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1122 SYSCTL_CHILDREN(rack_timely), 1123 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1124 &rack_gp_rtt_minmul, 1, 1125 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1126 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1127 SYSCTL_CHILDREN(rack_timely), 1128 OID_AUTO, "decrease", CTLFLAG_RW, 1129 &rack_gp_decrease_per, 20, 1130 "Rack timely decrease percentage of our GP multiplication factor"); 1131 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1132 SYSCTL_CHILDREN(rack_timely), 1133 OID_AUTO, "increase", CTLFLAG_RW, 1134 &rack_gp_increase_per, 2, 1135 "Rack timely increase percentage of our GP multiplication factor"); 1136 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1137 SYSCTL_CHILDREN(rack_timely), 1138 OID_AUTO, "lowerbound", CTLFLAG_RW, 1139 &rack_per_lower_bound, 50, 1140 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1141 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1142 SYSCTL_CHILDREN(rack_timely), 1143 OID_AUTO, "upperboundss", CTLFLAG_RW, 1144 &rack_per_upper_bound_ss, 0, 1145 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1146 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1147 SYSCTL_CHILDREN(rack_timely), 1148 OID_AUTO, "upperboundca", CTLFLAG_RW, 1149 &rack_per_upper_bound_ca, 0, 1150 "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)"); 1151 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1152 SYSCTL_CHILDREN(rack_timely), 1153 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1154 &rack_do_dyn_mul, 0, 1155 "Rack timely do we enable dynamic timely goodput by default"); 1156 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1157 SYSCTL_CHILDREN(rack_timely), 1158 OID_AUTO,
"no_rec_red", CTLFLAG_RW, 1159 &rack_gp_no_rec_chg, 1, 1160 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1161 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1162 SYSCTL_CHILDREN(rack_timely), 1163 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1164 &rack_timely_dec_clear, 6, 1165 "Rack timely what threshold do we count to before another boost during b/w decent"); 1166 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1167 SYSCTL_CHILDREN(rack_timely), 1168 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1169 &rack_timely_max_push_rise, 3, 1170 "Rack timely how many times do we push up with b/w increase"); 1171 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_timely), 1173 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1174 &rack_timely_max_push_drop, 3, 1175 "Rack timely how many times do we push back on b/w decent"); 1176 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1177 SYSCTL_CHILDREN(rack_timely), 1178 OID_AUTO, "min_segs", CTLFLAG_RW, 1179 &rack_timely_min_segs, 4, 1180 "Rack timely when setting the cwnd what is the min num segments"); 1181 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1182 SYSCTL_CHILDREN(rack_timely), 1183 OID_AUTO, "noback_max", CTLFLAG_RW, 1184 &rack_use_max_for_nobackoff, 0, 1185 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1186 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1187 SYSCTL_CHILDREN(rack_timely), 1188 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1189 &rack_timely_int_timely_only, 0, 1190 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1191 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1192 SYSCTL_CHILDREN(rack_timely), 1193 OID_AUTO, "nonstop", CTLFLAG_RW, 1194 &rack_timely_no_stopping, 0, 1195 "Rack timely don't stop increase"); 1196 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1197 SYSCTL_CHILDREN(rack_timely), 1198 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1199 &rack_down_raise_thresh, 100, 1200 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1201 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1202 SYSCTL_CHILDREN(rack_timely), 1203 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1204 &rack_req_segs, 1, 1205 "Bottom dragging if not these many segments outstanding and room"); 1206 1207 /* TLP and Rack related parameters */ 1208 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1209 SYSCTL_CHILDREN(rack_sysctl_root), 1210 OID_AUTO, 1211 "tlp", 1212 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1213 "TLP and Rack related Controls"); 1214 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1215 SYSCTL_CHILDREN(rack_tlp), 1216 OID_AUTO, "use_rrr", CTLFLAG_RW, 1217 &use_rack_rr, 1, 1218 "Do we use Rack Rapid Recovery"); 1219 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1220 SYSCTL_CHILDREN(rack_tlp), 1221 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1222 &rack_max_abc_post_recovery, 2, 1223 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1224 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1225 SYSCTL_CHILDREN(rack_tlp), 1226 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1227 &rack_non_rxt_use_cr, 0, 1228 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1229 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1230 SYSCTL_CHILDREN(rack_tlp), 1231 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1232 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1233 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1234 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1235 SYSCTL_CHILDREN(rack_tlp), 1236 OID_AUTO, "limit", CTLFLAG_RW, 1237 &rack_tlp_limit, 2, 1238 "How many TLP's can be sent without sending new data"); 1239 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1240 SYSCTL_CHILDREN(rack_tlp), 
1241 OID_AUTO, "use_greater", CTLFLAG_RW, 1242 &rack_tlp_use_greater, 1, 1243 "Should we use the rack_rtt time if its greater than srtt"); 1244 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1245 SYSCTL_CHILDREN(rack_tlp), 1246 OID_AUTO, "tlpminto", CTLFLAG_RW, 1247 &rack_tlp_min, 10000, 1248 "TLP minimum timeout per the specification (in microseconds)"); 1249 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1250 SYSCTL_CHILDREN(rack_tlp), 1251 OID_AUTO, "send_oldest", CTLFLAG_RW, 1252 &rack_always_send_oldest, 0, 1253 "Should we always send the oldest TLP and RACK-TLP"); 1254 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1255 SYSCTL_CHILDREN(rack_tlp), 1256 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1257 &rack_limited_retran, 0, 1258 "How many times can a rack timeout drive out sends"); 1259 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1260 SYSCTL_CHILDREN(rack_tlp), 1261 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1262 &rack_lower_cwnd_at_tlp, 0, 1263 "When a TLP completes a retran should we enter recovery"); 1264 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1265 SYSCTL_CHILDREN(rack_tlp), 1266 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1267 &rack_reorder_thresh, 2, 1268 "What factor for rack will be added when seeing reordering (shift right)"); 1269 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1270 SYSCTL_CHILDREN(rack_tlp), 1271 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1272 &rack_tlp_thresh, 1, 1273 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1274 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1275 SYSCTL_CHILDREN(rack_tlp), 1276 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1277 &rack_reorder_fade, 60000000, 1278 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1279 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1280 SYSCTL_CHILDREN(rack_tlp), 1281 OID_AUTO, "pktdelay", CTLFLAG_RW, 1282 &rack_pkt_delay, 1000, 1283 "Extra RACK time (in microseconds) besides reordering thresh"); 1284 1285 /* Timer related controls */ 1286 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1287 SYSCTL_CHILDREN(rack_sysctl_root), 1288 OID_AUTO, 1289 "timers", 1290 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1291 "Timer related controls"); 1292 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_timers), 1294 OID_AUTO, "persmin", CTLFLAG_RW, 1295 &rack_persist_min, 250000, 1296 "What is the minimum time in microseconds between persists"); 1297 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_timers), 1299 OID_AUTO, "persmax", CTLFLAG_RW, 1300 &rack_persist_max, 2000000, 1301 "What is the largest delay in microseconds between persists"); 1302 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1303 SYSCTL_CHILDREN(rack_timers), 1304 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1305 &rack_delayed_ack_time, 40000, 1306 "Delayed ack time (40ms in microseconds)"); 1307 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_timers), 1309 OID_AUTO, "minrto", CTLFLAG_RW, 1310 &rack_rto_min, 30000, 1311 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1312 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1313 SYSCTL_CHILDREN(rack_timers), 1314 OID_AUTO, "maxrto", CTLFLAG_RW, 1315 &rack_rto_max, 4000000, 1316 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1317 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1318 SYSCTL_CHILDREN(rack_timers), 1319 OID_AUTO, "minto", CTLFLAG_RW, 1320 &rack_min_to, 1000, 1321 "Minimum rack timeout in microseconds"); 1322 /* Measure controls */ 1323 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1324 SYSCTL_CHILDREN(rack_sysctl_root), 1325 OID_AUTO, 1326 "measure", 1327 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1328 "Measure related controls"); 1329 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_measure), 1331 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1332 &rack_wma_divisor, 8, 1333 "When doing b/w calculation what is the divisor for the WMA"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_measure), 1336 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1337 &rack_cwnd_block_ends_measure, 0, 1338 "Does a cwnd just-return end the measurement window (app limited)"); 1339 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1340 SYSCTL_CHILDREN(rack_measure), 1341 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1342 &rack_rwnd_block_ends_measure, 0, 1343 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1344 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1345 SYSCTL_CHILDREN(rack_measure), 1346 OID_AUTO, "min_target", CTLFLAG_RW, 1347 &rack_def_data_window, 20, 1348 "What is the minimum target window (in mss) for a GP measurements"); 1349 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1350 SYSCTL_CHILDREN(rack_measure), 1351 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1352 &rack_goal_bdp, 2, 1353 "What is the goal BDP to measure"); 1354 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1355 SYSCTL_CHILDREN(rack_measure), 1356 OID_AUTO, "min_srtts", CTLFLAG_RW, 1357 &rack_min_srtts, 1, 1358 "What is the goal BDP to measure"); 1359 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1360 SYSCTL_CHILDREN(rack_measure), 1361 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1362 &rack_min_measure_usec, 0, 1363 "What is the Minimum time time for a measurement if 0, this is off"); 1364 /* Features */ 1365 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_sysctl_root), 1367 OID_AUTO, 1368 "features", 1369 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1370 "Feature controls"); 1371 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1372 SYSCTL_CHILDREN(rack_features), 1373 OID_AUTO, "cmpack", CTLFLAG_RW, 1374 &rack_use_cmp_acks, 1, 1375 "Should RACK have LRO send compressed acks"); 1376 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1377 SYSCTL_CHILDREN(rack_features), 1378 OID_AUTO, "fsb", CTLFLAG_RW, 1379 &rack_use_fsb, 1, 1380 "Should RACK use the fast send block?"); 1381 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1382 SYSCTL_CHILDREN(rack_features), 1383 OID_AUTO, "rfo", CTLFLAG_RW, 1384 &rack_use_rfo, 1, 1385 "Should RACK use rack_fast_output()?"); 1386 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1387 SYSCTL_CHILDREN(rack_features), 1388 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1389 &rack_use_rsm_rfo, 1, 1390 "Should RACK use rack_fast_rsm_output()?"); 1391 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1392 SYSCTL_CHILDREN(rack_features), 1393 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1394 &rack_enable_mqueue_for_nonpaced, 0, 1395 "Should RACK use mbuf queuing for non-paced connections"); 1396 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1397 SYSCTL_CHILDREN(rack_features), 1398 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1399 &rack_do_hystart, 0, 1400 "Should RACK enable HyStart++ on connections?"); 1401 /* Misc rack controls */ 1402 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1403 SYSCTL_CHILDREN(rack_sysctl_root), 1404 OID_AUTO, 1405 "misc", 1406 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1407 "Misc related controls"); 1408 #ifdef TCP_ACCOUNTING 1409 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1410 SYSCTL_CHILDREN(rack_misc), 1411 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1412 &rack_tcp_accounting, 0, 1413 "Should we turn on TCP accounting for all rack sessions?"); 1414 #endif 1415 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1416 SYSCTL_CHILDREN(rack_misc), 1417 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1418 &rack_dsack_std_based, 3, 1419 "How do we process dsack with respect to rack timers, bit field, 3 is standards 
based?"); 1420 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_misc), 1422 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1423 &rack_prr_addbackmax, 2, 1424 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1425 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_misc), 1427 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1428 &rack_stats_gets_ms_rtt, 1, 1429 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1430 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_misc), 1432 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1433 &rack_client_low_buf, 0, 1434 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1435 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1436 SYSCTL_CHILDREN(rack_misc), 1437 OID_AUTO, "defprofile", CTLFLAG_RW, 1438 &rack_def_profile, 0, 1439 "Should RACK use a default profile (0=no, num == profile num)?"); 1440 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_misc), 1442 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1443 &rack_enable_shared_cwnd, 1, 1444 "Should RACK try to use the shared cwnd on connections where allowed"); 1445 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_misc), 1447 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1448 &rack_limits_scwnd, 1, 1449 "Should RACK place low end time limits on the shared cwnd feature"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_misc), 1452 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1453 &rack_use_imac_dack, 0, 1454 "Should RACK try to emulate iMac delayed ack"); 1455 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_misc), 1457 OID_AUTO, "no_prr", CTLFLAG_RW, 1458 &rack_disable_prr, 0, 1459 "Should RACK not use prr and only pace (must have pacing on)"); 1460 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_misc), 1462 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1463 &rack_verbose_logging, 0, 1464 "Should RACK black box logging be verbose"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_misc), 1467 OID_AUTO, "data_after_close", CTLFLAG_RW, 1468 &rack_ignore_data_after_close, 1, 1469 "Do we hold off sending a RST until all pending data is ack'd"); 1470 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1471 SYSCTL_CHILDREN(rack_misc), 1472 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1473 &rack_sack_not_required, 1, 1474 "Do we allow rack to run on connections not supporting SACK"); 1475 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1476 SYSCTL_CHILDREN(rack_misc), 1477 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1478 &rack_send_a_lot_in_prr, 1, 1479 "Send a lot in prr"); 1480 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1481 SYSCTL_CHILDREN(rack_misc), 1482 OID_AUTO, "autoscale", CTLFLAG_RW, 1483 &rack_autosndbuf_inc, 20, 1484 "What percentage should rack scale up its snd buffer by?"); 1485 /* Sack Attacker detection stuff */ 1486 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_attack), 1488 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1489 &rack_highest_sack_thresh_seen, 0, 1490 "Highest sack to ack ratio seen"); 1491 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1492 SYSCTL_CHILDREN(rack_attack), 1493 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1494 &rack_highest_move_thresh_seen, 0, 1495 "Highest move to non-move ratio seen"); 1496 rack_ack_total = counter_u64_alloc(M_WAITOK); 1497 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_attack), 1499 OID_AUTO, "acktotal", CTLFLAG_RD, 1500 &rack_ack_total, 1501 "Total number of Ack's"); 1502 rack_express_sack = 
counter_u64_alloc(M_WAITOK); 1503 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1504 SYSCTL_CHILDREN(rack_attack), 1505 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1506 &rack_express_sack, 1507 "Total number of express SACKs"); 1508 rack_sack_total = counter_u64_alloc(M_WAITOK); 1509 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1510 SYSCTL_CHILDREN(rack_attack), 1511 OID_AUTO, "sacktotal", CTLFLAG_RD, 1512 &rack_sack_total, 1513 "Total number of SACKs"); 1514 rack_move_none = counter_u64_alloc(M_WAITOK); 1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1516 SYSCTL_CHILDREN(rack_attack), 1517 OID_AUTO, "move_none", CTLFLAG_RD, 1518 &rack_move_none, 1519 "Total number of SACK index reuse of positions under threshold"); 1520 rack_move_some = counter_u64_alloc(M_WAITOK); 1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_attack), 1523 OID_AUTO, "move_some", CTLFLAG_RD, 1524 &rack_move_some, 1525 "Total number of SACK index reuse of positions over threshold"); 1526 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1527 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1528 SYSCTL_CHILDREN(rack_attack), 1529 OID_AUTO, "attacks", CTLFLAG_RD, 1530 &rack_sack_attacks_detected, 1531 "Total number of SACK attackers that had sack disabled"); 1532 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_attack), 1535 OID_AUTO, "reversed", CTLFLAG_RD, 1536 &rack_sack_attacks_reversed, 1537 "Total number of SACK attackers that were later determined false positive"); 1538 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1539 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_attack), 1541 OID_AUTO, "nextmerge", CTLFLAG_RD, 1542 &rack_sack_used_next_merge, 1543 "Total number of times we used the next merge"); 1544 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1545 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1546 SYSCTL_CHILDREN(rack_attack), 1547 OID_AUTO, "prevmerge", CTLFLAG_RD, 1548 &rack_sack_used_prev_merge, 1549 "Total number of times we used the prev merge"); 1550 /* Counters */ 1551 rack_fto_send = counter_u64_alloc(M_WAITOK); 1552 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1553 SYSCTL_CHILDREN(rack_counters), 1554 OID_AUTO, "fto_send", CTLFLAG_RD, 1555 &rack_fto_send, "Total number of rack_fast_output sends"); 1556 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1557 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1558 SYSCTL_CHILDREN(rack_counters), 1559 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1560 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1561 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1562 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1563 SYSCTL_CHILDREN(rack_counters), 1564 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1565 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1566 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1567 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1568 SYSCTL_CHILDREN(rack_counters), 1569 OID_AUTO, "nfto_send", CTLFLAG_RD, 1570 &rack_non_fto_send, "Total number of rack_output first sends"); 1571 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1572 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1573 SYSCTL_CHILDREN(rack_counters), 1574 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1575 &rack_extended_rfo, "Total number of times we extended rfo"); 1576 1577 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1581
&rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1582 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1583 1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_counters), 1586 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1587 &rack_hw_pace_lost, "Total number of times we lost hardware pacing"); 1588 rack_badfr = counter_u64_alloc(M_WAITOK); 1589 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1590 SYSCTL_CHILDREN(rack_counters), 1591 OID_AUTO, "badfr", CTLFLAG_RD, 1592 &rack_badfr, "Total number of bad FRs"); 1593 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 1594 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1595 SYSCTL_CHILDREN(rack_counters), 1596 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 1597 &rack_badfr_bytes, "Total bytes of bad FRs"); 1598 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 1599 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1600 SYSCTL_CHILDREN(rack_counters), 1601 OID_AUTO, "prrsndret", CTLFLAG_RD, 1602 &rack_rtm_prr_retran, 1603 "Total number of prr based retransmits"); 1604 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 1605 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1606 SYSCTL_CHILDREN(rack_counters), 1607 OID_AUTO, "prrsndnew", CTLFLAG_RD, 1608 &rack_rtm_prr_newdata, 1609 "Total number of prr based new transmits"); 1610 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 1611 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1612 SYSCTL_CHILDREN(rack_counters), 1613 OID_AUTO, "tsnf", CTLFLAG_RD, 1614 &rack_timestamp_mismatch, 1615 "Total number of times we could not find the reported timestamp"); 1616 rack_find_high = counter_u64_alloc(M_WAITOK); 1617 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1618 SYSCTL_CHILDREN(rack_counters), 1619 OID_AUTO, "findhigh", CTLFLAG_RD, 1620 &rack_find_high, 1621 "Total number of FINs causing find-high"); 1622 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 1623 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1624 SYSCTL_CHILDREN(rack_counters), 1625 OID_AUTO, "reordering", CTLFLAG_RD, 1626 &rack_reorder_seen, 1627 "Total number of times we added delay due to reordering"); 1628 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1629 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1630 SYSCTL_CHILDREN(rack_counters), 1631 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1632 &rack_tlp_tot, 1633 "Total number of tail loss probe expirations"); 1634 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1635 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_counters), 1637 OID_AUTO, "tlp_new", CTLFLAG_RD, 1638 &rack_tlp_newdata, 1639 "Total number of tail loss probe sending new data"); 1640 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1641 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1642 SYSCTL_CHILDREN(rack_counters), 1643 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1644 &rack_tlp_retran, 1645 "Total number of tail loss probe sending retransmitted data"); 1646 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1647 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1648 SYSCTL_CHILDREN(rack_counters), 1649 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1650 &rack_tlp_retran_bytes, 1651 "Total bytes of tail loss probe sending retransmitted data"); 1652 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 1653 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1654 SYSCTL_CHILDREN(rack_counters), 1655 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 1656 &rack_tlp_retran_fail, 1657 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 1658 rack_to_tot = counter_u64_alloc(M_WAITOK); 1659
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1660 SYSCTL_CHILDREN(rack_counters), 1661 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1662 &rack_to_tot, 1663 "Total number of times the rack timeout expired"); 1664 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 1665 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_counters), 1667 OID_AUTO, "arm_rack", CTLFLAG_RD, 1668 &rack_to_arm_rack, 1669 "Total number of times the rack timer armed"); 1670 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 1671 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1672 SYSCTL_CHILDREN(rack_counters), 1673 OID_AUTO, "arm_tlp", CTLFLAG_RD, 1674 &rack_to_arm_tlp, 1675 "Total number of times the tlp timer armed"); 1676 rack_calc_zero = counter_u64_alloc(M_WAITOK); 1677 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 1678 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1679 SYSCTL_CHILDREN(rack_counters), 1680 OID_AUTO, "calc_zero", CTLFLAG_RD, 1681 &rack_calc_zero, 1682 "Total number of times pacing time worked out to zero"); 1683 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1684 SYSCTL_CHILDREN(rack_counters), 1685 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 1686 &rack_calc_nonzero, 1687 "Total number of times pacing time worked out to non-zero"); 1688 rack_paced_segments = counter_u64_alloc(M_WAITOK); 1689 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1690 SYSCTL_CHILDREN(rack_counters), 1691 OID_AUTO, "paced", CTLFLAG_RD, 1692 &rack_paced_segments, 1693 "Total number of times a segment send caused hptsi"); 1694 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 1695 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1696 SYSCTL_CHILDREN(rack_counters), 1697 OID_AUTO, "unpaced", CTLFLAG_RD, 1698 &rack_unpaced_segments, 1699 "Total number of times a segment did not cause hptsi"); 1700 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1701 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1702 SYSCTL_CHILDREN(rack_counters), 1703 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1704 &rack_saw_enobuf, 1705 "Total number of times a send returned enobuf for non-hdwr paced connections"); 1706 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1707 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1708 SYSCTL_CHILDREN(rack_counters), 1709 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1710 &rack_saw_enobuf_hw, 1711 "Total number of times a send returned enobuf for hdwr paced connections"); 1712 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1713 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1714 SYSCTL_CHILDREN(rack_counters), 1715 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1716 &rack_saw_enetunreach, 1717 "Total number of times a send received an enetunreachable"); 1718 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1719 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1720 SYSCTL_CHILDREN(rack_counters), 1721 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1722 &rack_hot_alloc, 1723 "Total allocations from the top of our list"); 1724 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1725 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1726 SYSCTL_CHILDREN(rack_counters), 1727 OID_AUTO, "allocs", CTLFLAG_RD, 1728 &rack_to_alloc, 1729 "Total allocations of tracking structures"); 1730 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_counters), 1733 OID_AUTO, "allochard", CTLFLAG_RD, 1734 &rack_to_alloc_hard, 1735 "Total allocations done with sleeping the hard way"); 1736 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters), 1739 OID_AUTO, "allocemerg", CTLFLAG_RD, 1740
&rack_to_alloc_emerg, 1741 "Total allocations done from emergency cache"); 1742 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1746 &rack_to_alloc_limited, 1747 "Total allocations dropped due to limit"); 1748 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1752 &rack_alloc_limited_conns, 1753 "Connections with allocations dropped due to limit"); 1754 rack_split_limited = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_counters), 1757 OID_AUTO, "split_limited", CTLFLAG_RD, 1758 &rack_split_limited, 1759 "Split allocations dropped due to limit"); 1760 1761 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 1762 char name[32]; 1763 sprintf(name, "cmp_ack_cnt_%d", i); 1764 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK); 1765 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1766 SYSCTL_CHILDREN(rack_counters), 1767 OID_AUTO, name, CTLFLAG_RD, 1768 &rack_proc_comp_ack[i], 1769 "Number of compressed acks we processed"); 1770 } 1771 rack_large_ackcmp = counter_u64_alloc(M_WAITOK); 1772 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1773 SYSCTL_CHILDREN(rack_counters), 1774 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD, 1775 &rack_large_ackcmp, 1776 "Number of TCP connections with large mbuf's for compressed acks"); 1777 rack_small_ackcmp = counter_u64_alloc(M_WAITOK); 1778 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1779 SYSCTL_CHILDREN(rack_counters), 1780 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD, 1781 &rack_small_ackcmp, 1782 "Number of TCP connections with small mbuf's for compressed acks"); 1783 #ifdef INVARIANTS 1784 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1785 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1786 SYSCTL_CHILDREN(rack_counters), 1787 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1788 &rack_adjust_map_bw, 1789 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1790 #endif 1791 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1792 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1793 SYSCTL_CHILDREN(rack_counters), 1794 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1795 &rack_multi_single_eq, 1796 "Number of compressed acks total represented"); 1797 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1798 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1799 SYSCTL_CHILDREN(rack_counters), 1800 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1801 &rack_proc_non_comp_ack, 1802 "Number of non compresseds acks that we processed"); 1803 1804 1805 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1806 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1807 SYSCTL_CHILDREN(rack_counters), 1808 OID_AUTO, "sack_long", CTLFLAG_RD, 1809 &rack_sack_proc_all, 1810 "Total times we had to walk whole list for sack processing"); 1811 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1812 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1813 SYSCTL_CHILDREN(rack_counters), 1814 OID_AUTO, "sack_restart", CTLFLAG_RD, 1815 &rack_sack_proc_restart, 1816 "Total times we had to walk whole list due to a restart"); 1817 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1818 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1819 SYSCTL_CHILDREN(rack_counters), 1820 OID_AUTO, "sack_short", CTLFLAG_RD, 1821 &rack_sack_proc_short, 1822 "Total times we took shortcut for sack processing"); 1823 rack_enter_tlp_calc = 
counter_u64_alloc(M_WAITOK); 1824 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1825 SYSCTL_CHILDREN(rack_counters), 1826 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD, 1827 &rack_enter_tlp_calc, 1828 "Total times we called calc-tlp"); 1829 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1831 SYSCTL_CHILDREN(rack_counters), 1832 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 1833 &rack_used_tlpmethod, 1834 "Total number of times we hit TLP method 1"); 1835 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 1836 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1837 SYSCTL_CHILDREN(rack_counters), 1838 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 1839 &rack_used_tlpmethod2, 1840 "Total number of times we hit TLP method 2"); 1841 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1843 SYSCTL_CHILDREN(rack_attack), 1844 OID_AUTO, "skipacked", CTLFLAG_RD, 1845 &rack_sack_skipped_acked, 1846 "Total number of times we skipped previously sacked"); 1847 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1848 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1849 SYSCTL_CHILDREN(rack_attack), 1850 OID_AUTO, "ofsplit", CTLFLAG_RD, 1851 &rack_sack_splits, 1852 "Total number of times we did the old fashioned tree split"); 1853 rack_progress_drops = counter_u64_alloc(M_WAITOK); 1854 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1855 SYSCTL_CHILDREN(rack_counters), 1856 OID_AUTO, "prog_drops", CTLFLAG_RD, 1857 &rack_progress_drops, 1858 "Total number of progress drops"); 1859 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1860 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1861 SYSCTL_CHILDREN(rack_counters), 1862 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1863 &rack_input_idle_reduces, 1864 "Total number of idle reductions on input"); 1865 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1866 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1867 SYSCTL_CHILDREN(rack_counters), 1868 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1869 &rack_collapsed_win, 1870 "Total number of collapsed windows"); 1871 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 1872 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1873 SYSCTL_CHILDREN(rack_counters), 1874 OID_AUTO, "tlp_nada", CTLFLAG_RD, 1875 &rack_tlp_does_nada, 1876 "Total number of nada tlp calls"); 1877 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1878 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1879 SYSCTL_CHILDREN(rack_counters), 1880 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1881 &rack_try_scwnd, 1882 "Total number of scwnd attempts"); 1883 1884 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1885 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1886 SYSCTL_CHILDREN(rack_counters), 1887 OID_AUTO, "timer_hole", CTLFLAG_RD, 1888 &rack_per_timer_hole, 1889 "Total number of persists started in a timer hole"); 1890 1891 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK); 1892 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1893 SYSCTL_CHILDREN(rack_counters), 1894 OID_AUTO, "sndptr_wrong", CTLFLAG_RD, 1895 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect"); 1896 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1898 SYSCTL_CHILDREN(rack_counters), 1899 OID_AUTO, "sndptr_right", CTLFLAG_RD, 1900 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct"); 1901 1902 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1903 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1904 OID_AUTO, "outsize", CTLFLAG_RD, 1905 rack_out_size,
TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1906 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1907 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1908 OID_AUTO, "opts", CTLFLAG_RD, 1909 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1910 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1911 SYSCTL_CHILDREN(rack_sysctl_root), 1912 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1913 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1914 } 1915 1916 static __inline int 1917 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1918 { 1919 if (SEQ_GEQ(b->r_start, a->r_start) && 1920 SEQ_LT(b->r_start, a->r_end)) { 1921 /* 1922 * The entry b is within the 1923 * block a. i.e.: 1924 * a -- |-------------| 1925 * b -- |----| 1926 * <or> 1927 * b -- |------| 1928 * <or> 1929 * b -- |-----------| 1930 */ 1931 return (0); 1932 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1933 /* 1934 * b falls as either the next 1935 * sequence block after a so a 1936 * is said to be smaller than b. 1937 * i.e: 1938 * a -- |------| 1939 * b -- |--------| 1940 * or 1941 * b -- |-----| 1942 */ 1943 return (1); 1944 } 1945 /* 1946 * Whats left is where a is 1947 * larger than b. i.e: 1948 * a -- |-------| 1949 * b -- |---| 1950 * or even possibly 1951 * b -- |--------------| 1952 */ 1953 return (-1); 1954 } 1955 1956 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1957 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1958 1959 static uint32_t 1960 rc_init_window(struct tcp_rack *rack) 1961 { 1962 uint32_t win; 1963 1964 if (rack->rc_init_win == 0) { 1965 /* 1966 * Nothing set by the user, use the system stack 1967 * default. 1968 */ 1969 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1970 } 1971 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1972 return (win); 1973 } 1974 1975 static uint64_t 1976 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1977 { 1978 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1979 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1980 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1981 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1982 else 1983 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1984 } 1985 1986 static uint64_t 1987 rack_get_bw(struct tcp_rack *rack) 1988 { 1989 if (rack->use_fixed_rate) { 1990 /* Return the fixed pacing rate */ 1991 return (rack_get_fixed_pacing_bw(rack)); 1992 } 1993 if (rack->r_ctl.gp_bw == 0) { 1994 /* 1995 * We have yet no b/w measurement, 1996 * if we have a user set initial bw 1997 * return it. If we don't have that and 1998 * we have an srtt, use the tcp IW (10) to 1999 * calculate a fictional b/w over the SRTT 2000 * which is more or less a guess. Note 2001 * we don't use our IW from rack on purpose 2002 * so if we have like IW=30, we are not 2003 * calculating a "huge" b/w. 2004 */ 2005 uint64_t bw, srtt; 2006 if (rack->r_ctl.init_rate) 2007 return (rack->r_ctl.init_rate); 2008 2009 /* Has the user set a max peak rate? */ 2010 #ifdef NETFLIX_PEAKRATE 2011 if (rack->rc_tp->t_maxpeakrate) 2012 return (rack->rc_tp->t_maxpeakrate); 2013 #endif 2014 /* Ok lets come up with the IW guess, if we have a srtt */ 2015 if (rack->rc_tp->t_srtt == 0) { 2016 /* 2017 * Go with old pacing method 2018 * i.e. burst mitigation only. 
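 * Otherwise the IW guess below is simply the initial window in
 * bytes spread over one SRTT. With purely illustrative numbers: a
 * 1448 byte MSS gives an initial window from tcp_compute_initwnd()
 * of roughly 10 * 1448 = 14480 bytes, so an SRTT of 50000 usec
 * yields bw = 14480 * 1000000 / 50000 = 289600 bytes/sec (about
 * 2.3 Mbps), subject to the bw_rate_cap check below.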
2019 */ 2020 return (0); 2021 } 2022 /* Ok lets get the initial TCP win (not racks) */ 2023 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2024 srtt = (uint64_t)rack->rc_tp->t_srtt; 2025 bw *= (uint64_t)USECS_IN_SECOND; 2026 bw /= srtt; 2027 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2028 bw = rack->r_ctl.bw_rate_cap; 2029 return (bw); 2030 } else { 2031 uint64_t bw; 2032 2033 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2034 /* Averaging is done, we can return the value */ 2035 bw = rack->r_ctl.gp_bw; 2036 } else { 2037 /* Still doing initial average must calculate */ 2038 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 2039 } 2040 #ifdef NETFLIX_PEAKRATE 2041 if ((rack->rc_tp->t_maxpeakrate) && 2042 (bw > rack->rc_tp->t_maxpeakrate)) { 2043 /* The user has set a peak rate to pace at 2044 * don't allow us to pace faster than that. 2045 */ 2046 return (rack->rc_tp->t_maxpeakrate); 2047 } 2048 #endif 2049 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2050 bw = rack->r_ctl.bw_rate_cap; 2051 return (bw); 2052 } 2053 } 2054 2055 static uint16_t 2056 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2057 { 2058 if (rack->use_fixed_rate) { 2059 return (100); 2060 } else if (rack->in_probe_rtt && (rsm == NULL)) 2061 return (rack->r_ctl.rack_per_of_gp_probertt); 2062 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2063 rack->r_ctl.rack_per_of_gp_rec)) { 2064 if (rsm) { 2065 /* a retransmission always use the recovery rate */ 2066 return (rack->r_ctl.rack_per_of_gp_rec); 2067 } else if (rack->rack_rec_nonrxt_use_cr) { 2068 /* Directed to use the configured rate */ 2069 goto configured_rate; 2070 } else if (rack->rack_no_prr && 2071 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2072 /* No PRR, lets just use the b/w estimate only */ 2073 return (100); 2074 } else { 2075 /* 2076 * Here we may have a non-retransmit but we 2077 * have no overrides, so just use the recovery 2078 * rate (prr is in effect). 2079 */ 2080 return (rack->r_ctl.rack_per_of_gp_rec); 2081 } 2082 } 2083 configured_rate: 2084 /* For the configured rate we look at our cwnd vs the ssthresh */ 2085 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2086 return (rack->r_ctl.rack_per_of_gp_ss); 2087 else 2088 return (rack->r_ctl.rack_per_of_gp_ca); 2089 } 2090 2091 static void 2092 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2093 { 2094 /* 2095 * Types of logs (mod value) 2096 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2097 * 2 = a dsack round begins, persist is reset to 16. 2098 * 3 = a dsack round ends 2099 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2100 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2101 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
2102 */ 2103 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2104 union tcp_log_stackspecific log; 2105 struct timeval tv; 2106 2107 memset(&log, 0, sizeof(log)); 2108 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2109 log.u_bbr.flex1 <<= 1; 2110 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2111 log.u_bbr.flex1 <<= 1; 2112 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2113 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2114 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2115 log.u_bbr.flex4 = flex4; 2116 log.u_bbr.flex5 = flex5; 2117 log.u_bbr.flex6 = flex6; 2118 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2119 log.u_bbr.flex8 = mod; 2120 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2121 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2122 &rack->rc_inp->inp_socket->so_rcv, 2123 &rack->rc_inp->inp_socket->so_snd, 2124 RACK_DSACK_HANDLING, 0, 2125 0, &log, false, &tv); 2126 } 2127 } 2128 2129 static void 2130 rack_log_hdwr_pacing(struct tcp_rack *rack, 2131 uint64_t rate, uint64_t hw_rate, int line, 2132 int error, uint16_t mod) 2133 { 2134 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2135 union tcp_log_stackspecific log; 2136 struct timeval tv; 2137 const struct ifnet *ifp; 2138 2139 memset(&log, 0, sizeof(log)); 2140 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2141 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2142 if (rack->r_ctl.crte) { 2143 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2144 } else if (rack->rc_inp->inp_route.ro_nh && 2145 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2146 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2147 } else 2148 ifp = NULL; 2149 if (ifp) { 2150 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2151 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2152 } 2153 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2154 log.u_bbr.bw_inuse = rate; 2155 log.u_bbr.flex5 = line; 2156 log.u_bbr.flex6 = error; 2157 log.u_bbr.flex7 = mod; 2158 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2159 log.u_bbr.flex8 = rack->use_fixed_rate; 2160 log.u_bbr.flex8 <<= 1; 2161 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2162 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2163 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2164 if (rack->r_ctl.crte) 2165 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2166 else 2167 log.u_bbr.cur_del_rate = 0; 2168 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2169 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2170 &rack->rc_inp->inp_socket->so_rcv, 2171 &rack->rc_inp->inp_socket->so_snd, 2172 BBR_LOG_HDWR_PACE, 0, 2173 0, &log, false, &tv); 2174 } 2175 } 2176 2177 static uint64_t 2178 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2179 { 2180 /* 2181 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
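 * The gain returned by rack_get_output_gain() is a percentage that
 * is applied below as bw_est = bw * gain / 100 and then clamped to
 * at least RACK_MIN_BW (and possibly capped at the hardware rate
 * table). For example, with purely illustrative numbers, a b/w of
 * 125000 bytes/sec and a gain of 150 paces at 187500 bytes/sec,
 * while a gain of 100 leaves the estimate unchanged.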
2182 */ 2183 uint64_t bw_est, high_rate; 2184 uint64_t gain; 2185 2186 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2187 bw_est = bw * gain; 2188 bw_est /= (uint64_t)100; 2189 /* Never fall below the minimum (def 64kbps) */ 2190 if (bw_est < RACK_MIN_BW) 2191 bw_est = RACK_MIN_BW; 2192 if (rack->r_rack_hw_rate_caps) { 2193 /* Rate caps are in place */ 2194 if (rack->r_ctl.crte != NULL) { 2195 /* We have a hdwr rate already */ 2196 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2197 if (bw_est >= high_rate) { 2198 /* We are capping bw at the highest rate table entry */ 2199 rack_log_hdwr_pacing(rack, 2200 bw_est, high_rate, __LINE__, 2201 0, 3); 2202 bw_est = high_rate; 2203 if (capped) 2204 *capped = 1; 2205 } 2206 } else if ((rack->rack_hdrw_pacing == 0) && 2207 (rack->rack_hdw_pace_ena) && 2208 (rack->rack_attempt_hdwr_pace == 0) && 2209 (rack->rc_inp->inp_route.ro_nh != NULL) && 2210 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2211 /* 2212 * Special case, we have not yet attempted hardware 2213 * pacing, and yet we may, when we do, find out if we are 2214 * above the highest rate. We need to know the maxbw for the interface 2215 * in question (if it supports ratelimiting). We get back 2216 * a 0, if the interface is not found in the RL lists. 2217 */ 2218 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2219 if (high_rate) { 2220 /* Yep, we have a rate is it above this rate? */ 2221 if (bw_est > high_rate) { 2222 bw_est = high_rate; 2223 if (capped) 2224 *capped = 1; 2225 } 2226 } 2227 } 2228 } 2229 return (bw_est); 2230 } 2231 2232 static void 2233 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2234 { 2235 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2236 union tcp_log_stackspecific log; 2237 struct timeval tv; 2238 2239 if ((mod != 1) && (rack_verbose_logging == 0)) { 2240 /* 2241 * We get 3 values currently for mod 2242 * 1 - We are retransmitting and this tells the reason. 2243 * 2 - We are clearing a dup-ack count. 2244 * 3 - We are incrementing a dup-ack count. 2245 * 2246 * The clear/increment are only logged 2247 * if you have BBverbose on. 
2248 */ 2249 return; 2250 } 2251 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2252 log.u_bbr.flex1 = tsused; 2253 log.u_bbr.flex2 = thresh; 2254 log.u_bbr.flex3 = rsm->r_flags; 2255 log.u_bbr.flex4 = rsm->r_dupack; 2256 log.u_bbr.flex5 = rsm->r_start; 2257 log.u_bbr.flex6 = rsm->r_end; 2258 log.u_bbr.flex8 = mod; 2259 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2260 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2261 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2262 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2263 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2264 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2265 log.u_bbr.pacing_gain = rack->r_must_retran; 2266 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2267 &rack->rc_inp->inp_socket->so_rcv, 2268 &rack->rc_inp->inp_socket->so_snd, 2269 BBR_LOG_SETTINGS_CHG, 0, 2270 0, &log, false, &tv); 2271 } 2272 } 2273 2274 static void 2275 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2276 { 2277 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2278 union tcp_log_stackspecific log; 2279 struct timeval tv; 2280 2281 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2282 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2283 log.u_bbr.flex2 = to; 2284 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2285 log.u_bbr.flex4 = slot; 2286 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2287 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2288 log.u_bbr.flex7 = rack->rc_in_persist; 2289 log.u_bbr.flex8 = which; 2290 if (rack->rack_no_prr) 2291 log.u_bbr.pkts_out = 0; 2292 else 2293 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2294 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2295 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2296 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2297 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2298 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2299 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2300 log.u_bbr.pacing_gain = rack->r_must_retran; 2301 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2302 log.u_bbr.lost = rack_rto_min; 2303 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2304 &rack->rc_inp->inp_socket->so_rcv, 2305 &rack->rc_inp->inp_socket->so_snd, 2306 BBR_LOG_TIMERSTAR, 0, 2307 0, &log, false, &tv); 2308 } 2309 } 2310 2311 static void 2312 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2313 { 2314 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2315 union tcp_log_stackspecific log; 2316 struct timeval tv; 2317 2318 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2319 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2320 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2321 log.u_bbr.flex8 = to_num; 2322 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2323 log.u_bbr.flex2 = rack->rc_rack_rtt; 2324 if (rsm == NULL) 2325 log.u_bbr.flex3 = 0; 2326 else 2327 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2328 if (rack->rack_no_prr) 2329 log.u_bbr.flex5 = 0; 2330 else 2331 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2332 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2333 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2334 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2335 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2336 log.u_bbr.pacing_gain = rack->r_must_retran; 2337 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2338 &rack->rc_inp->inp_socket->so_rcv, 2339 &rack->rc_inp->inp_socket->so_snd, 2340 BBR_LOG_RTO, 0, 2341 0, &log, false, &tv); 2342 } 2343 } 2344 2345 static void 2346 
rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2347 struct rack_sendmap *prev, 2348 struct rack_sendmap *rsm, 2349 struct rack_sendmap *next, 2350 int flag, uint32_t th_ack, int line) 2351 { 2352 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2353 union tcp_log_stackspecific log; 2354 struct timeval tv; 2355 2356 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2357 log.u_bbr.flex8 = flag; 2358 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2359 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2360 log.u_bbr.cur_del_rate = (uint64_t)prev; 2361 log.u_bbr.delRate = (uint64_t)rsm; 2362 log.u_bbr.rttProp = (uint64_t)next; 2363 log.u_bbr.flex7 = 0; 2364 if (prev) { 2365 log.u_bbr.flex1 = prev->r_start; 2366 log.u_bbr.flex2 = prev->r_end; 2367 log.u_bbr.flex7 |= 0x4; 2368 } 2369 if (rsm) { 2370 log.u_bbr.flex3 = rsm->r_start; 2371 log.u_bbr.flex4 = rsm->r_end; 2372 log.u_bbr.flex7 |= 0x2; 2373 } 2374 if (next) { 2375 log.u_bbr.flex5 = next->r_start; 2376 log.u_bbr.flex6 = next->r_end; 2377 log.u_bbr.flex7 |= 0x1; 2378 } 2379 log.u_bbr.applimited = line; 2380 log.u_bbr.pkts_out = th_ack; 2381 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2382 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2383 if (rack->rack_no_prr) 2384 log.u_bbr.lost = 0; 2385 else 2386 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2387 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2388 &rack->rc_inp->inp_socket->so_rcv, 2389 &rack->rc_inp->inp_socket->so_snd, 2390 TCP_LOG_MAPCHG, 0, 2391 0, &log, false, &tv); 2392 } 2393 } 2394 2395 static void 2396 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2397 struct rack_sendmap *rsm, int conf) 2398 { 2399 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2400 union tcp_log_stackspecific log; 2401 struct timeval tv; 2402 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2403 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2404 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2405 log.u_bbr.flex1 = t; 2406 log.u_bbr.flex2 = len; 2407 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2408 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2409 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2410 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2411 log.u_bbr.flex7 = conf; 2412 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2413 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2414 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2415 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2416 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2417 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2418 if (rsm) { 2419 log.u_bbr.pkt_epoch = rsm->r_start; 2420 log.u_bbr.lost = rsm->r_end; 2421 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2422 /* We loose any upper of the 24 bits */ 2423 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2424 } else { 2425 /* Its a SYN */ 2426 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2427 log.u_bbr.lost = 0; 2428 log.u_bbr.cwnd_gain = 0; 2429 log.u_bbr.pacing_gain = 0; 2430 } 2431 /* Write out general bits of interest rrs here */ 2432 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2433 log.u_bbr.use_lt_bw <<= 1; 2434 log.u_bbr.use_lt_bw |= rack->forced_ack; 2435 log.u_bbr.use_lt_bw <<= 1; 2436 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2437 log.u_bbr.use_lt_bw <<= 1; 2438 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2439 log.u_bbr.use_lt_bw <<= 1; 2440 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2441 log.u_bbr.use_lt_bw <<= 1; 2442 log.u_bbr.use_lt_bw |= 
rack->app_limited_needs_set; 2443 log.u_bbr.use_lt_bw <<= 1; 2444 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2445 log.u_bbr.use_lt_bw <<= 1; 2446 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2447 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2448 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2449 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2450 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2451 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2452 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2453 log.u_bbr.bw_inuse <<= 32; 2454 if (rsm) 2455 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2456 TCP_LOG_EVENTP(tp, NULL, 2457 &rack->rc_inp->inp_socket->so_rcv, 2458 &rack->rc_inp->inp_socket->so_snd, 2459 BBR_LOG_BBRRTT, 0, 2460 0, &log, false, &tv); 2461 2462 2463 } 2464 } 2465 2466 static void 2467 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2468 { 2469 /* 2470 * Log the rtt sample we are 2471 * applying to the srtt algorithm in 2472 * useconds. 2473 */ 2474 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2475 union tcp_log_stackspecific log; 2476 struct timeval tv; 2477 2478 /* Convert our ms to a microsecond */ 2479 memset(&log, 0, sizeof(log)); 2480 log.u_bbr.flex1 = rtt; 2481 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2482 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2483 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2484 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2485 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2486 log.u_bbr.flex7 = 1; 2487 log.u_bbr.flex8 = rack->sack_attack_disable; 2488 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2489 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2490 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2491 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2492 log.u_bbr.pacing_gain = rack->r_must_retran; 2493 /* 2494 * We capture in delRate the upper 32 bits as 2495 * the confidence level we had declared, and the 2496 * lower 32 bits as the actual RTT using the arrival 2497 * timestamp. 
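 * In other words the packing below is equivalent to
 *   delRate = ((uint64_t)confidence << 32) | rs_us_rtt;
 * so, purely as an illustration, a confidence of 1 and an
 * rs_us_rtt of 25000 usec would be logged as 0x00000001000061a8.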
2498 */ 2499 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2500 log.u_bbr.delRate <<= 32; 2501 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2502 /* Lets capture all the things that make up t_rtxcur */ 2503 log.u_bbr.applimited = rack_rto_min; 2504 log.u_bbr.epoch = rack_rto_max; 2505 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2506 log.u_bbr.lost = rack_rto_min; 2507 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2508 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2509 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2510 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2511 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2512 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2513 &rack->rc_inp->inp_socket->so_rcv, 2514 &rack->rc_inp->inp_socket->so_snd, 2515 TCP_LOG_RTT, 0, 2516 0, &log, false, &tv); 2517 } 2518 } 2519 2520 static void 2521 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2522 { 2523 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2524 union tcp_log_stackspecific log; 2525 struct timeval tv; 2526 2527 /* Convert our ms to a microsecond */ 2528 memset(&log, 0, sizeof(log)); 2529 log.u_bbr.flex1 = rtt; 2530 log.u_bbr.flex2 = send_time; 2531 log.u_bbr.flex3 = ack_time; 2532 log.u_bbr.flex4 = where; 2533 log.u_bbr.flex7 = 2; 2534 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2535 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2536 &rack->rc_inp->inp_socket->so_rcv, 2537 &rack->rc_inp->inp_socket->so_snd, 2538 TCP_LOG_RTT, 0, 2539 0, &log, false, &tv); 2540 } 2541 } 2542 2543 2544 2545 static inline void 2546 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2547 { 2548 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2549 union tcp_log_stackspecific log; 2550 struct timeval tv; 2551 2552 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2553 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2554 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2555 log.u_bbr.flex1 = line; 2556 log.u_bbr.flex2 = tick; 2557 log.u_bbr.flex3 = tp->t_maxunacktime; 2558 log.u_bbr.flex4 = tp->t_acktime; 2559 log.u_bbr.flex8 = event; 2560 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2561 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2562 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2563 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2564 log.u_bbr.pacing_gain = rack->r_must_retran; 2565 TCP_LOG_EVENTP(tp, NULL, 2566 &rack->rc_inp->inp_socket->so_rcv, 2567 &rack->rc_inp->inp_socket->so_snd, 2568 BBR_LOG_PROGRESS, 0, 2569 0, &log, false, &tv); 2570 } 2571 } 2572 2573 static void 2574 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2575 { 2576 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2577 union tcp_log_stackspecific log; 2578 2579 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2580 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2581 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2582 log.u_bbr.flex1 = slot; 2583 if (rack->rack_no_prr) 2584 log.u_bbr.flex2 = 0; 2585 else 2586 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2587 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2588 log.u_bbr.flex8 = rack->rc_in_persist; 2589 log.u_bbr.timeStamp = cts; 2590 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2591 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2592 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2593 
log.u_bbr.pacing_gain = rack->r_must_retran; 2594 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2595 &rack->rc_inp->inp_socket->so_rcv, 2596 &rack->rc_inp->inp_socket->so_snd, 2597 BBR_LOG_BBRSND, 0, 2598 0, &log, false, tv); 2599 } 2600 } 2601 2602 static void 2603 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2604 { 2605 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2606 union tcp_log_stackspecific log; 2607 struct timeval tv; 2608 2609 memset(&log, 0, sizeof(log)); 2610 log.u_bbr.flex1 = did_out; 2611 log.u_bbr.flex2 = nxt_pkt; 2612 log.u_bbr.flex3 = way_out; 2613 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2614 if (rack->rack_no_prr) 2615 log.u_bbr.flex5 = 0; 2616 else 2617 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2618 log.u_bbr.flex6 = nsegs; 2619 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2620 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2621 log.u_bbr.flex7 <<= 1; 2622 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2623 log.u_bbr.flex7 <<= 1; 2624 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2625 log.u_bbr.flex8 = rack->rc_in_persist; 2626 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2627 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2628 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2629 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2630 log.u_bbr.use_lt_bw <<= 1; 2631 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2632 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2633 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2634 log.u_bbr.pacing_gain = rack->r_must_retran; 2635 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2636 &rack->rc_inp->inp_socket->so_rcv, 2637 &rack->rc_inp->inp_socket->so_snd, 2638 BBR_LOG_DOSEG_DONE, 0, 2639 0, &log, false, &tv); 2640 } 2641 } 2642 2643 static void 2644 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2645 { 2646 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2647 union tcp_log_stackspecific log; 2648 struct timeval tv; 2649 uint32_t cts; 2650 2651 memset(&log, 0, sizeof(log)); 2652 cts = tcp_get_usecs(&tv); 2653 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2654 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2655 log.u_bbr.flex4 = arg1; 2656 log.u_bbr.flex5 = arg2; 2657 log.u_bbr.flex6 = arg3; 2658 log.u_bbr.flex8 = frm; 2659 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2660 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2661 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2662 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2663 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2664 log.u_bbr.pacing_gain = rack->r_must_retran; 2665 TCP_LOG_EVENTP(tp, NULL, 2666 &tp->t_inpcb->inp_socket->so_rcv, 2667 &tp->t_inpcb->inp_socket->so_snd, 2668 TCP_HDWR_PACE_SIZE, 0, 2669 0, &log, false, &tv); 2670 } 2671 } 2672 2673 static void 2674 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2675 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2676 { 2677 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2678 union tcp_log_stackspecific log; 2679 struct timeval tv; 2680 2681 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2682 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2683 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2684 log.u_bbr.flex1 = slot; 2685 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2686 log.u_bbr.flex4 = reason; 2687 if 
(rack->rack_no_prr) 2688 log.u_bbr.flex5 = 0; 2689 else 2690 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2691 log.u_bbr.flex7 = hpts_calling; 2692 log.u_bbr.flex8 = rack->rc_in_persist; 2693 log.u_bbr.lt_epoch = cwnd_to_use; 2694 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2695 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2696 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2697 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2698 log.u_bbr.pacing_gain = rack->r_must_retran; 2699 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2700 &rack->rc_inp->inp_socket->so_rcv, 2701 &rack->rc_inp->inp_socket->so_snd, 2702 BBR_LOG_JUSTRET, 0, 2703 tlen, &log, false, &tv); 2704 } 2705 } 2706 2707 static void 2708 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2709 struct timeval *tv, uint32_t flags_on_entry) 2710 { 2711 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2712 union tcp_log_stackspecific log; 2713 2714 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2715 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2716 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2717 log.u_bbr.flex1 = line; 2718 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2719 log.u_bbr.flex3 = flags_on_entry; 2720 log.u_bbr.flex4 = us_cts; 2721 if (rack->rack_no_prr) 2722 log.u_bbr.flex5 = 0; 2723 else 2724 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2725 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2726 log.u_bbr.flex7 = hpts_removed; 2727 log.u_bbr.flex8 = 1; 2728 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2729 log.u_bbr.timeStamp = us_cts; 2730 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2731 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2732 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2733 log.u_bbr.pacing_gain = rack->r_must_retran; 2734 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2735 &rack->rc_inp->inp_socket->so_rcv, 2736 &rack->rc_inp->inp_socket->so_snd, 2737 BBR_LOG_TIMERCANC, 0, 2738 0, &log, false, tv); 2739 } 2740 } 2741 2742 static void 2743 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2744 uint32_t flex1, uint32_t flex2, 2745 uint32_t flex3, uint32_t flex4, 2746 uint32_t flex5, uint32_t flex6, 2747 uint16_t flex7, uint8_t mod) 2748 { 2749 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2750 union tcp_log_stackspecific log; 2751 struct timeval tv; 2752 2753 if (mod == 1) { 2754 /* No you can't use 1, its for the real to cancel */ 2755 return; 2756 } 2757 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2758 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2759 log.u_bbr.flex1 = flex1; 2760 log.u_bbr.flex2 = flex2; 2761 log.u_bbr.flex3 = flex3; 2762 log.u_bbr.flex4 = flex4; 2763 log.u_bbr.flex5 = flex5; 2764 log.u_bbr.flex6 = flex6; 2765 log.u_bbr.flex7 = flex7; 2766 log.u_bbr.flex8 = mod; 2767 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2768 &rack->rc_inp->inp_socket->so_rcv, 2769 &rack->rc_inp->inp_socket->so_snd, 2770 BBR_LOG_TIMERCANC, 0, 2771 0, &log, false, &tv); 2772 } 2773 } 2774 2775 static void 2776 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2777 { 2778 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2779 union tcp_log_stackspecific log; 2780 struct timeval tv; 2781 2782 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2783 log.u_bbr.flex1 = timers; 2784 log.u_bbr.flex2 = ret; 2785 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2786 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2787 log.u_bbr.flex5 = cts; 2788 if (rack->rack_no_prr) 2789 log.u_bbr.flex6 = 0; 2790 else 2791 log.u_bbr.flex6 
= rack->r_ctl.rc_prr_sndcnt; 2792 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2793 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2794 log.u_bbr.pacing_gain = rack->r_must_retran; 2795 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2796 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2797 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2798 &rack->rc_inp->inp_socket->so_rcv, 2799 &rack->rc_inp->inp_socket->so_snd, 2800 BBR_LOG_TO_PROCESS, 0, 2801 0, &log, false, &tv); 2802 } 2803 } 2804 2805 static void 2806 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2807 { 2808 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2809 union tcp_log_stackspecific log; 2810 struct timeval tv; 2811 2812 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2813 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2814 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2815 if (rack->rack_no_prr) 2816 log.u_bbr.flex3 = 0; 2817 else 2818 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2819 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2820 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2821 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2822 log.u_bbr.flex8 = frm; 2823 log.u_bbr.pkts_out = orig_cwnd; 2824 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2825 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2826 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2827 log.u_bbr.use_lt_bw <<= 1; 2828 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2829 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2830 &rack->rc_inp->inp_socket->so_rcv, 2831 &rack->rc_inp->inp_socket->so_snd, 2832 BBR_LOG_BBRUPD, 0, 2833 0, &log, false, &tv); 2834 } 2835 } 2836 2837 #ifdef NETFLIX_EXP_DETECTION 2838 static void 2839 rack_log_sad(struct tcp_rack *rack, int event) 2840 { 2841 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2842 union tcp_log_stackspecific log; 2843 struct timeval tv; 2844 2845 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2846 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2847 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2848 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2849 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2850 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2851 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2852 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2853 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2854 log.u_bbr.lt_epoch |= rack->do_detection; 2855 log.u_bbr.applimited = tcp_map_minimum; 2856 log.u_bbr.flex7 = rack->sack_attack_disable; 2857 log.u_bbr.flex8 = event; 2858 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2859 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2860 log.u_bbr.delivered = tcp_sad_decay_val; 2861 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2862 &rack->rc_inp->inp_socket->so_rcv, 2863 &rack->rc_inp->inp_socket->so_snd, 2864 TCP_SAD_DETECTION, 0, 2865 0, &log, false, &tv); 2866 } 2867 } 2868 #endif 2869 2870 static void 2871 rack_counter_destroy(void) 2872 { 2873 int i; 2874 2875 counter_u64_free(rack_fto_send); 2876 counter_u64_free(rack_fto_rsm_send); 2877 counter_u64_free(rack_nfto_resend); 2878 counter_u64_free(rack_hw_pace_init_fail); 2879 counter_u64_free(rack_hw_pace_lost); 2880 counter_u64_free(rack_non_fto_send); 2881 counter_u64_free(rack_extended_rfo); 2882 counter_u64_free(rack_ack_total); 2883 counter_u64_free(rack_express_sack); 2884 counter_u64_free(rack_sack_total); 2885 counter_u64_free(rack_move_none); 2886 counter_u64_free(rack_move_some); 2887 counter_u64_free(rack_sack_attacks_detected); 2888 counter_u64_free(rack_sack_attacks_reversed); 2889 
counter_u64_free(rack_sack_used_next_merge); 2890 counter_u64_free(rack_sack_used_prev_merge); 2891 counter_u64_free(rack_badfr); 2892 counter_u64_free(rack_badfr_bytes); 2893 counter_u64_free(rack_rtm_prr_retran); 2894 counter_u64_free(rack_rtm_prr_newdata); 2895 counter_u64_free(rack_timestamp_mismatch); 2896 counter_u64_free(rack_find_high); 2897 counter_u64_free(rack_reorder_seen); 2898 counter_u64_free(rack_tlp_tot); 2899 counter_u64_free(rack_tlp_newdata); 2900 counter_u64_free(rack_tlp_retran); 2901 counter_u64_free(rack_tlp_retran_bytes); 2902 counter_u64_free(rack_tlp_retran_fail); 2903 counter_u64_free(rack_to_tot); 2904 counter_u64_free(rack_to_arm_rack); 2905 counter_u64_free(rack_to_arm_tlp); 2906 counter_u64_free(rack_calc_zero); 2907 counter_u64_free(rack_calc_nonzero); 2908 counter_u64_free(rack_paced_segments); 2909 counter_u64_free(rack_unpaced_segments); 2910 counter_u64_free(rack_saw_enobuf); 2911 counter_u64_free(rack_saw_enobuf_hw); 2912 counter_u64_free(rack_saw_enetunreach); 2913 counter_u64_free(rack_hot_alloc); 2914 counter_u64_free(rack_to_alloc); 2915 counter_u64_free(rack_to_alloc_hard); 2916 counter_u64_free(rack_to_alloc_emerg); 2917 counter_u64_free(rack_to_alloc_limited); 2918 counter_u64_free(rack_alloc_limited_conns); 2919 counter_u64_free(rack_split_limited); 2920 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 2921 counter_u64_free(rack_proc_comp_ack[i]); 2922 } 2923 counter_u64_free(rack_multi_single_eq); 2924 counter_u64_free(rack_proc_non_comp_ack); 2925 counter_u64_free(rack_sack_proc_all); 2926 counter_u64_free(rack_sack_proc_restart); 2927 counter_u64_free(rack_sack_proc_short); 2928 counter_u64_free(rack_enter_tlp_calc); 2929 counter_u64_free(rack_used_tlpmethod); 2930 counter_u64_free(rack_used_tlpmethod2); 2931 counter_u64_free(rack_sack_skipped_acked); 2932 counter_u64_free(rack_sack_splits); 2933 counter_u64_free(rack_progress_drops); 2934 counter_u64_free(rack_input_idle_reduces); 2935 counter_u64_free(rack_collapsed_win); 2936 counter_u64_free(rack_tlp_does_nada); 2937 counter_u64_free(rack_try_scwnd); 2938 counter_u64_free(rack_per_timer_hole); 2939 counter_u64_free(rack_large_ackcmp); 2940 counter_u64_free(rack_small_ackcmp); 2941 #ifdef INVARIANTS 2942 counter_u64_free(rack_adjust_map_bw); 2943 #endif 2944 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2945 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2946 } 2947 2948 static struct rack_sendmap * 2949 rack_alloc(struct tcp_rack *rack) 2950 { 2951 struct rack_sendmap *rsm; 2952 2953 /* 2954 * First get the top of the list it in 2955 * theory is the "hottest" rsm we have, 2956 * possibly just freed by ack processing. 2957 */ 2958 if (rack->rc_free_cnt > rack_free_cache) { 2959 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2960 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2961 counter_u64_add(rack_hot_alloc, 1); 2962 rack->rc_free_cnt--; 2963 return (rsm); 2964 } 2965 /* 2966 * Once we get under our free cache we probably 2967 * no longer have a "hot" one available. Lets 2968 * get one from UMA. 2969 */ 2970 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2971 if (rsm) { 2972 rack->r_ctl.rc_num_maps_alloced++; 2973 counter_u64_add(rack_to_alloc, 1); 2974 return (rsm); 2975 } 2976 /* 2977 * Dig in to our aux rsm's (the last two) since 2978 * UMA failed to get us one. 
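 * To summarize the order tried in this function: (1) reuse a "hot"
 * entry from the free list while more than rack_free_cache entries
 * are cached, (2) otherwise allocate from UMA with M_NOWAIT, and
 * (3) as a last resort hand out one of the remaining cached
 * entries, counted as an emergency allocation; NULL is returned
 * only if all three fail.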
2979 */ 2980 if (rack->rc_free_cnt) { 2981 counter_u64_add(rack_to_alloc_emerg, 1); 2982 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2983 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2984 rack->rc_free_cnt--; 2985 return (rsm); 2986 } 2987 return (NULL); 2988 } 2989 2990 static struct rack_sendmap * 2991 rack_alloc_full_limit(struct tcp_rack *rack) 2992 { 2993 if ((V_tcp_map_entries_limit > 0) && 2994 (rack->do_detection == 0) && 2995 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2996 counter_u64_add(rack_to_alloc_limited, 1); 2997 if (!rack->alloc_limit_reported) { 2998 rack->alloc_limit_reported = 1; 2999 counter_u64_add(rack_alloc_limited_conns, 1); 3000 } 3001 return (NULL); 3002 } 3003 return (rack_alloc(rack)); 3004 } 3005 3006 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3007 static struct rack_sendmap * 3008 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3009 { 3010 struct rack_sendmap *rsm; 3011 3012 if (limit_type) { 3013 /* currently there is only one limit type */ 3014 if (V_tcp_map_split_limit > 0 && 3015 (rack->do_detection == 0) && 3016 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 3017 counter_u64_add(rack_split_limited, 1); 3018 if (!rack->alloc_limit_reported) { 3019 rack->alloc_limit_reported = 1; 3020 counter_u64_add(rack_alloc_limited_conns, 1); 3021 } 3022 return (NULL); 3023 } 3024 } 3025 3026 /* allocate and mark in the limit type, if set */ 3027 rsm = rack_alloc(rack); 3028 if (rsm != NULL && limit_type) { 3029 rsm->r_limit_type = limit_type; 3030 rack->r_ctl.rc_num_split_allocs++; 3031 } 3032 return (rsm); 3033 } 3034 3035 static void 3036 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3037 { 3038 if (rsm->r_flags & RACK_APP_LIMITED) { 3039 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3040 rack->r_ctl.rc_app_limited_cnt--; 3041 } 3042 } 3043 if (rsm->r_limit_type) { 3044 /* currently there is only one limit type */ 3045 rack->r_ctl.rc_num_split_allocs--; 3046 } 3047 if (rsm == rack->r_ctl.rc_first_appl) { 3048 if (rack->r_ctl.rc_app_limited_cnt == 0) 3049 rack->r_ctl.rc_first_appl = NULL; 3050 else { 3051 /* Follow the next one out */ 3052 struct rack_sendmap fe; 3053 3054 fe.r_start = rsm->r_nseq_appl; 3055 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3056 } 3057 } 3058 if (rsm == rack->r_ctl.rc_resend) 3059 rack->r_ctl.rc_resend = NULL; 3060 if (rsm == rack->r_ctl.rc_rsm_at_retran) 3061 rack->r_ctl.rc_rsm_at_retran = NULL; 3062 if (rsm == rack->r_ctl.rc_end_appl) 3063 rack->r_ctl.rc_end_appl = NULL; 3064 if (rack->r_ctl.rc_tlpsend == rsm) 3065 rack->r_ctl.rc_tlpsend = NULL; 3066 if (rack->r_ctl.rc_sacklast == rsm) 3067 rack->r_ctl.rc_sacklast = NULL; 3068 memset(rsm, 0, sizeof(struct rack_sendmap)); 3069 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3070 rack->rc_free_cnt++; 3071 } 3072 3073 static void 3074 rack_free_trim(struct tcp_rack *rack) 3075 { 3076 struct rack_sendmap *rsm; 3077 3078 /* 3079 * Free up all the tail entries until 3080 * we get our list down to the limit. 
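 * We trim from the tail and return those entries to UMA; the head of
 * the free list is left alone since it is the most recently freed and
 * therefore the preferred cache-warm candidate for rack_alloc().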
3081 */ 3082 while (rack->rc_free_cnt > rack_free_cache) { 3083 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3084 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3085 rack->rc_free_cnt--; 3086 uma_zfree(rack_zone, rsm); 3087 } 3088 } 3089 3090 3091 static uint32_t 3092 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3093 { 3094 uint64_t srtt, bw, len, tim; 3095 uint32_t segsiz, def_len, minl; 3096 3097 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3098 def_len = rack_def_data_window * segsiz; 3099 if (rack->rc_gp_filled == 0) { 3100 /* 3101 * We have no measurement (IW is in flight?) so 3102 * we can only guess using our data_window sysctl 3103 * value (usually 20MSS). 3104 */ 3105 return (def_len); 3106 } 3107 /* 3108 * Now we have a number of factors to consider. 3109 * 3110 * 1) We have a desired BDP which is usually 3111 * at least 2. 3112 * 2) We have a minimum number of rtt's usually 1 SRTT 3113 * but we allow it too to be more. 3114 * 3) We want to make sure a measurement last N useconds (if 3115 * we have set rack_min_measure_usec. 3116 * 3117 * We handle the first concern here by trying to create a data 3118 * window of max(rack_def_data_window, DesiredBDP). The 3119 * second concern we handle in not letting the measurement 3120 * window end normally until at least the required SRTT's 3121 * have gone by which is done further below in 3122 * rack_enough_for_measurement(). Finally the third concern 3123 * we also handle here by calculating how long that time 3124 * would take at the current BW and then return the 3125 * max of our first calculation and that length. Note 3126 * that if rack_min_measure_usec is 0, we don't deal 3127 * with concern 3. Also for both Concern 1 and 3 an 3128 * application limited period could end the measurement 3129 * earlier. 3130 * 3131 * So lets calculate the BDP with the "known" b/w using 3132 * the SRTT has our rtt and then multiply it by the 3133 * goal. 3134 */ 3135 bw = rack_get_bw(rack); 3136 srtt = (uint64_t)tp->t_srtt; 3137 len = bw * srtt; 3138 len /= (uint64_t)HPTS_USEC_IN_SEC; 3139 len *= max(1, rack_goal_bdp); 3140 /* Now we need to round up to the nearest MSS */ 3141 len = roundup(len, segsiz); 3142 if (rack_min_measure_usec) { 3143 /* Now calculate our min length for this b/w */ 3144 tim = rack_min_measure_usec; 3145 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3146 if (minl == 0) 3147 minl = 1; 3148 minl = roundup(minl, segsiz); 3149 if (len < minl) 3150 len = minl; 3151 } 3152 /* 3153 * Now if we have a very small window we want 3154 * to attempt to get the window that is 3155 * as small as possible. This happens on 3156 * low b/w connections and we don't want to 3157 * span huge numbers of rtt's between measurements. 3158 * 3159 * We basically include 2 over our "MIN window" so 3160 * that the measurement can be shortened (possibly) by 3161 * an ack'ed packet. 3162 */ 3163 if (len < def_len) 3164 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3165 else 3166 return (max((uint32_t)len, def_len)); 3167 3168 } 3169 3170 static int 3171 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3172 { 3173 uint32_t tim, srtts, segsiz; 3174 3175 /* 3176 * Has enough time passed for the GP measurement to be valid? 
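 * We say yes if everything sent has been acked, if the cumulative ack
 * has reached the first app-limited point, or once at least
 * rack_min_srtts worth of the current gp_srtt has elapsed; short of
 * that (or of the minimum byte count) we keep waiting.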
3177 */ 3178 if ((tp->snd_max == tp->snd_una) || 3179 (th_ack == tp->snd_max)){ 3180 /* All is acked */ 3181 *quality = RACK_QUALITY_ALLACKED; 3182 return (1); 3183 } 3184 if (SEQ_LT(th_ack, tp->gput_seq)) { 3185 /* Not enough bytes yet */ 3186 return (0); 3187 } 3188 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3189 if (SEQ_LT(th_ack, tp->gput_ack) && 3190 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3191 /* Not enough bytes yet */ 3192 return (0); 3193 } 3194 if (rack->r_ctl.rc_first_appl && 3195 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3196 /* 3197 * We are up to the app limited send point 3198 * we have to measure irrespective of the time.. 3199 */ 3200 *quality = RACK_QUALITY_APPLIMITED; 3201 return (1); 3202 } 3203 /* Now what about time? */ 3204 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3205 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3206 if (tim >= srtts) { 3207 *quality = RACK_QUALITY_HIGH; 3208 return (1); 3209 } 3210 /* Nope not even a full SRTT has passed */ 3211 return (0); 3212 } 3213 3214 static void 3215 rack_log_timely(struct tcp_rack *rack, 3216 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3217 uint64_t up_bnd, int line, uint8_t method) 3218 { 3219 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3220 union tcp_log_stackspecific log; 3221 struct timeval tv; 3222 3223 memset(&log, 0, sizeof(log)); 3224 log.u_bbr.flex1 = logged; 3225 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3226 log.u_bbr.flex2 <<= 4; 3227 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3228 log.u_bbr.flex2 <<= 4; 3229 log.u_bbr.flex2 |= rack->rc_gp_incr; 3230 log.u_bbr.flex2 <<= 4; 3231 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3232 log.u_bbr.flex3 = rack->rc_gp_incr; 3233 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3234 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3235 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3236 log.u_bbr.flex7 = rack->rc_gp_bwred; 3237 log.u_bbr.flex8 = method; 3238 log.u_bbr.cur_del_rate = cur_bw; 3239 log.u_bbr.delRate = low_bnd; 3240 log.u_bbr.bw_inuse = up_bnd; 3241 log.u_bbr.rttProp = rack_get_bw(rack); 3242 log.u_bbr.pkt_epoch = line; 3243 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3244 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3245 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3246 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3247 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3248 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3249 log.u_bbr.cwnd_gain <<= 1; 3250 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3251 log.u_bbr.cwnd_gain <<= 1; 3252 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3253 log.u_bbr.cwnd_gain <<= 1; 3254 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3255 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3256 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3257 &rack->rc_inp->inp_socket->so_rcv, 3258 &rack->rc_inp->inp_socket->so_snd, 3259 TCP_TIMELY_WORK, 0, 3260 0, &log, false, &tv); 3261 } 3262 } 3263 3264 static int 3265 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3266 { 3267 /* 3268 * Before we increase we need to know if 3269 * the estimate just made was less than 3270 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3271 * 3272 * If we already are pacing at a fast enough 3273 * rate to push us faster there is no sense of 3274 * increasing. 3275 * 3276 * We first caculate our actual pacing rate (ss or ca multipler 3277 * times our cur_bw). 
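 * (e.g. with a cur_bw of 1,000,000 bytes/sec and a multiplier of 120
 * we are actually pacing at 1,200,000 bytes/sec.)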
3278 * 3279 * Then we take the last measured rate and multipy by our 3280 * maximum pacing overage to give us a max allowable rate. 3281 * 3282 * If our act_rate is smaller than our max_allowable rate 3283 * then we should increase. Else we should hold steady. 3284 * 3285 */ 3286 uint64_t act_rate, max_allow_rate; 3287 3288 if (rack_timely_no_stopping) 3289 return (1); 3290 3291 if ((cur_bw == 0) || (last_bw_est == 0)) { 3292 /* 3293 * Initial startup case or 3294 * everything is acked case. 3295 */ 3296 rack_log_timely(rack, mult, cur_bw, 0, 0, 3297 __LINE__, 9); 3298 return (1); 3299 } 3300 if (mult <= 100) { 3301 /* 3302 * We can always pace at or slightly above our rate. 3303 */ 3304 rack_log_timely(rack, mult, cur_bw, 0, 0, 3305 __LINE__, 9); 3306 return (1); 3307 } 3308 act_rate = cur_bw * (uint64_t)mult; 3309 act_rate /= 100; 3310 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3311 max_allow_rate /= 100; 3312 if (act_rate < max_allow_rate) { 3313 /* 3314 * Here the rate we are actually pacing at 3315 * is smaller than 10% above our last measurement. 3316 * This means we are pacing below what we would 3317 * like to try to achieve (plus some wiggle room). 3318 */ 3319 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3320 __LINE__, 9); 3321 return (1); 3322 } else { 3323 /* 3324 * Here we are already pacing at least rack_max_per_above(10%) 3325 * what we are getting back. This indicates most likely 3326 * that we are being limited (cwnd/rwnd/app) and can't 3327 * get any more b/w. There is no sense of trying to 3328 * raise up the pacing rate its not speeding us up 3329 * and we already are pacing faster than we are getting. 3330 */ 3331 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3332 __LINE__, 8); 3333 return (0); 3334 } 3335 } 3336 3337 static void 3338 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3339 { 3340 /* 3341 * When we drag bottom, we want to assure 3342 * that no multiplier is below 1.0, if so 3343 * we want to restore it to at least that. 3344 */ 3345 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3346 /* This is unlikely we usually do not touch recovery */ 3347 rack->r_ctl.rack_per_of_gp_rec = 100; 3348 } 3349 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3350 rack->r_ctl.rack_per_of_gp_ca = 100; 3351 } 3352 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3353 rack->r_ctl.rack_per_of_gp_ss = 100; 3354 } 3355 } 3356 3357 static void 3358 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3359 { 3360 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3361 rack->r_ctl.rack_per_of_gp_ca = 100; 3362 } 3363 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3364 rack->r_ctl.rack_per_of_gp_ss = 100; 3365 } 3366 } 3367 3368 static void 3369 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3370 { 3371 int32_t calc, logged, plus; 3372 3373 logged = 0; 3374 3375 if (override) { 3376 /* 3377 * override is passed when we are 3378 * loosing b/w and making one last 3379 * gasp at trying to not loose out 3380 * to a new-reno flow. 3381 */ 3382 goto extra_boost; 3383 } 3384 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3385 if (rack->rc_gp_incr && 3386 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3387 /* 3388 * Reset and get 5 strokes more before the boost. Note 3389 * that the count is 0 based so we have to add one. 
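 * For example, if rack_gp_increase_per were 2, the boosted step taken
 * below would be 2 * RACK_TIMELY_CNT_BOOST percentage points rather
 * than the usual 2.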
3390 */ 3391 extra_boost: 3392 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3393 rack->rc_gp_timely_inc_cnt = 0; 3394 } else 3395 plus = (uint32_t)rack_gp_increase_per; 3396 /* Must be at least 1% increase for true timely increases */ 3397 if ((plus < 1) && 3398 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3399 plus = 1; 3400 if (rack->rc_gp_saw_rec && 3401 (rack->rc_gp_no_rec_chg == 0) && 3402 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3403 rack->r_ctl.rack_per_of_gp_rec)) { 3404 /* We have been in recovery ding it too */ 3405 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3406 if (calc > 0xffff) 3407 calc = 0xffff; 3408 logged |= 1; 3409 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3410 if (rack_per_upper_bound_ss && 3411 (rack->rc_dragged_bottom == 0) && 3412 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3413 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3414 } 3415 if (rack->rc_gp_saw_ca && 3416 (rack->rc_gp_saw_ss == 0) && 3417 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3418 rack->r_ctl.rack_per_of_gp_ca)) { 3419 /* In CA */ 3420 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3421 if (calc > 0xffff) 3422 calc = 0xffff; 3423 logged |= 2; 3424 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3425 if (rack_per_upper_bound_ca && 3426 (rack->rc_dragged_bottom == 0) && 3427 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3428 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3429 } 3430 if (rack->rc_gp_saw_ss && 3431 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3432 rack->r_ctl.rack_per_of_gp_ss)) { 3433 /* In SS */ 3434 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3435 if (calc > 0xffff) 3436 calc = 0xffff; 3437 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3438 if (rack_per_upper_bound_ss && 3439 (rack->rc_dragged_bottom == 0) && 3440 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3441 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3442 logged |= 4; 3443 } 3444 if (logged && 3445 (rack->rc_gp_incr == 0)){ 3446 /* Go into increment mode */ 3447 rack->rc_gp_incr = 1; 3448 rack->rc_gp_timely_inc_cnt = 0; 3449 } 3450 if (rack->rc_gp_incr && 3451 logged && 3452 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3453 rack->rc_gp_timely_inc_cnt++; 3454 } 3455 rack_log_timely(rack, logged, plus, 0, 0, 3456 __LINE__, 1); 3457 } 3458 3459 static uint32_t 3460 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3461 { 3462 /* 3463 * norm_grad = rtt_diff / minrtt; 3464 * new_per = curper * (1 - B * norm_grad) 3465 * 3466 * B = rack_gp_decrease_per (default 10%) 3467 * rtt_dif = input var current rtt-diff 3468 * curper = input var current percentage 3469 * minrtt = from rack filter 3470 * 3471 */ 3472 uint64_t perf; 3473 3474 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3475 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3476 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3477 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3478 (uint64_t)1000000)) / 3479 (uint64_t)1000000); 3480 if (perf > curper) { 3481 /* TSNH */ 3482 perf = curper - 1; 3483 } 3484 return ((uint32_t)perf); 3485 } 3486 3487 static uint32_t 3488 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3489 { 3490 /* 3491 * highrttthresh 3492 * result = curper * (1 - (B * ( 1 - ------ )) 3493 * gp_srtt 3494 * 3495 * B = rack_gp_decrease_per (default 10%) 3496 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3497 */ 3498 uint64_t perf; 3499 uint32_t 
highrttthresh; 3500 3501 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3502 3503 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3504 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3505 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3506 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3507 return (perf); 3508 } 3509 3510 static void 3511 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3512 { 3513 uint64_t logvar, logvar2, logvar3; 3514 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3515 3516 if (rack->rc_gp_incr) { 3517 /* Turn off increment counting */ 3518 rack->rc_gp_incr = 0; 3519 rack->rc_gp_timely_inc_cnt = 0; 3520 } 3521 ss_red = ca_red = rec_red = 0; 3522 logged = 0; 3523 /* Calculate the reduction value */ 3524 if (rtt_diff < 0) { 3525 rtt_diff *= -1; 3526 } 3527 /* Must be at least 1% reduction */ 3528 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3529 /* We have been in recovery ding it too */ 3530 if (timely_says == 2) { 3531 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3532 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3533 if (alt < new_per) 3534 val = alt; 3535 else 3536 val = new_per; 3537 } else 3538 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3539 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3540 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3541 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3542 } else { 3543 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3544 rec_red = 0; 3545 } 3546 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3547 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3548 logged |= 1; 3549 } 3550 if (rack->rc_gp_saw_ss) { 3551 /* Sent in SS */ 3552 if (timely_says == 2) { 3553 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3554 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3555 if (alt < new_per) 3556 val = alt; 3557 else 3558 val = new_per; 3559 } else 3560 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3561 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3562 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3563 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3564 } else { 3565 ss_red = new_per; 3566 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3567 logvar = new_per; 3568 logvar <<= 32; 3569 logvar |= alt; 3570 logvar2 = (uint32_t)rtt; 3571 logvar2 <<= 32; 3572 logvar2 |= (uint32_t)rtt_diff; 3573 logvar3 = rack_gp_rtt_maxmul; 3574 logvar3 <<= 32; 3575 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3576 rack_log_timely(rack, timely_says, 3577 logvar2, logvar3, 3578 logvar, __LINE__, 10); 3579 } 3580 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3581 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3582 logged |= 4; 3583 } else if (rack->rc_gp_saw_ca) { 3584 /* Sent in CA */ 3585 if (timely_says == 2) { 3586 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3587 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3588 if (alt < new_per) 3589 val = alt; 3590 else 3591 val = new_per; 3592 } else 3593 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3594 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3595 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3596 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3597 } else { 3598 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3599 ca_red = 0; 3600 logvar = new_per; 3601 logvar <<= 32; 3602 logvar |= alt; 3603 logvar2 = (uint32_t)rtt; 3604 logvar2 <<= 32; 3605 logvar2 |= (uint32_t)rtt_diff; 3606 logvar3 = rack_gp_rtt_maxmul; 3607 logvar3 <<= 32; 3608 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3609 rack_log_timely(rack, timely_says, 3610 logvar2, logvar3, 3611 logvar, __LINE__, 10); 3612 } 3613 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3614 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3615 logged |= 2; 3616 } 3617 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3618 rack->rc_gp_timely_dec_cnt++; 3619 if (rack_timely_dec_clear && 3620 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3621 rack->rc_gp_timely_dec_cnt = 0; 3622 } 3623 logvar = ss_red; 3624 logvar <<= 32; 3625 logvar |= ca_red; 3626 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3627 __LINE__, 2); 3628 } 3629 3630 static void 3631 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3632 uint32_t rtt, uint32_t line, uint8_t reas) 3633 { 3634 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3635 union tcp_log_stackspecific log; 3636 struct timeval tv; 3637 3638 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3639 log.u_bbr.flex1 = line; 3640 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3641 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3642 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3643 log.u_bbr.flex5 = rtt; 3644 log.u_bbr.flex6 = rack->rc_highly_buffered; 3645 log.u_bbr.flex6 <<= 1; 3646 log.u_bbr.flex6 |= rack->forced_ack; 3647 log.u_bbr.flex6 <<= 1; 3648 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3649 log.u_bbr.flex6 <<= 1; 3650 log.u_bbr.flex6 |= rack->in_probe_rtt; 3651 log.u_bbr.flex6 <<= 1; 3652 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3653 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3654 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3655 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3656 log.u_bbr.flex8 = reas; 3657 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3658 log.u_bbr.delRate = rack_get_bw(rack); 3659 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3660 log.u_bbr.cur_del_rate <<= 32; 3661 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3662 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3663 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3664 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3665 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3666 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3667 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3668 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3669 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3670 log.u_bbr.rttProp = us_cts; 3671 log.u_bbr.rttProp <<= 32; 3672 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3673 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3674 &rack->rc_inp->inp_socket->so_rcv, 3675 &rack->rc_inp->inp_socket->so_snd, 3676 BBR_LOG_RTT_SHRINKS, 0, 3677 0, &log, false, &rack->r_ctl.act_rcv_time); 3678 } 3679 } 3680 3681 static void 3682 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3683 { 3684 uint64_t bwdp; 3685 3686 bwdp = rack_get_bw(rack); 3687 bwdp *= (uint64_t)rtt; 3688 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3689 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3690 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3691 /* 3692 * A window protocol must be able to have 4 packets 3693 * outstanding as the floor in order to function 3694 * (especially considering delayed ack :D). 3695 */ 3696 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3697 } 3698 } 3699 3700 static void 3701 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3702 { 3703 /** 3704 * ProbeRTT is a bit different in rack_pacing than in 3705 * BBR. It is like BBR in that it uses the lowering of 3706 * the RTT as a signal that we saw something new and 3707 * counts from there for how long between. But it is 3708 * different in that its quite simple. It does not 3709 * play with the cwnd and wait until we get down 3710 * to N segments outstanding and hold that for 3711 * 200ms. Instead it just sets the pacing reduction 3712 * rate to a set percentage (70 by default) and hold 3713 * that for a number of recent GP Srtt's. 3714 */ 3715 uint32_t segsiz; 3716 3717 if (rack->rc_gp_dyn_mul == 0) 3718 return; 3719 3720 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3721 /* We are idle */ 3722 return; 3723 } 3724 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3725 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3726 /* 3727 * Stop the goodput now, the idea here is 3728 * that future measurements with in_probe_rtt 3729 * won't register if they are not greater so 3730 * we want to get what info (if any) is available 3731 * now. 3732 */ 3733 rack_do_goodput_measurement(rack->rc_tp, rack, 3734 rack->rc_tp->snd_una, __LINE__, 3735 RACK_QUALITY_PROBERTT); 3736 } 3737 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3738 rack->r_ctl.rc_time_probertt_entered = us_cts; 3739 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3740 rack->r_ctl.rc_pace_min_segs); 3741 rack->in_probe_rtt = 1; 3742 rack->measure_saw_probe_rtt = 1; 3743 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3744 rack->r_ctl.rc_time_probertt_starts = 0; 3745 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3746 if (rack_probertt_use_min_rtt_entry) 3747 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3748 else 3749 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3750 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3751 __LINE__, RACK_RTTS_ENTERPROBE); 3752 } 3753 3754 static void 3755 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3756 { 3757 struct rack_sendmap *rsm; 3758 uint32_t segsiz; 3759 3760 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3761 rack->r_ctl.rc_pace_min_segs); 3762 rack->in_probe_rtt = 0; 3763 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3764 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3765 /* 3766 * Stop the goodput now, the idea here is 3767 * that future measurements with in_probe_rtt 3768 * won't register if they are not greater so 3769 * we want to get what info (if any) is available 3770 * now. 3771 */ 3772 rack_do_goodput_measurement(rack->rc_tp, rack, 3773 rack->rc_tp->snd_una, __LINE__, 3774 RACK_QUALITY_PROBERTT); 3775 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3776 /* 3777 * We don't have enough data to make a measurement. 3778 * So lets just stop and start here after exiting 3779 * probe-rtt. We probably are not interested in 3780 * the results anyway. 3781 */ 3782 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3783 } 3784 /* 3785 * Measurements through the current snd_max are going 3786 * to be limited by the slower pacing rate. 
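 * (Everything sent while in_probe_rtt was paced at the reduced
 * rack_per_of_gp_probertt percentage, so a goodput sample spanning
 * those sends would understate what the path can really deliver.)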
3787 * 3788 * We need to mark these as app-limited so we 3789 * don't collapse the b/w. 3790 */ 3791 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3792 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3793 if (rack->r_ctl.rc_app_limited_cnt == 0) 3794 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3795 else { 3796 /* 3797 * Go out to the end app limited and mark 3798 * this new one as next and move the end_appl up 3799 * to this guy. 3800 */ 3801 if (rack->r_ctl.rc_end_appl) 3802 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3803 rack->r_ctl.rc_end_appl = rsm; 3804 } 3805 rsm->r_flags |= RACK_APP_LIMITED; 3806 rack->r_ctl.rc_app_limited_cnt++; 3807 } 3808 /* 3809 * Now, we need to examine our pacing rate multipliers. 3810 * If its under 100%, we need to kick it back up to 3811 * 100%. We also don't let it be over our "max" above 3812 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3813 * Note setting clamp_atexit_prtt to 0 has the effect 3814 * of setting CA/SS to 100% always at exit (which is 3815 * the default behavior). 3816 */ 3817 if (rack_probertt_clear_is) { 3818 rack->rc_gp_incr = 0; 3819 rack->rc_gp_bwred = 0; 3820 rack->rc_gp_timely_inc_cnt = 0; 3821 rack->rc_gp_timely_dec_cnt = 0; 3822 } 3823 /* Do we do any clamping at exit? */ 3824 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3825 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3826 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3827 } 3828 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3829 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3830 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3831 } 3832 /* 3833 * Lets set rtt_diff to 0, so that we will get a "boost" 3834 * after exiting. 3835 */ 3836 rack->r_ctl.rc_rtt_diff = 0; 3837 3838 /* Clear all flags so we start fresh */ 3839 rack->rc_tp->t_bytes_acked = 0; 3840 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3841 /* 3842 * If configured to, set the cwnd and ssthresh to 3843 * our targets. 3844 */ 3845 if (rack_probe_rtt_sets_cwnd) { 3846 uint64_t ebdp; 3847 uint32_t setto; 3848 3849 /* Set ssthresh so we get into CA once we hit our target */ 3850 if (rack_probertt_use_min_rtt_exit == 1) { 3851 /* Set to min rtt */ 3852 rack_set_prtt_target(rack, segsiz, 3853 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3854 } else if (rack_probertt_use_min_rtt_exit == 2) { 3855 /* Set to current gp rtt */ 3856 rack_set_prtt_target(rack, segsiz, 3857 rack->r_ctl.rc_gp_srtt); 3858 } else if (rack_probertt_use_min_rtt_exit == 3) { 3859 /* Set to entry gp rtt */ 3860 rack_set_prtt_target(rack, segsiz, 3861 rack->r_ctl.rc_entry_gp_rtt); 3862 } else { 3863 uint64_t sum; 3864 uint32_t setval; 3865 3866 sum = rack->r_ctl.rc_entry_gp_rtt; 3867 sum *= 10; 3868 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3869 if (sum >= 20) { 3870 /* 3871 * A highly buffered path needs 3872 * cwnd space for timely to work. 3873 * Lets set things up as if 3874 * we are heading back here again. 3875 */ 3876 setval = rack->r_ctl.rc_entry_gp_rtt; 3877 } else if (sum >= 15) { 3878 /* 3879 * Lets take the smaller of the 3880 * two since we are just somewhat 3881 * buffered. 3882 */ 3883 setval = rack->r_ctl.rc_gp_srtt; 3884 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3885 setval = rack->r_ctl.rc_entry_gp_rtt; 3886 } else { 3887 /* 3888 * Here we are not highly buffered 3889 * and should pick the min we can to 3890 * keep from causing loss. 
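 * A sum below 15 means the entry gp rtt was less than 1.5 times the
 * current gp_srtt, i.e. whatever queue we had built has largely
 * drained, so the filtered min rtt is a safe target.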
3891 */ 3892 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3893 } 3894 rack_set_prtt_target(rack, segsiz, 3895 setval); 3896 } 3897 if (rack_probe_rtt_sets_cwnd > 1) { 3898 /* There is a percentage here to boost */ 3899 ebdp = rack->r_ctl.rc_target_probertt_flight; 3900 ebdp *= rack_probe_rtt_sets_cwnd; 3901 ebdp /= 100; 3902 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3903 } else 3904 setto = rack->r_ctl.rc_target_probertt_flight; 3905 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3906 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3907 /* Enforce a min */ 3908 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3909 } 3910 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3911 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3912 } 3913 rack_log_rtt_shrinks(rack, us_cts, 3914 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3915 __LINE__, RACK_RTTS_EXITPROBE); 3916 /* Clear times last so log has all the info */ 3917 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3918 rack->r_ctl.rc_time_probertt_entered = us_cts; 3919 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3920 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3921 } 3922 3923 static void 3924 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3925 { 3926 /* Check in on probe-rtt */ 3927 if (rack->rc_gp_filled == 0) { 3928 /* We do not do p-rtt unless we have gp measurements */ 3929 return; 3930 } 3931 if (rack->in_probe_rtt) { 3932 uint64_t no_overflow; 3933 uint32_t endtime, must_stay; 3934 3935 if (rack->r_ctl.rc_went_idle_time && 3936 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3937 /* 3938 * We went idle during prtt, just exit now. 3939 */ 3940 rack_exit_probertt(rack, us_cts); 3941 } else if (rack_probe_rtt_safety_val && 3942 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3943 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3944 /* 3945 * Probe RTT safety value triggered! 3946 */ 3947 rack_log_rtt_shrinks(rack, us_cts, 3948 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3949 __LINE__, RACK_RTTS_SAFETY); 3950 rack_exit_probertt(rack, us_cts); 3951 } 3952 /* Calculate the max we will wait */ 3953 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3954 if (rack->rc_highly_buffered) 3955 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3956 /* Calculate the min we must wait */ 3957 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3958 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3959 TSTMP_LT(us_cts, endtime)) { 3960 uint32_t calc; 3961 /* Do we lower more? 
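 * Each full gp_srtt we have already spent in probe-rtt takes another
 * rack_per_of_gp_probertt_reduce percentage points off the pacing
 * multiplier, floored at rack_per_of_gp_lowthresh.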
*/ 3962 no_exit: 3963 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3964 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3965 else 3966 calc = 0; 3967 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3968 if (calc) { 3969 /* Maybe */ 3970 calc *= rack_per_of_gp_probertt_reduce; 3971 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3972 /* Limit it too */ 3973 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3974 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3975 } 3976 /* We must reach target or the time set */ 3977 return; 3978 } 3979 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3980 if ((TSTMP_LT(us_cts, must_stay) && 3981 rack->rc_highly_buffered) || 3982 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3983 rack->r_ctl.rc_target_probertt_flight)) { 3984 /* We are not past the must_stay time */ 3985 goto no_exit; 3986 } 3987 rack_log_rtt_shrinks(rack, us_cts, 3988 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3989 __LINE__, RACK_RTTS_REACHTARGET); 3990 rack->r_ctl.rc_time_probertt_starts = us_cts; 3991 if (rack->r_ctl.rc_time_probertt_starts == 0) 3992 rack->r_ctl.rc_time_probertt_starts = 1; 3993 /* Restore back to our rate we want to pace at in prtt */ 3994 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3995 } 3996 /* 3997 * Setup our end time, some number of gp_srtts plus 200ms. 3998 */ 3999 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4000 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4001 if (rack_probertt_gpsrtt_cnt_div) 4002 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4003 else 4004 endtime = 0; 4005 endtime += rack_min_probertt_hold; 4006 endtime += rack->r_ctl.rc_time_probertt_starts; 4007 if (TSTMP_GEQ(us_cts, endtime)) { 4008 /* yes, exit probertt */ 4009 rack_exit_probertt(rack, us_cts); 4010 } 4011 4012 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4013 /* Go into probertt, its been too long since we went lower */ 4014 rack_enter_probertt(rack, us_cts); 4015 } 4016 } 4017 4018 static void 4019 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4020 uint32_t rtt, int32_t rtt_diff) 4021 { 4022 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4023 uint32_t losses; 4024 4025 if ((rack->rc_gp_dyn_mul == 0) || 4026 (rack->use_fixed_rate) || 4027 (rack->in_probe_rtt) || 4028 (rack->rc_always_pace == 0)) { 4029 /* No dynamic GP multipler in play */ 4030 return; 4031 } 4032 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4033 cur_bw = rack_get_bw(rack); 4034 /* Calculate our up and down range */ 4035 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4036 up_bnd /= 100; 4037 up_bnd += rack->r_ctl.last_gp_comp_bw; 4038 4039 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4040 subfr /= 100; 4041 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4042 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4043 /* 4044 * This is the case where our RTT is above 4045 * the max target and we have been configured 4046 * to just do timely no bonus up stuff in that case. 4047 * 4048 * There are two configurations, set to 1, and we 4049 * just do timely if we are over our max. If its 4050 * set above 1 then we slam the multipliers down 4051 * to 100 and then decrement per timely. 
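 * (A setting of 1 just skips any bonus raise here, while a value
 * above 1 additionally clamps the CA/SS multipliers to 100 via
 * rack_validate_multipliers_at_or_below_100() before the timely
 * decrease is applied.)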
4052 */ 4053 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4054 __LINE__, 3); 4055 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4056 rack_validate_multipliers_at_or_below_100(rack); 4057 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4058 } else if ((last_bw_est < low_bnd) && !losses) { 4059 /* 4060 * We are decreasing this is a bit complicated this 4061 * means we are loosing ground. This could be 4062 * because another flow entered and we are competing 4063 * for b/w with it. This will push the RTT up which 4064 * makes timely unusable unless we want to get shoved 4065 * into a corner and just be backed off (the age 4066 * old problem with delay based CC). 4067 * 4068 * On the other hand if it was a route change we 4069 * would like to stay somewhat contained and not 4070 * blow out the buffers. 4071 */ 4072 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4073 __LINE__, 3); 4074 rack->r_ctl.last_gp_comp_bw = cur_bw; 4075 if (rack->rc_gp_bwred == 0) { 4076 /* Go into reduction counting */ 4077 rack->rc_gp_bwred = 1; 4078 rack->rc_gp_timely_dec_cnt = 0; 4079 } 4080 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 4081 (timely_says == 0)) { 4082 /* 4083 * Push another time with a faster pacing 4084 * to try to gain back (we include override to 4085 * get a full raise factor). 4086 */ 4087 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4088 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4089 (timely_says == 0) || 4090 (rack_down_raise_thresh == 0)) { 4091 /* 4092 * Do an override up in b/w if we were 4093 * below the threshold or if the threshold 4094 * is zero we always do the raise. 4095 */ 4096 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4097 } else { 4098 /* Log it stays the same */ 4099 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4100 __LINE__, 11); 4101 } 4102 rack->rc_gp_timely_dec_cnt++; 4103 /* We are not incrementing really no-count */ 4104 rack->rc_gp_incr = 0; 4105 rack->rc_gp_timely_inc_cnt = 0; 4106 } else { 4107 /* 4108 * Lets just use the RTT 4109 * information and give up 4110 * pushing. 4111 */ 4112 goto use_timely; 4113 } 4114 } else if ((timely_says != 2) && 4115 !losses && 4116 (last_bw_est > up_bnd)) { 4117 /* 4118 * We are increasing b/w lets keep going, updating 4119 * our b/w and ignoring any timely input, unless 4120 * of course we are at our max raise (if there is one). 4121 */ 4122 4123 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4124 __LINE__, 3); 4125 rack->r_ctl.last_gp_comp_bw = cur_bw; 4126 if (rack->rc_gp_saw_ss && 4127 rack_per_upper_bound_ss && 4128 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 4129 /* 4130 * In cases where we can't go higher 4131 * we should just use timely. 4132 */ 4133 goto use_timely; 4134 } 4135 if (rack->rc_gp_saw_ca && 4136 rack_per_upper_bound_ca && 4137 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4138 /* 4139 * In cases where we can't go higher 4140 * we should just use timely. 
4141 */ 4142 goto use_timely; 4143 } 4144 rack->rc_gp_bwred = 0; 4145 rack->rc_gp_timely_dec_cnt = 0; 4146 /* You get a set number of pushes if timely is trying to reduce */ 4147 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4148 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4149 } else { 4150 /* Log it stays the same */ 4151 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4152 __LINE__, 12); 4153 } 4154 return; 4155 } else { 4156 /* 4157 * We are staying between the lower and upper range bounds 4158 * so use timely to decide. 4159 */ 4160 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4161 __LINE__, 3); 4162 use_timely: 4163 if (timely_says) { 4164 rack->rc_gp_incr = 0; 4165 rack->rc_gp_timely_inc_cnt = 0; 4166 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4167 !losses && 4168 (last_bw_est < low_bnd)) { 4169 /* We are loosing ground */ 4170 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4171 rack->rc_gp_timely_dec_cnt++; 4172 /* We are not incrementing really no-count */ 4173 rack->rc_gp_incr = 0; 4174 rack->rc_gp_timely_inc_cnt = 0; 4175 } else 4176 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4177 } else { 4178 rack->rc_gp_bwred = 0; 4179 rack->rc_gp_timely_dec_cnt = 0; 4180 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4181 } 4182 } 4183 } 4184 4185 static int32_t 4186 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4187 { 4188 int32_t timely_says; 4189 uint64_t log_mult, log_rtt_a_diff; 4190 4191 log_rtt_a_diff = rtt; 4192 log_rtt_a_diff <<= 32; 4193 log_rtt_a_diff |= (uint32_t)rtt_diff; 4194 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4195 rack_gp_rtt_maxmul)) { 4196 /* Reduce the b/w multipler */ 4197 timely_says = 2; 4198 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4199 log_mult <<= 32; 4200 log_mult |= prev_rtt; 4201 rack_log_timely(rack, timely_says, log_mult, 4202 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4203 log_rtt_a_diff, __LINE__, 4); 4204 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4205 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4206 max(rack_gp_rtt_mindiv , 1)))) { 4207 /* Increase the b/w multipler */ 4208 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4209 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4210 max(rack_gp_rtt_mindiv , 1)); 4211 log_mult <<= 32; 4212 log_mult |= prev_rtt; 4213 timely_says = 0; 4214 rack_log_timely(rack, timely_says, log_mult , 4215 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4216 log_rtt_a_diff, __LINE__, 5); 4217 } else { 4218 /* 4219 * Use a gradient to find it the timely gradient 4220 * is: 4221 * grad = rc_rtt_diff / min_rtt; 4222 * 4223 * anything below or equal to 0 will be 4224 * a increase indication. Anything above 4225 * zero is a decrease. Note we take care 4226 * of the actual gradient calculation 4227 * in the reduction (its not needed for 4228 * increase). 
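 *
 * For example, with a filtered min_rtt of 10000 usec a rc_rtt_diff of
 * +2000 corresponds to a 0.2 gradient and a decrease (timely_says 1),
 * while a rc_rtt_diff of -1500 simply signals an increase
 * (timely_says 0).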
4229 */ 4230 log_mult = prev_rtt; 4231 if (rtt_diff <= 0) { 4232 /* 4233 * Rttdiff is less than zero, increase the 4234 * b/w multipler (its 0 or negative) 4235 */ 4236 timely_says = 0; 4237 rack_log_timely(rack, timely_says, log_mult, 4238 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4239 } else { 4240 /* Reduce the b/w multipler */ 4241 timely_says = 1; 4242 rack_log_timely(rack, timely_says, log_mult, 4243 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4244 } 4245 } 4246 return (timely_says); 4247 } 4248 4249 static void 4250 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4251 tcp_seq th_ack, int line, uint8_t quality) 4252 { 4253 uint64_t tim, bytes_ps, ltim, stim, utim; 4254 uint32_t segsiz, bytes, reqbytes, us_cts; 4255 int32_t gput, new_rtt_diff, timely_says; 4256 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4257 int did_add = 0; 4258 4259 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4260 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4261 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4262 tim = us_cts - tp->gput_ts; 4263 else 4264 tim = 0; 4265 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4266 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4267 else 4268 stim = 0; 4269 /* 4270 * Use the larger of the send time or ack time. This prevents us 4271 * from being influenced by ack artifacts to come up with too 4272 * high of measurement. Note that since we are spanning over many more 4273 * bytes in most of our measurements hopefully that is less likely to 4274 * occur. 4275 */ 4276 if (tim > stim) 4277 utim = max(tim, 1); 4278 else 4279 utim = max(stim, 1); 4280 /* Lets get a msec time ltim too for the old stuff */ 4281 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4282 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4283 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4284 if ((tim == 0) && (stim == 0)) { 4285 /* 4286 * Invalid measurement time, maybe 4287 * all on one ack/one send? 4288 */ 4289 bytes = 0; 4290 bytes_ps = 0; 4291 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4292 0, 0, 0, 10, __LINE__, NULL, quality); 4293 goto skip_measurement; 4294 } 4295 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4296 /* We never made a us_rtt measurement? */ 4297 bytes = 0; 4298 bytes_ps = 0; 4299 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4300 0, 0, 0, 10, __LINE__, NULL, quality); 4301 goto skip_measurement; 4302 } 4303 /* 4304 * Calculate the maximum possible b/w this connection 4305 * could have. We base our calculation on the lowest 4306 * rtt we have seen during the measurement and the 4307 * largest rwnd the client has given us in that time. This 4308 * forms a BDP that is the maximum that we could ever 4309 * get to the client. Anything larger is not valid. 4310 * 4311 * I originally had code here that rejected measurements 4312 * where the time was less than 1/2 the latest us_rtt. 4313 * But after thinking on that I realized its wrong since 4314 * say you had a 150Mbps or even 1Gbps link, and you 4315 * were a long way away.. example I am in Europe (100ms rtt) 4316 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4317 * bytes my time would be 1.2ms, and yet my rtt would say 4318 * the measurement was invalid the time was < 50ms. The 4319 * same thing is true for 150Mb (8ms of time). 
4320 * 4321 * A better way I realized is to look at what the maximum 4322 * the connection could possibly do. This is gated on 4323 * the lowest RTT we have seen and the highest rwnd. 4324 * We should in theory never exceed that, if we are 4325 * then something on the path is storing up packets 4326 * and then feeding them all at once to our endpoint 4327 * messing up our measurement. 4328 */ 4329 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4330 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4331 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4332 if (SEQ_LT(th_ack, tp->gput_seq)) { 4333 /* No measurement can be made */ 4334 bytes = 0; 4335 bytes_ps = 0; 4336 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4337 0, 0, 0, 10, __LINE__, NULL, quality); 4338 goto skip_measurement; 4339 } else 4340 bytes = (th_ack - tp->gput_seq); 4341 bytes_ps = (uint64_t)bytes; 4342 /* 4343 * Don't measure a b/w for pacing unless we have gotten at least 4344 * an initial windows worth of data in this measurement interval. 4345 * 4346 * Small numbers of bytes get badly influenced by delayed ack and 4347 * other artifacts. Note we take the initial window or our 4348 * defined minimum GP (defaulting to 10 which hopefully is the 4349 * IW). 4350 */ 4351 if (rack->rc_gp_filled == 0) { 4352 /* 4353 * The initial estimate is special. We 4354 * have blasted out an IW worth of packets 4355 * without a real valid ack ts results. We 4356 * then setup the app_limited_needs_set flag, 4357 * this should get the first ack in (probably 2 4358 * MSS worth) to be recorded as the timestamp. 4359 * We thus allow a smaller number of bytes i.e. 4360 * IW - 2MSS. 4361 */ 4362 reqbytes -= (2 * segsiz); 4363 /* Also lets fill previous for our first measurement to be neutral */ 4364 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4365 } 4366 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4367 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4368 rack->r_ctl.rc_app_limited_cnt, 4369 0, 0, 10, __LINE__, NULL, quality); 4370 goto skip_measurement; 4371 } 4372 /* 4373 * We now need to calculate the Timely like status so 4374 * we can update (possibly) the b/w multipliers. 4375 */ 4376 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4377 if (rack->rc_gp_filled == 0) { 4378 /* No previous reading */ 4379 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4380 } else { 4381 if (rack->measure_saw_probe_rtt == 0) { 4382 /* 4383 * We don't want a probertt to be counted 4384 * since it will be negative incorrectly. We 4385 * expect to be reducing the RTT when we 4386 * pace at a slower rate. 4387 */ 4388 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4389 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4390 } 4391 } 4392 timely_says = rack_make_timely_judgement(rack, 4393 rack->r_ctl.rc_gp_srtt, 4394 rack->r_ctl.rc_rtt_diff, 4395 rack->r_ctl.rc_prev_gp_srtt 4396 ); 4397 bytes_ps *= HPTS_USEC_IN_SEC; 4398 bytes_ps /= utim; 4399 if (bytes_ps > rack->r_ctl.last_max_bw) { 4400 /* 4401 * Something is on path playing 4402 * since this b/w is not possible based 4403 * on our BDP (highest rwnd and lowest rtt 4404 * we saw in the measurement window). 4405 * 4406 * Another option here would be to 4407 * instead skip the measurement. 
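 *
 * As a sanity check on the cap itself: a peak rwnd of 1,000,000 bytes
 * over a lowest observed rtt of 100,000 usec bounds the connection at
 * 10,000,000 bytes/sec, and any sample above that is clamped here.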
4408 */ 4409 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4410 bytes_ps, rack->r_ctl.last_max_bw, 0, 4411 11, __LINE__, NULL, quality); 4412 bytes_ps = rack->r_ctl.last_max_bw; 4413 } 4414 /* We store gp for b/w in bytes per second */ 4415 if (rack->rc_gp_filled == 0) { 4416 /* Initial measurment */ 4417 if (bytes_ps) { 4418 rack->r_ctl.gp_bw = bytes_ps; 4419 rack->rc_gp_filled = 1; 4420 rack->r_ctl.num_measurements = 1; 4421 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4422 } else { 4423 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4424 rack->r_ctl.rc_app_limited_cnt, 4425 0, 0, 10, __LINE__, NULL, quality); 4426 } 4427 if (rack->rc_inp->inp_in_hpts && 4428 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4429 /* 4430 * Ok we can't trust the pacer in this case 4431 * where we transition from un-paced to paced. 4432 * Or for that matter when the burst mitigation 4433 * was making a wild guess and got it wrong. 4434 * Stop the pacer and clear up all the aggregate 4435 * delays etc. 4436 */ 4437 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 4438 rack->r_ctl.rc_hpts_flags = 0; 4439 rack->r_ctl.rc_last_output_to = 0; 4440 } 4441 did_add = 2; 4442 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4443 /* Still a small number run an average */ 4444 rack->r_ctl.gp_bw += bytes_ps; 4445 addpart = rack->r_ctl.num_measurements; 4446 rack->r_ctl.num_measurements++; 4447 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4448 /* We have collected enought to move forward */ 4449 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4450 } 4451 did_add = 3; 4452 } else { 4453 /* 4454 * We want to take 1/wma of the goodput and add in to 7/8th 4455 * of the old value weighted by the srtt. So if your measurement 4456 * period is say 2 SRTT's long you would get 1/4 as the 4457 * value, if it was like 1/2 SRTT then you would get 1/16th. 4458 * 4459 * But we must be careful not to take too much i.e. if the 4460 * srtt is say 20ms and the measurement is taken over 4461 * 400ms our weight would be 400/20 i.e. 20. On the 4462 * other hand if we get a measurement over 1ms with a 4463 * 10ms rtt we only want to take a much smaller portion. 4464 */ 4465 if (rack->r_ctl.num_measurements < 0xff) { 4466 rack->r_ctl.num_measurements++; 4467 } 4468 srtt = (uint64_t)tp->t_srtt; 4469 if (srtt == 0) { 4470 /* 4471 * Strange why did t_srtt go back to zero? 4472 */ 4473 if (rack->r_ctl.rc_rack_min_rtt) 4474 srtt = rack->r_ctl.rc_rack_min_rtt; 4475 else 4476 srtt = HPTS_USEC_IN_MSEC; 4477 } 4478 /* 4479 * XXXrrs: Note for reviewers, in playing with 4480 * dynamic pacing I discovered this GP calculation 4481 * as done originally leads to some undesired results. 4482 * Basically you can get longer measurements contributing 4483 * too much to the WMA. Thus I changed it if you are doing 4484 * dynamic adjustments to only do the aportioned adjustment 4485 * if we have a very small (time wise) measurement. Longer 4486 * measurements just get there weight (defaulting to 1/8) 4487 * add to the WMA. We may want to think about changing 4488 * this to always do that for both sides i.e. dynamic 4489 * and non-dynamic... but considering lots of folks 4490 * were playing with this I did not want to change the 4491 * calculation per.se. without your thoughts.. Lawerence? 4492 * Peter?? 
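 *
 * For reference, with rack_wma_divisor at its default of 8 the dynamic
 * branch below behaves like gp_bw = (1 - w) * gp_bw + w * bytes_ps,
 * where w is utim / (srtt * 8) for samples spanning less than two
 * SRTTs and a flat 1/8 for longer ones.  A rough sketch of just that
 * weighting, ignoring the probe-rtt guard (w_num is only shorthand
 * here, not a variable in this function):
 *
 *	w_num = (utim / srtt <= 1) ? utim : srtt;
 *	gp_bw = gp_bw - (gp_bw * w_num) / (srtt * 8) +
 *	    (bytes_ps * w_num) / (srtt * 8);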
4493 */ 4494 if (rack->rc_gp_dyn_mul == 0) { 4495 subpart = rack->r_ctl.gp_bw * utim; 4496 subpart /= (srtt * 8); 4497 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4498 /* 4499 * The b/w update takes no more 4500 * away then 1/2 our running total 4501 * so factor it in. 4502 */ 4503 addpart = bytes_ps * utim; 4504 addpart /= (srtt * 8); 4505 } else { 4506 /* 4507 * Don't allow a single measurement 4508 * to account for more than 1/2 of the 4509 * WMA. This could happen on a retransmission 4510 * where utim becomes huge compared to 4511 * srtt (multiple retransmissions when using 4512 * the sending rate which factors in all the 4513 * transmissions from the first one). 4514 */ 4515 subpart = rack->r_ctl.gp_bw / 2; 4516 addpart = bytes_ps / 2; 4517 } 4518 resid_bw = rack->r_ctl.gp_bw - subpart; 4519 rack->r_ctl.gp_bw = resid_bw + addpart; 4520 did_add = 1; 4521 } else { 4522 if ((utim / srtt) <= 1) { 4523 /* 4524 * The b/w update was over a small period 4525 * of time. The idea here is to prevent a small 4526 * measurement time period from counting 4527 * too much. So we scale it based on the 4528 * time so it attributes less than 1/rack_wma_divisor 4529 * of its measurement. 4530 */ 4531 subpart = rack->r_ctl.gp_bw * utim; 4532 subpart /= (srtt * rack_wma_divisor); 4533 addpart = bytes_ps * utim; 4534 addpart /= (srtt * rack_wma_divisor); 4535 } else { 4536 /* 4537 * The scaled measurement was long 4538 * enough so lets just add in the 4539 * portion of the measurment i.e. 1/rack_wma_divisor 4540 */ 4541 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4542 addpart = bytes_ps / rack_wma_divisor; 4543 } 4544 if ((rack->measure_saw_probe_rtt == 0) || 4545 (bytes_ps > rack->r_ctl.gp_bw)) { 4546 /* 4547 * For probe-rtt we only add it in 4548 * if its larger, all others we just 4549 * add in. 4550 */ 4551 did_add = 1; 4552 resid_bw = rack->r_ctl.gp_bw - subpart; 4553 rack->r_ctl.gp_bw = resid_bw + addpart; 4554 } 4555 } 4556 } 4557 if ((rack->gp_ready == 0) && 4558 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4559 /* We have enough measurements now */ 4560 rack->gp_ready = 1; 4561 rack_set_cc_pacing(rack); 4562 if (rack->defer_options) 4563 rack_apply_deferred_options(rack); 4564 } 4565 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4566 rack_get_bw(rack), 22, did_add, NULL, quality); 4567 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4568 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4569 rack_update_multiplier(rack, timely_says, bytes_ps, 4570 rack->r_ctl.rc_gp_srtt, 4571 rack->r_ctl.rc_rtt_diff); 4572 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4573 rack_get_bw(rack), 3, line, NULL, quality); 4574 /* reset the gp srtt and setup the new prev */ 4575 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4576 /* Record the lost count for the next measurement */ 4577 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4578 /* 4579 * We restart our diffs based on the gpsrtt in the 4580 * measurement window. 4581 */ 4582 rack->rc_gp_rtt_set = 0; 4583 rack->rc_gp_saw_rec = 0; 4584 rack->rc_gp_saw_ca = 0; 4585 rack->rc_gp_saw_ss = 0; 4586 rack->rc_dragged_bottom = 0; 4587 skip_measurement: 4588 4589 #ifdef STATS 4590 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4591 gput); 4592 /* 4593 * XXXLAS: This is a temporary hack, and should be 4594 * chained off VOI_TCP_GPUT when stats(9) grows an 4595 * API to deal with chained VOIs. 
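 * (What gets recorded below is the percent change of this goodput
 * sample relative to the previous one, e.g. a move from 2400 to 3000
 * is logged as +25.)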
4596 */ 4597 if (tp->t_stats_gput_prev > 0) 4598 stats_voi_update_abs_s32(tp->t_stats, 4599 VOI_TCP_GPUT_ND, 4600 ((gput - tp->t_stats_gput_prev) * 100) / 4601 tp->t_stats_gput_prev); 4602 #endif 4603 tp->t_flags &= ~TF_GPUTINPROG; 4604 tp->t_stats_gput_prev = gput; 4605 /* 4606 * Now are we app limited now and there is space from where we 4607 * were to where we want to go? 4608 * 4609 * We don't do the other case i.e. non-applimited here since 4610 * the next send will trigger us picking up the missing data. 4611 */ 4612 if (rack->r_ctl.rc_first_appl && 4613 TCPS_HAVEESTABLISHED(tp->t_state) && 4614 rack->r_ctl.rc_app_limited_cnt && 4615 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4616 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4617 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4618 /* 4619 * Yep there is enough outstanding to make a measurement here. 4620 */ 4621 struct rack_sendmap *rsm, fe; 4622 4623 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4624 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4625 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4626 rack->app_limited_needs_set = 0; 4627 tp->gput_seq = th_ack; 4628 if (rack->in_probe_rtt) 4629 rack->measure_saw_probe_rtt = 1; 4630 else if ((rack->measure_saw_probe_rtt) && 4631 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4632 rack->measure_saw_probe_rtt = 0; 4633 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4634 /* There is a full window to gain info from */ 4635 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4636 } else { 4637 /* We can only measure up to the applimited point */ 4638 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4639 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4640 /* 4641 * We don't have enough to make a measurement. 4642 */ 4643 tp->t_flags &= ~TF_GPUTINPROG; 4644 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4645 0, 0, 0, 6, __LINE__, NULL, quality); 4646 return; 4647 } 4648 } 4649 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4650 /* 4651 * We will get no more data into the SB 4652 * this means we need to have the data available 4653 * before we start a measurement. 4654 */ 4655 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4656 /* Nope not enough data. */ 4657 return; 4658 } 4659 } 4660 tp->t_flags |= TF_GPUTINPROG; 4661 /* 4662 * Now we need to find the timestamp of the send at tp->gput_seq 4663 * for the send based measurement. 4664 */ 4665 fe.r_start = tp->gput_seq; 4666 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4667 if (rsm) { 4668 /* Ok send-based limit is set */ 4669 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4670 /* 4671 * Move back to include the earlier part 4672 * so our ack time lines up right (this may 4673 * make an overlapping measurement but thats 4674 * ok). 4675 */ 4676 tp->gput_seq = rsm->r_start; 4677 } 4678 if (rsm->r_flags & RACK_ACKED) 4679 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4680 else 4681 rack->app_limited_needs_set = 1; 4682 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4683 } else { 4684 /* 4685 * If we don't find the rsm due to some 4686 * send-limit set the current time, which 4687 * basically disables the send-limit. 
4688 */ 4689 struct timeval tv; 4690 4691 microuptime(&tv); 4692 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4693 } 4694 rack_log_pacing_delay_calc(rack, 4695 tp->gput_seq, 4696 tp->gput_ack, 4697 (uint64_t)rsm, 4698 tp->gput_ts, 4699 rack->r_ctl.rc_app_limited_cnt, 4700 9, 4701 __LINE__, NULL, quality); 4702 } 4703 } 4704 4705 /* 4706 * CC wrapper hook functions 4707 */ 4708 static void 4709 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4710 uint16_t type, int32_t recovery) 4711 { 4712 uint32_t prior_cwnd, acked; 4713 struct tcp_log_buffer *lgb = NULL; 4714 uint8_t labc_to_use, quality; 4715 4716 INP_WLOCK_ASSERT(tp->t_inpcb); 4717 tp->ccv->nsegs = nsegs; 4718 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4719 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4720 uint32_t max; 4721 4722 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4723 if (tp->ccv->bytes_this_ack > max) { 4724 tp->ccv->bytes_this_ack = max; 4725 } 4726 } 4727 #ifdef STATS 4728 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4729 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4730 #endif 4731 quality = RACK_QUALITY_NONE; 4732 if ((tp->t_flags & TF_GPUTINPROG) && 4733 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4734 /* Measure the Goodput */ 4735 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4736 #ifdef NETFLIX_PEAKRATE 4737 if ((type == CC_ACK) && 4738 (tp->t_maxpeakrate)) { 4739 /* 4740 * We update t_peakrate_thr. This gives us roughly 4741 * one update per round trip time. Note 4742 * it will only be used if pace_always is off i.e 4743 * we don't do this for paced flows. 4744 */ 4745 rack_update_peakrate_thr(tp); 4746 } 4747 #endif 4748 } 4749 /* Which way our we limited, if not cwnd limited no advance in CA */ 4750 if (tp->snd_cwnd <= tp->snd_wnd) 4751 tp->ccv->flags |= CCF_CWND_LIMITED; 4752 else 4753 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4754 if (tp->snd_cwnd > tp->snd_ssthresh) { 4755 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4756 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4757 /* For the setting of a window past use the actual scwnd we are using */ 4758 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4759 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4760 tp->ccv->flags |= CCF_ABC_SENTAWND; 4761 } 4762 } else { 4763 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4764 tp->t_bytes_acked = 0; 4765 } 4766 prior_cwnd = tp->snd_cwnd; 4767 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4768 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4769 labc_to_use = rack->rc_labc; 4770 else 4771 labc_to_use = rack_max_abc_post_recovery; 4772 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4773 union tcp_log_stackspecific log; 4774 struct timeval tv; 4775 4776 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4777 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4778 log.u_bbr.flex1 = th_ack; 4779 log.u_bbr.flex2 = tp->ccv->flags; 4780 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4781 log.u_bbr.flex4 = tp->ccv->nsegs; 4782 log.u_bbr.flex5 = labc_to_use; 4783 log.u_bbr.flex6 = prior_cwnd; 4784 log.u_bbr.flex7 = V_tcp_do_newsack; 4785 log.u_bbr.flex8 = 1; 4786 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4787 0, &log, false, NULL, NULL, 0, &tv); 4788 } 4789 if (CC_ALGO(tp)->ack_received != NULL) { 4790 /* XXXLAS: Find a way to live without this */ 4791 tp->ccv->curack = th_ack; 4792 
tp->ccv->labc = labc_to_use; 4793 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4794 CC_ALGO(tp)->ack_received(tp->ccv, type); 4795 } 4796 if (lgb) { 4797 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4798 } 4799 if (rack->r_must_retran) { 4800 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4801 /* 4802 * We now are beyond the rxt point so lets disable 4803 * the flag. 4804 */ 4805 rack->r_ctl.rc_out_at_rto = 0; 4806 rack->r_must_retran = 0; 4807 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4808 /* 4809 * Only decrement the rc_out_at_rto if the cwnd advances 4810 * at least a whole segment. Otherwise next time the peer 4811 * acks, we won't be able to send this generaly happens 4812 * when we are in Congestion Avoidance. 4813 */ 4814 if (acked <= rack->r_ctl.rc_out_at_rto){ 4815 rack->r_ctl.rc_out_at_rto -= acked; 4816 } else { 4817 rack->r_ctl.rc_out_at_rto = 0; 4818 } 4819 } 4820 } 4821 #ifdef STATS 4822 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4823 #endif 4824 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4825 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4826 } 4827 #ifdef NETFLIX_PEAKRATE 4828 /* we enforce max peak rate if it is set and we are not pacing */ 4829 if ((rack->rc_always_pace == 0) && 4830 tp->t_peakrate_thr && 4831 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4832 tp->snd_cwnd = tp->t_peakrate_thr; 4833 } 4834 #endif 4835 } 4836 4837 static void 4838 tcp_rack_partialack(struct tcpcb *tp) 4839 { 4840 struct tcp_rack *rack; 4841 4842 rack = (struct tcp_rack *)tp->t_fb_ptr; 4843 INP_WLOCK_ASSERT(tp->t_inpcb); 4844 /* 4845 * If we are doing PRR and have enough 4846 * room to send <or> we are pacing and prr 4847 * is disabled we will want to see if we 4848 * can send data (by setting r_wanted_output to 4849 * true). 4850 */ 4851 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4852 rack->rack_no_prr) 4853 rack->r_wanted_output = 1; 4854 } 4855 4856 static void 4857 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4858 { 4859 struct tcp_rack *rack; 4860 uint32_t orig_cwnd; 4861 4862 orig_cwnd = tp->snd_cwnd; 4863 INP_WLOCK_ASSERT(tp->t_inpcb); 4864 rack = (struct tcp_rack *)tp->t_fb_ptr; 4865 /* only alert CC if we alerted when we entered */ 4866 if (CC_ALGO(tp)->post_recovery != NULL) { 4867 tp->ccv->curack = th_ack; 4868 CC_ALGO(tp)->post_recovery(tp->ccv); 4869 if (tp->snd_cwnd < tp->snd_ssthresh) { 4870 /* 4871 * Rack has burst control and pacing 4872 * so lets not set this any lower than 4873 * snd_ssthresh per RFC-6582 (option 2). 
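 * (Editorial illustration: if the CC module's post_recovery handler
 * pulled snd_cwnd down to, say, half of snd_ssthresh, the assignment
 * just below simply restores it to snd_ssthresh before recovery is
 * exited.)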
4874 */ 4875 tp->snd_cwnd = tp->snd_ssthresh; 4876 } 4877 } 4878 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4879 union tcp_log_stackspecific log; 4880 struct timeval tv; 4881 4882 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4883 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4884 log.u_bbr.flex1 = th_ack; 4885 log.u_bbr.flex2 = tp->ccv->flags; 4886 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4887 log.u_bbr.flex4 = tp->ccv->nsegs; 4888 log.u_bbr.flex5 = V_tcp_abc_l_var; 4889 log.u_bbr.flex6 = orig_cwnd; 4890 log.u_bbr.flex7 = V_tcp_do_newsack; 4891 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4892 log.u_bbr.flex8 = 2; 4893 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4894 0, &log, false, NULL, NULL, 0, &tv); 4895 } 4896 if ((rack->rack_no_prr == 0) && 4897 (rack->no_prr_addback == 0) && 4898 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4899 /* 4900 * Suck the next prr cnt back into cwnd, but 4901 * only do that if we are not application limited. 4902 */ 4903 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4904 /* 4905 * We are allowed to add back to the cwnd the amount we did 4906 * not get out if: 4907 * a) no_prr_addback is off. 4908 * b) we are not app limited 4909 * c) we are doing prr 4910 * <and> 4911 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4912 */ 4913 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4914 rack->r_ctl.rc_prr_sndcnt); 4915 } 4916 rack->r_ctl.rc_prr_sndcnt = 0; 4917 rack_log_to_prr(rack, 1, 0); 4918 } 4919 rack_log_to_prr(rack, 14, orig_cwnd); 4920 tp->snd_recover = tp->snd_una; 4921 if (rack->r_ctl.dsack_persist) { 4922 rack->r_ctl.dsack_persist--; 4923 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4924 rack->r_ctl.num_dsack = 0; 4925 } 4926 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4927 } 4928 EXIT_RECOVERY(tp->t_flags); 4929 } 4930 4931 static void 4932 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4933 { 4934 struct tcp_rack *rack; 4935 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4936 4937 INP_WLOCK_ASSERT(tp->t_inpcb); 4938 #ifdef STATS 4939 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4940 #endif 4941 if (IN_RECOVERY(tp->t_flags) == 0) { 4942 in_rec_at_entry = 0; 4943 ssthresh_enter = tp->snd_ssthresh; 4944 cwnd_enter = tp->snd_cwnd; 4945 } else 4946 in_rec_at_entry = 1; 4947 rack = (struct tcp_rack *)tp->t_fb_ptr; 4948 switch (type) { 4949 case CC_NDUPACK: 4950 tp->t_flags &= ~TF_WASFRECOVERY; 4951 tp->t_flags &= ~TF_WASCRECOVERY; 4952 if (!IN_FASTRECOVERY(tp->t_flags)) { 4953 rack->r_ctl.rc_prr_delivered = 0; 4954 rack->r_ctl.rc_prr_out = 0; 4955 if (rack->rack_no_prr == 0) { 4956 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4957 rack_log_to_prr(rack, 2, in_rec_at_entry); 4958 } 4959 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4960 tp->snd_recover = tp->snd_max; 4961 if (tp->t_flags2 & TF2_ECN_PERMIT) 4962 tp->t_flags2 |= TF2_ECN_SND_CWR; 4963 } 4964 break; 4965 case CC_ECN: 4966 if (!IN_CONGRECOVERY(tp->t_flags) || 4967 /* 4968 * Allow ECN reaction on ACK to CWR, if 4969 * that data segment was also CE marked. 
4970 */ 4971 SEQ_GEQ(ack, tp->snd_recover)) { 4972 EXIT_CONGRECOVERY(tp->t_flags); 4973 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4974 tp->snd_recover = tp->snd_max + 1; 4975 if (tp->t_flags2 & TF2_ECN_PERMIT) 4976 tp->t_flags2 |= TF2_ECN_SND_CWR; 4977 } 4978 break; 4979 case CC_RTO: 4980 tp->t_dupacks = 0; 4981 tp->t_bytes_acked = 0; 4982 EXIT_RECOVERY(tp->t_flags); 4983 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4984 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4985 orig_cwnd = tp->snd_cwnd; 4986 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4987 rack_log_to_prr(rack, 16, orig_cwnd); 4988 if (tp->t_flags2 & TF2_ECN_PERMIT) 4989 tp->t_flags2 |= TF2_ECN_SND_CWR; 4990 break; 4991 case CC_RTO_ERR: 4992 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4993 /* RTO was unnecessary, so reset everything. */ 4994 tp->snd_cwnd = tp->snd_cwnd_prev; 4995 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4996 tp->snd_recover = tp->snd_recover_prev; 4997 if (tp->t_flags & TF_WASFRECOVERY) { 4998 ENTER_FASTRECOVERY(tp->t_flags); 4999 tp->t_flags &= ~TF_WASFRECOVERY; 5000 } 5001 if (tp->t_flags & TF_WASCRECOVERY) { 5002 ENTER_CONGRECOVERY(tp->t_flags); 5003 tp->t_flags &= ~TF_WASCRECOVERY; 5004 } 5005 tp->snd_nxt = tp->snd_max; 5006 tp->t_badrxtwin = 0; 5007 break; 5008 } 5009 if ((CC_ALGO(tp)->cong_signal != NULL) && 5010 (type != CC_RTO)){ 5011 tp->ccv->curack = ack; 5012 CC_ALGO(tp)->cong_signal(tp->ccv, type); 5013 } 5014 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5015 rack_log_to_prr(rack, 15, cwnd_enter); 5016 rack->r_ctl.dsack_byte_cnt = 0; 5017 rack->r_ctl.retran_during_recovery = 0; 5018 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5019 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5020 rack->r_ent_rec_ns = 1; 5021 } 5022 } 5023 5024 static inline void 5025 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5026 { 5027 uint32_t i_cwnd; 5028 5029 INP_WLOCK_ASSERT(tp->t_inpcb); 5030 5031 #ifdef NETFLIX_STATS 5032 KMOD_TCPSTAT_INC(tcps_idle_restarts); 5033 if (tp->t_state == TCPS_ESTABLISHED) 5034 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 5035 #endif 5036 if (CC_ALGO(tp)->after_idle != NULL) 5037 CC_ALGO(tp)->after_idle(tp->ccv); 5038 5039 if (tp->snd_cwnd == 1) 5040 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5041 else 5042 i_cwnd = rc_init_window(rack); 5043 5044 /* 5045 * Being idle is no differnt than the initial window. If the cc 5046 * clamps it down below the initial window raise it to the initial 5047 * window. 5048 */ 5049 if (tp->snd_cwnd < i_cwnd) { 5050 tp->snd_cwnd = i_cwnd; 5051 } 5052 } 5053 5054 /* 5055 * Indicate whether this ack should be delayed. We can delay the ack if 5056 * following conditions are met: 5057 * - There is no delayed ack timer in progress. 5058 * - Our last ack wasn't a 0-sized window. We never want to delay 5059 * the ack that opens up a 0-sized window. 5060 * - LRO wasn't used for this segment. We make sure by checking that the 5061 * segment size is not larger than the MSS. 5062 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5063 * connection. 5064 */ 5065 #define DELAY_ACK(tp, tlen) \ 5066 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5067 ((tp->t_flags & TF_DELACK) == 0) && \ 5068 (tlen <= tp->t_maxseg) && \ 5069 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5070 5071 static struct rack_sendmap * 5072 rack_find_lowest_rsm(struct tcp_rack *rack) 5073 { 5074 struct rack_sendmap *rsm; 5075 5076 /* 5077 * Walk the time-order transmitted list looking for an rsm that is 5078 * not acked. 
This will be the one that was sent the longest time
5079 * ago that is still outstanding.
5080 */
5081 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
5082 if (rsm->r_flags & RACK_ACKED) {
5083 continue;
5084 }
5085 goto finish;
5086 }
5087 finish:
5088 return (rsm);
5089 }
5090
5091 static struct rack_sendmap *
5092 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5093 {
5094 struct rack_sendmap *prsm;
5095
5096 /*
5097 * Walk the sequence-ordered list backward until we arrive at
5098 * the highest seq not acked. In theory when this is called it
5099 * should be the last segment (which it was not, hence this search).
5100 */
5101 counter_u64_add(rack_find_high, 1);
5102 prsm = rsm;
5103 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
5104 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5105 continue;
5106 }
5107 return (prsm);
5108 }
5109 return (NULL);
5110 }
5111
5112 static uint32_t
5113 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5114 {
5115 int32_t lro;
5116 uint32_t thresh;
5117
5118 /*
5119 * lro is the flag we use to determine if we have seen reordering.
5120 * If it gets set we have seen reordering. The reorder logic
5121 * works in one of two ways:
5122 *
5123 * If reorder-fade is configured, then we track the last time we saw
5124 * re-ordering occur. If we reach the point where enough time has
5125 * passed we no longer consider reordering as occurring.
5126 *
5127 * Or if reorder-fade is 0, then once we see reordering we consider
5128 * the connection to always be subject to reordering and just set lro
5129 * to 1.
5130 *
5131 * In the end if lro is non-zero we add the extra time for
5132 * reordering in.
5133 */
5134 if (srtt == 0)
5135 srtt = 1;
5136 if (rack->r_ctl.rc_reorder_ts) {
5137 if (rack->r_ctl.rc_reorder_fade) {
5138 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5139 lro = cts - rack->r_ctl.rc_reorder_ts;
5140 if (lro == 0) {
5141 /*
5142 * No time has passed since the last
5143 * reorder, mark it as reordering.
5144 */
5145 lro = 1;
5146 }
5147 } else {
5148 /* Negative time? */
5149 lro = 0;
5150 }
5151 if (lro > rack->r_ctl.rc_reorder_fade) {
5152 /* Turn off reordering seen too */
5153 rack->r_ctl.rc_reorder_ts = 0;
5154 lro = 0;
5155 }
5156 } else {
5157 /* Reordering does not fade */
5158 lro = 1;
5159 }
5160 } else {
5161 lro = 0;
5162 }
5163 if (rack->rc_rack_tmr_std_based == 0) {
5164 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5165 } else {
5166 /* Standards-based pkt-delay is 1/4 srtt */
5167 thresh = srtt + (srtt >> 2);
5168 }
5169 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5170 /* The shift must be set; if not you get 1/4 rtt */
5171 if (rack->r_ctl.rc_reorder_shift)
5172 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5173 else
5174 thresh += (srtt >> 2);
5175 }
5176 if (rack->rc_rack_use_dsack &&
5177 lro &&
5178 (rack->r_ctl.num_dsack > 0)) {
5179 /*
5180 * We only increase the reordering window if we
5181 * have seen reordering <and> we have a DSACK count.
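 *
 * Worked example (editorial; the numbers are only illustrative and
 * assume the non-standards-based timer with reordering seen): with
 * srtt = 40ms, rc_pkt_delay = 1ms, rc_reorder_shift = 2 and two
 * DSACKs counted, thresh = 40 + 1 + (40 >> 2) + 2 * (40 >> 2) = 71ms,
 * which is then still subject to the srtt * 2 and rack_rto_max
 * ceilings applied below.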
5182 */ 5183 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5184 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5185 } 5186 /* SRTT * 2 is the ceiling */ 5187 if (thresh > (srtt * 2)) { 5188 thresh = srtt * 2; 5189 } 5190 /* And we don't want it above the RTO max either */ 5191 if (thresh > rack_rto_max) { 5192 thresh = rack_rto_max; 5193 } 5194 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5195 return (thresh); 5196 } 5197 5198 static uint32_t 5199 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5200 struct rack_sendmap *rsm, uint32_t srtt) 5201 { 5202 struct rack_sendmap *prsm; 5203 uint32_t thresh, len; 5204 int segsiz; 5205 5206 if (srtt == 0) 5207 srtt = 1; 5208 if (rack->r_ctl.rc_tlp_threshold) 5209 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5210 else 5211 thresh = (srtt * 2); 5212 5213 /* Get the previous sent packet, if any */ 5214 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5215 counter_u64_add(rack_enter_tlp_calc, 1); 5216 len = rsm->r_end - rsm->r_start; 5217 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5218 /* Exactly like the ID */ 5219 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5220 uint32_t alt_thresh; 5221 /* 5222 * Compensate for delayed-ack with the d-ack time. 5223 */ 5224 counter_u64_add(rack_used_tlpmethod, 1); 5225 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5226 if (alt_thresh > thresh) 5227 thresh = alt_thresh; 5228 } 5229 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5230 /* 2.1 behavior */ 5231 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5232 if (prsm && (len <= segsiz)) { 5233 /* 5234 * Two packets outstanding, thresh should be (2*srtt) + 5235 * possible inter-packet delay (if any). 5236 */ 5237 uint32_t inter_gap = 0; 5238 int idx, nidx; 5239 5240 counter_u64_add(rack_used_tlpmethod, 1); 5241 idx = rsm->r_rtr_cnt - 1; 5242 nidx = prsm->r_rtr_cnt - 1; 5243 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5244 /* Yes it was sent later (or at the same time) */ 5245 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5246 } 5247 thresh += inter_gap; 5248 } else if (len <= segsiz) { 5249 /* 5250 * Possibly compensate for delayed-ack. 5251 */ 5252 uint32_t alt_thresh; 5253 5254 counter_u64_add(rack_used_tlpmethod2, 1); 5255 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5256 if (alt_thresh > thresh) 5257 thresh = alt_thresh; 5258 } 5259 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5260 /* 2.2 behavior */ 5261 if (len <= segsiz) { 5262 uint32_t alt_thresh; 5263 /* 5264 * Compensate for delayed-ack with the d-ack time. 5265 */ 5266 counter_u64_add(rack_used_tlpmethod, 1); 5267 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5268 if (alt_thresh > thresh) 5269 thresh = alt_thresh; 5270 } 5271 } 5272 /* Not above an RTO */ 5273 if (thresh > tp->t_rxtcur) { 5274 thresh = tp->t_rxtcur; 5275 } 5276 /* Not above a RTO max */ 5277 if (thresh > rack_rto_max) { 5278 thresh = rack_rto_max; 5279 } 5280 /* Apply user supplied min TLP */ 5281 if (thresh < rack_tlp_min) { 5282 thresh = rack_tlp_min; 5283 } 5284 return (thresh); 5285 } 5286 5287 static uint32_t 5288 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5289 { 5290 /* 5291 * We want the rack_rtt which is the 5292 * last rtt we measured. 
However if that 5293 * does not exist we fallback to the srtt (which 5294 * we probably will never do) and then as a last 5295 * resort we use RACK_INITIAL_RTO if no srtt is 5296 * yet set. 5297 */ 5298 if (rack->rc_rack_rtt) 5299 return (rack->rc_rack_rtt); 5300 else if (tp->t_srtt == 0) 5301 return (RACK_INITIAL_RTO); 5302 return (tp->t_srtt); 5303 } 5304 5305 static struct rack_sendmap * 5306 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5307 { 5308 /* 5309 * Check to see that we don't need to fall into recovery. We will 5310 * need to do so if our oldest transmit is past the time we should 5311 * have had an ack. 5312 */ 5313 struct tcp_rack *rack; 5314 struct rack_sendmap *rsm; 5315 int32_t idx; 5316 uint32_t srtt, thresh; 5317 5318 rack = (struct tcp_rack *)tp->t_fb_ptr; 5319 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5320 return (NULL); 5321 } 5322 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5323 if (rsm == NULL) 5324 return (NULL); 5325 5326 if (rsm->r_flags & RACK_ACKED) { 5327 rsm = rack_find_lowest_rsm(rack); 5328 if (rsm == NULL) 5329 return (NULL); 5330 } 5331 idx = rsm->r_rtr_cnt - 1; 5332 srtt = rack_grab_rtt(tp, rack); 5333 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5334 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5335 return (NULL); 5336 } 5337 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5338 return (NULL); 5339 } 5340 /* Ok if we reach here we are over-due and this guy can be sent */ 5341 if (IN_RECOVERY(tp->t_flags) == 0) { 5342 /* 5343 * For the one that enters us into recovery record undo 5344 * info. 5345 */ 5346 rack->r_ctl.rc_rsm_start = rsm->r_start; 5347 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5348 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5349 } 5350 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5351 return (rsm); 5352 } 5353 5354 static uint32_t 5355 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5356 { 5357 int32_t t; 5358 int32_t tt; 5359 uint32_t ret_val; 5360 5361 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5362 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5363 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5364 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5365 ret_val = (uint32_t)tt; 5366 return (ret_val); 5367 } 5368 5369 static uint32_t 5370 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5371 { 5372 /* 5373 * Start the FR timer, we do this based on getting the first one in 5374 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5375 * events we need to stop the running timer (if its running) before 5376 * starting the new one. 5377 */ 5378 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5379 uint32_t srtt_cur; 5380 int32_t idx; 5381 int32_t is_tlp_timer = 0; 5382 struct rack_sendmap *rsm; 5383 5384 if (rack->t_timers_stopped) { 5385 /* All timers have been stopped none are to run */ 5386 return (0); 5387 } 5388 if (rack->rc_in_persist) { 5389 /* We can't start any timer in persists */ 5390 return (rack_get_persists_timer_val(tp, rack)); 5391 } 5392 rack->rc_on_min_to = 0; 5393 if ((tp->t_state < TCPS_ESTABLISHED) || 5394 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5395 goto activate_rxt; 5396 } 5397 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5398 if ((rsm == NULL) || sup_rack) { 5399 /* Nothing on the send map or no rack */ 5400 activate_rxt: 5401 time_since_sent = 0; 5402 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5403 if (rsm) { 5404 /* 5405 * Should we discount the RTX timer any? 
5406 * 5407 * We want to discount it the smallest amount. 5408 * If a timer (Rack/TLP or RXT) has gone off more 5409 * recently thats the discount we want to use (now - timer time). 5410 * If the retransmit of the oldest packet was more recent then 5411 * we want to use that (now - oldest-packet-last_transmit_time). 5412 * 5413 */ 5414 idx = rsm->r_rtr_cnt - 1; 5415 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5416 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5417 else 5418 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5419 if (TSTMP_GT(cts, tstmp_touse)) 5420 time_since_sent = cts - tstmp_touse; 5421 } 5422 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5423 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5424 to = tp->t_rxtcur; 5425 if (to > time_since_sent) 5426 to -= time_since_sent; 5427 else 5428 to = rack->r_ctl.rc_min_to; 5429 if (to == 0) 5430 to = 1; 5431 /* Special case for KEEPINIT */ 5432 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5433 (TP_KEEPINIT(tp) != 0) && 5434 rsm) { 5435 /* 5436 * We have to put a ceiling on the rxt timer 5437 * of the keep-init timeout. 5438 */ 5439 uint32_t max_time, red; 5440 5441 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5442 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5443 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5444 if (red < max_time) 5445 max_time -= red; 5446 else 5447 max_time = 1; 5448 } 5449 /* Reduce timeout to the keep value if needed */ 5450 if (max_time < to) 5451 to = max_time; 5452 } 5453 return (to); 5454 } 5455 return (0); 5456 } 5457 if (rsm->r_flags & RACK_ACKED) { 5458 rsm = rack_find_lowest_rsm(rack); 5459 if (rsm == NULL) { 5460 /* No lowest? */ 5461 goto activate_rxt; 5462 } 5463 } 5464 if (rack->sack_attack_disable) { 5465 /* 5466 * We don't want to do 5467 * any TLP's if you are an attacker. 5468 * Though if you are doing what 5469 * is expected you may still have 5470 * SACK-PASSED marks. 5471 */ 5472 goto activate_rxt; 5473 } 5474 /* Convert from ms to usecs */ 5475 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5476 if ((tp->t_flags & TF_SENTFIN) && 5477 ((tp->snd_max - tp->snd_una) == 1) && 5478 (rsm->r_flags & RACK_HAS_FIN)) { 5479 /* 5480 * We don't start a rack timer if all we have is a 5481 * FIN outstanding. 5482 */ 5483 goto activate_rxt; 5484 } 5485 if ((rack->use_rack_rr == 0) && 5486 (IN_FASTRECOVERY(tp->t_flags)) && 5487 (rack->rack_no_prr == 0) && 5488 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5489 /* 5490 * We are not cheating, in recovery and 5491 * not enough ack's to yet get our next 5492 * retransmission out. 5493 * 5494 * Note that classified attackers do not 5495 * get to use the rack-cheat. 
5496 */ 5497 goto activate_tlp; 5498 } 5499 srtt = rack_grab_rtt(tp, rack); 5500 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5501 idx = rsm->r_rtr_cnt - 1; 5502 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5503 if (SEQ_GEQ(exp, cts)) { 5504 to = exp - cts; 5505 if (to < rack->r_ctl.rc_min_to) { 5506 to = rack->r_ctl.rc_min_to; 5507 if (rack->r_rr_config == 3) 5508 rack->rc_on_min_to = 1; 5509 } 5510 } else { 5511 to = rack->r_ctl.rc_min_to; 5512 if (rack->r_rr_config == 3) 5513 rack->rc_on_min_to = 1; 5514 } 5515 } else { 5516 /* Ok we need to do a TLP not RACK */ 5517 activate_tlp: 5518 if ((rack->rc_tlp_in_progress != 0) && 5519 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5520 /* 5521 * The previous send was a TLP and we have sent 5522 * N TLP's without sending new data. 5523 */ 5524 goto activate_rxt; 5525 } 5526 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5527 if (rsm == NULL) { 5528 /* We found no rsm to TLP with. */ 5529 goto activate_rxt; 5530 } 5531 if (rsm->r_flags & RACK_HAS_FIN) { 5532 /* If its a FIN we dont do TLP */ 5533 rsm = NULL; 5534 goto activate_rxt; 5535 } 5536 idx = rsm->r_rtr_cnt - 1; 5537 time_since_sent = 0; 5538 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5539 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5540 else 5541 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5542 if (TSTMP_GT(cts, tstmp_touse)) 5543 time_since_sent = cts - tstmp_touse; 5544 is_tlp_timer = 1; 5545 if (tp->t_srtt) { 5546 if ((rack->rc_srtt_measure_made == 0) && 5547 (tp->t_srtt == 1)) { 5548 /* 5549 * If another stack as run and set srtt to 1, 5550 * then the srtt was 0, so lets use the initial. 5551 */ 5552 srtt = RACK_INITIAL_RTO; 5553 } else { 5554 srtt_cur = tp->t_srtt; 5555 srtt = srtt_cur; 5556 } 5557 } else 5558 srtt = RACK_INITIAL_RTO; 5559 /* 5560 * If the SRTT is not keeping up and the 5561 * rack RTT has spiked we want to use 5562 * the last RTT not the smoothed one. 5563 */ 5564 if (rack_tlp_use_greater && 5565 tp->t_srtt && 5566 (srtt < rack_grab_rtt(tp, rack))) { 5567 srtt = rack_grab_rtt(tp, rack); 5568 } 5569 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5570 if (thresh > time_since_sent) { 5571 to = thresh - time_since_sent; 5572 } else { 5573 to = rack->r_ctl.rc_min_to; 5574 rack_log_alt_to_to_cancel(rack, 5575 thresh, /* flex1 */ 5576 time_since_sent, /* flex2 */ 5577 tstmp_touse, /* flex3 */ 5578 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5579 (uint32_t)rsm->r_tim_lastsent[idx], 5580 srtt, 5581 idx, 99); 5582 } 5583 if (to < rack_tlp_min) { 5584 to = rack_tlp_min; 5585 } 5586 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5587 /* 5588 * If the TLP time works out to larger than the max 5589 * RTO lets not do TLP.. just RTO. 5590 */ 5591 goto activate_rxt; 5592 } 5593 } 5594 if (is_tlp_timer == 0) { 5595 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5596 } else { 5597 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5598 } 5599 if (to == 0) 5600 to = 1; 5601 return (to); 5602 } 5603 5604 static void 5605 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5606 { 5607 if (rack->rc_in_persist == 0) { 5608 if (tp->t_flags & TF_GPUTINPROG) { 5609 /* 5610 * Stop the goodput now, the calling of the 5611 * measurement function clears the flag. 
5612 */ 5613 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5614 RACK_QUALITY_PERSIST); 5615 } 5616 #ifdef NETFLIX_SHARED_CWND 5617 if (rack->r_ctl.rc_scw) { 5618 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5619 rack->rack_scwnd_is_idle = 1; 5620 } 5621 #endif 5622 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5623 if (rack->r_ctl.rc_went_idle_time == 0) 5624 rack->r_ctl.rc_went_idle_time = 1; 5625 rack_timer_cancel(tp, rack, cts, __LINE__); 5626 tp->t_rxtshift = 0; 5627 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5628 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5629 rack->rc_in_persist = 1; 5630 } 5631 } 5632 5633 static void 5634 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5635 { 5636 if (rack->rc_inp->inp_in_hpts) { 5637 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 5638 rack->r_ctl.rc_hpts_flags = 0; 5639 } 5640 #ifdef NETFLIX_SHARED_CWND 5641 if (rack->r_ctl.rc_scw) { 5642 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5643 rack->rack_scwnd_is_idle = 0; 5644 } 5645 #endif 5646 if (rack->rc_gp_dyn_mul && 5647 (rack->use_fixed_rate == 0) && 5648 (rack->rc_always_pace)) { 5649 /* 5650 * Do we count this as if a probe-rtt just 5651 * finished? 5652 */ 5653 uint32_t time_idle, idle_min; 5654 5655 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5656 idle_min = rack_min_probertt_hold; 5657 if (rack_probertt_gpsrtt_cnt_div) { 5658 uint64_t extra; 5659 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5660 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5661 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5662 idle_min += (uint32_t)extra; 5663 } 5664 if (time_idle >= idle_min) { 5665 /* Yes, we count it as a probe-rtt. */ 5666 uint32_t us_cts; 5667 5668 us_cts = tcp_get_usecs(NULL); 5669 if (rack->in_probe_rtt == 0) { 5670 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5671 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5672 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5673 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5674 } else { 5675 rack_exit_probertt(rack, us_cts); 5676 } 5677 } 5678 } 5679 rack->rc_in_persist = 0; 5680 rack->r_ctl.rc_went_idle_time = 0; 5681 tp->t_rxtshift = 0; 5682 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5683 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5684 rack->r_ctl.rc_agg_delayed = 0; 5685 rack->r_early = 0; 5686 rack->r_late = 0; 5687 rack->r_ctl.rc_agg_early = 0; 5688 } 5689 5690 static void 5691 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5692 struct hpts_diag *diag, struct timeval *tv) 5693 { 5694 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5695 union tcp_log_stackspecific log; 5696 5697 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5698 log.u_bbr.flex1 = diag->p_nxt_slot; 5699 log.u_bbr.flex2 = diag->p_cur_slot; 5700 log.u_bbr.flex3 = diag->slot_req; 5701 log.u_bbr.flex4 = diag->inp_hptsslot; 5702 log.u_bbr.flex5 = diag->slot_remaining; 5703 log.u_bbr.flex6 = diag->need_new_to; 5704 log.u_bbr.flex7 = diag->p_hpts_active; 5705 log.u_bbr.flex8 = diag->p_on_min_sleep; 5706 /* Hijack other fields as needed */ 5707 log.u_bbr.epoch = diag->have_slept; 5708 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5709 log.u_bbr.pkts_out = diag->co_ret; 5710 log.u_bbr.applimited = diag->hpts_sleep_time; 5711 log.u_bbr.delivered = diag->p_prev_slot; 5712 log.u_bbr.inflight = diag->p_runningslot; 5713 log.u_bbr.bw_inuse = 
diag->wheel_slot; 5714 log.u_bbr.rttProp = diag->wheel_cts; 5715 log.u_bbr.timeStamp = cts; 5716 log.u_bbr.delRate = diag->maxslots; 5717 log.u_bbr.cur_del_rate = diag->p_curtick; 5718 log.u_bbr.cur_del_rate <<= 32; 5719 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5720 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5721 &rack->rc_inp->inp_socket->so_rcv, 5722 &rack->rc_inp->inp_socket->so_snd, 5723 BBR_LOG_HPTSDIAG, 0, 5724 0, &log, false, tv); 5725 } 5726 5727 } 5728 5729 static void 5730 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5731 { 5732 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5733 union tcp_log_stackspecific log; 5734 struct timeval tv; 5735 5736 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5737 log.u_bbr.flex1 = sb->sb_flags; 5738 log.u_bbr.flex2 = len; 5739 log.u_bbr.flex3 = sb->sb_state; 5740 log.u_bbr.flex8 = type; 5741 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5742 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5743 &rack->rc_inp->inp_socket->so_rcv, 5744 &rack->rc_inp->inp_socket->so_snd, 5745 TCP_LOG_SB_WAKE, 0, 5746 len, &log, false, &tv); 5747 } 5748 } 5749 5750 static void 5751 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5752 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5753 { 5754 struct hpts_diag diag; 5755 struct inpcb *inp; 5756 struct timeval tv; 5757 uint32_t delayed_ack = 0; 5758 uint32_t hpts_timeout; 5759 uint32_t entry_slot = slot; 5760 uint8_t stopped; 5761 uint32_t left = 0; 5762 uint32_t us_cts; 5763 5764 inp = tp->t_inpcb; 5765 if ((tp->t_state == TCPS_CLOSED) || 5766 (tp->t_state == TCPS_LISTEN)) { 5767 return; 5768 } 5769 if (inp->inp_in_hpts) { 5770 /* Already on the pacer */ 5771 return; 5772 } 5773 stopped = rack->rc_tmr_stopped; 5774 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5775 left = rack->r_ctl.rc_timer_exp - cts; 5776 } 5777 rack->r_ctl.rc_timer_exp = 0; 5778 rack->r_ctl.rc_hpts_flags = 0; 5779 us_cts = tcp_get_usecs(&tv); 5780 /* Now early/late accounting */ 5781 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5782 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5783 /* 5784 * We have a early carry over set, 5785 * we can always add more time so we 5786 * can always make this compensation. 5787 * 5788 * Note if ack's are allowed to wake us do not 5789 * penalize the next timer for being awoke 5790 * by an ack aka the rc_agg_early (non-paced mode). 5791 */ 5792 slot += rack->r_ctl.rc_agg_early; 5793 rack->r_early = 0; 5794 rack->r_ctl.rc_agg_early = 0; 5795 } 5796 if (rack->r_late) { 5797 /* 5798 * This is harder, we can 5799 * compensate some but it 5800 * really depends on what 5801 * the current pacing time is. 5802 */ 5803 if (rack->r_ctl.rc_agg_delayed >= slot) { 5804 /* 5805 * We can't compensate for it all. 5806 * And we have to have some time 5807 * on the clock. We always have a min 5808 * 10 slots (10 x 10 i.e. 100 usecs). 
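 *
 * Editorial illustration (assuming the usual 10 usec HPTS slot
 * granularity): if we are running rc_agg_delayed = 200 usecs late
 * and the next pacing delay works out to slot = 150 usecs, we clamp
 * slot down to HPTS_TICKS_PER_SLOT and credit the 140 usecs we took
 * off against rc_agg_delayed (leaving 60). Had rc_agg_delayed been
 * only 50 usecs, the else branch below would pace for 100 usecs and
 * clear the late state entirely.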
5809 */ 5810 if (slot <= HPTS_TICKS_PER_SLOT) { 5811 /* We gain delay */ 5812 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5813 slot = HPTS_TICKS_PER_SLOT; 5814 } else { 5815 /* We take off some */ 5816 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5817 slot = HPTS_TICKS_PER_SLOT; 5818 } 5819 } else { 5820 slot -= rack->r_ctl.rc_agg_delayed; 5821 rack->r_ctl.rc_agg_delayed = 0; 5822 /* Make sure we have 100 useconds at minimum */ 5823 if (slot < HPTS_TICKS_PER_SLOT) { 5824 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5825 slot = HPTS_TICKS_PER_SLOT; 5826 } 5827 if (rack->r_ctl.rc_agg_delayed == 0) 5828 rack->r_late = 0; 5829 } 5830 } 5831 if (slot) { 5832 /* We are pacing too */ 5833 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5834 } 5835 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5836 #ifdef NETFLIX_EXP_DETECTION 5837 if (rack->sack_attack_disable && 5838 (slot < tcp_sad_pacing_interval)) { 5839 /* 5840 * We have a potential attacker on 5841 * the line. We have possibly some 5842 * (or now) pacing time set. We want to 5843 * slow down the processing of sacks by some 5844 * amount (if it is an attacker). Set the default 5845 * slot for attackers in place (unless the orginal 5846 * interval is longer). Its stored in 5847 * micro-seconds, so lets convert to msecs. 5848 */ 5849 slot = tcp_sad_pacing_interval; 5850 } 5851 #endif 5852 if (tp->t_flags & TF_DELACK) { 5853 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5854 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5855 } 5856 if (delayed_ack && ((hpts_timeout == 0) || 5857 (delayed_ack < hpts_timeout))) 5858 hpts_timeout = delayed_ack; 5859 else 5860 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5861 /* 5862 * If no timers are going to run and we will fall off the hptsi 5863 * wheel, we resort to a keep-alive timer if its configured. 5864 */ 5865 if ((hpts_timeout == 0) && 5866 (slot == 0)) { 5867 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5868 (tp->t_state <= TCPS_CLOSING)) { 5869 /* 5870 * Ok we have no timer (persists, rack, tlp, rxt or 5871 * del-ack), we don't have segments being paced. So 5872 * all that is left is the keepalive timer. 5873 */ 5874 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5875 /* Get the established keep-alive time */ 5876 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5877 } else { 5878 /* 5879 * Get the initial setup keep-alive time, 5880 * note that this is probably not going to 5881 * happen, since rack will be running a rxt timer 5882 * if a SYN of some sort is outstanding. It is 5883 * actually handled in rack_timeout_rxt(). 5884 */ 5885 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5886 } 5887 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5888 if (rack->in_probe_rtt) { 5889 /* 5890 * We want to instead not wake up a long time from 5891 * now but to wake up about the time we would 5892 * exit probe-rtt and initiate a keep-alive ack. 5893 * This will get us out of probe-rtt and update 5894 * our min-rtt. 5895 */ 5896 hpts_timeout = rack_min_probertt_hold; 5897 } 5898 } 5899 } 5900 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5901 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5902 /* 5903 * RACK, TLP, persists and RXT timers all are restartable 5904 * based on actions input .. i.e we received a packet (ack 5905 * or sack) and that changes things (rw, or snd_una etc). 5906 * Thus we can restart them with a new value. 
For 5907 * keep-alive, delayed_ack we keep track of what was left 5908 * and restart the timer with a smaller value. 5909 */ 5910 if (left < hpts_timeout) 5911 hpts_timeout = left; 5912 } 5913 if (hpts_timeout) { 5914 /* 5915 * Hack alert for now we can't time-out over 2,147,483 5916 * seconds (a bit more than 596 hours), which is probably ok 5917 * :). 5918 */ 5919 if (hpts_timeout > 0x7ffffffe) 5920 hpts_timeout = 0x7ffffffe; 5921 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5922 } 5923 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5924 if ((rack->gp_ready == 0) && 5925 (rack->use_fixed_rate == 0) && 5926 (hpts_timeout < slot) && 5927 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5928 /* 5929 * We have no good estimate yet for the 5930 * old clunky burst mitigation or the 5931 * real pacing. And the tlp or rxt is smaller 5932 * than the pacing calculation. Lets not 5933 * pace that long since we know the calculation 5934 * so far is not accurate. 5935 */ 5936 slot = hpts_timeout; 5937 } 5938 rack->r_ctl.last_pacing_time = slot; 5939 /** 5940 * Turn off all the flags for queuing by default. The 5941 * flags have important meanings to what happens when 5942 * LRO interacts with the transport. Most likely (by default now) 5943 * mbuf_queueing and ack compression are on. So the transport 5944 * has a couple of flags that control what happens (if those 5945 * are not on then these flags won't have any effect since it 5946 * won't go through the queuing LRO path). 5947 * 5948 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5949 * pacing output, so don't disturb. But 5950 * it also means LRO can wake me if there 5951 * is a SACK arrival. 5952 * 5953 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5954 * with the above flag (QUEUE_READY) and 5955 * when present it says don't even wake me 5956 * if a SACK arrives. 5957 * 5958 * The idea behind these flags is that if we are pacing we 5959 * set the MBUF_QUEUE_READY and only get woken up if 5960 * a SACK arrives (which could change things) or if 5961 * our pacing timer expires. If, however, we have a rack 5962 * timer running, then we don't even want a sack to wake 5963 * us since the rack timer has to expire before we can send. 5964 * 5965 * Other cases should usually have none of the flags set 5966 * so LRO can call into us. 5967 */ 5968 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5969 if (slot) { 5970 rack->r_ctl.rc_last_output_to = us_cts + slot; 5971 /* 5972 * A pacing timer (slot) is being set, in 5973 * such a case we cannot send (we are blocked by 5974 * the timer). So lets tell LRO that it should not 5975 * wake us unless there is a SACK. Note this only 5976 * will be effective if mbuf queueing is on or 5977 * compressed acks are being processed. 5978 */ 5979 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5980 /* 5981 * But wait if we have a Rack timer running 5982 * even a SACK should not disturb us (with 5983 * the exception of r_rr_config 3). 5984 */ 5985 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5986 (rack->r_rr_config != 3)) 5987 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5988 if (rack->rc_ack_can_sendout_data) { 5989 /* 5990 * Ahh but wait, this is that special case 5991 * where the pacing timer can be disturbed 5992 * backout the changes (used for non-paced 5993 * burst limiting). 
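 *
 * To summarize the combinations set up above (editorial note): a
 * plain pacing timer leaves only INP_MBUF_QUEUE_READY set, so
 * ordinary acks are queued by LRO but a SACK can still wake us; a
 * pacing timer plus a rack timer (with r_rr_config != 3) sets
 * INP_DONT_SACK_QUEUE as well, so only the timer wakes us; and this
 * rc_ack_can_sendout_data case clears both flags again so any ack
 * may trigger output.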
5994 */ 5995 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5996 } 5997 if ((rack->use_rack_rr) && 5998 (rack->r_rr_config < 2) && 5999 ((hpts_timeout) && (hpts_timeout < slot))) { 6000 /* 6001 * Arrange for the hpts to kick back in after the 6002 * t-o if the t-o does not cause a send. 6003 */ 6004 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6005 __LINE__, &diag); 6006 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6007 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6008 } else { 6009 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 6010 __LINE__, &diag); 6011 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6012 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6013 } 6014 } else if (hpts_timeout) { 6015 /* 6016 * With respect to inp_flags2 here, lets let any new acks wake 6017 * us up here. Since we are not pacing (no pacing timer), output 6018 * can happen so we should let it. If its a Rack timer, then any inbound 6019 * packet probably won't change the sending (we will be blocked) 6020 * but it may change the prr stats so letting it in (the set defaults 6021 * at the start of this block) are good enough. 6022 */ 6023 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6024 __LINE__, &diag); 6025 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6026 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6027 } else { 6028 /* No timer starting */ 6029 #ifdef INVARIANTS 6030 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6031 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6032 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6033 } 6034 #endif 6035 } 6036 rack->rc_tmr_stopped = 0; 6037 if (slot) 6038 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 6039 } 6040 6041 /* 6042 * RACK Timer, here we simply do logging and house keeping. 6043 * the normal rack_output() function will call the 6044 * appropriate thing to check if we need to do a RACK retransmit. 6045 * We return 1, saying don't proceed with rack_output only 6046 * when all timers have been stopped (destroyed PCB?). 6047 */ 6048 static int 6049 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6050 { 6051 /* 6052 * This timer simply provides an internal trigger to send out data. 6053 * The check_recovery_mode call will see if there are needed 6054 * retransmissions, if so we will enter fast-recovery. The output 6055 * call may or may not do the same thing depending on sysctl 6056 * settings. 6057 */ 6058 struct rack_sendmap *rsm; 6059 6060 if (tp->t_timers->tt_flags & TT_STOPPED) { 6061 return (1); 6062 } 6063 counter_u64_add(rack_to_tot, 1); 6064 if (rack->r_state && (rack->r_state != tp->t_state)) 6065 rack_set_state(tp, rack); 6066 rack->rc_on_min_to = 0; 6067 rsm = rack_check_recovery_mode(tp, cts); 6068 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6069 if (rsm) { 6070 rack->r_ctl.rc_resend = rsm; 6071 rack->r_timer_override = 1; 6072 if (rack->use_rack_rr) { 6073 /* 6074 * Don't accumulate extra pacing delay 6075 * we are allowing the rack timer to 6076 * over-ride pacing i.e. rrr takes precedence 6077 * if the pacing interval is longer than the rrr 6078 * time (in other words we get the min pacing 6079 * time versus rrr pacing time). 
6080 */ 6081 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6082 } 6083 } 6084 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6085 if (rsm == NULL) { 6086 /* restart a timer and return 1 */ 6087 rack_start_hpts_timer(rack, tp, cts, 6088 0, 0, 0); 6089 return (1); 6090 } 6091 return (0); 6092 } 6093 6094 static void 6095 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6096 { 6097 if (rsm->m->m_len > rsm->orig_m_len) { 6098 /* 6099 * Mbuf grew, caused by sbcompress, our offset does 6100 * not change. 6101 */ 6102 rsm->orig_m_len = rsm->m->m_len; 6103 } else if (rsm->m->m_len < rsm->orig_m_len) { 6104 /* 6105 * Mbuf shrank, trimmed off the top by an ack, our 6106 * offset changes. 6107 */ 6108 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6109 rsm->orig_m_len = rsm->m->m_len; 6110 } 6111 } 6112 6113 static void 6114 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6115 { 6116 struct mbuf *m; 6117 uint32_t soff; 6118 6119 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 6120 /* Fix up the orig_m_len and possibly the mbuf offset */ 6121 rack_adjust_orig_mlen(src_rsm); 6122 } 6123 m = src_rsm->m; 6124 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6125 while (soff >= m->m_len) { 6126 /* Move out past this mbuf */ 6127 soff -= m->m_len; 6128 m = m->m_next; 6129 KASSERT((m != NULL), 6130 ("rsm:%p nrsm:%p hit at soff:%u null m", 6131 src_rsm, rsm, soff)); 6132 } 6133 rsm->m = m; 6134 rsm->soff = soff; 6135 rsm->orig_m_len = m->m_len; 6136 } 6137 6138 static __inline void 6139 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 6140 struct rack_sendmap *rsm, uint32_t start) 6141 { 6142 int idx; 6143 6144 nrsm->r_start = start; 6145 nrsm->r_end = rsm->r_end; 6146 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6147 nrsm->r_flags = rsm->r_flags; 6148 nrsm->r_dupack = rsm->r_dupack; 6149 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6150 nrsm->r_rtr_bytes = 0; 6151 nrsm->r_fas = rsm->r_fas; 6152 rsm->r_end = nrsm->r_start; 6153 nrsm->r_just_ret = rsm->r_just_ret; 6154 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6155 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6156 } 6157 /* Now if we have SYN flag we keep it on the left edge */ 6158 if (nrsm->r_flags & RACK_HAS_SYN) 6159 nrsm->r_flags &= ~RACK_HAS_SYN; 6160 /* Now if we have a FIN flag we keep it on the right edge */ 6161 if (rsm->r_flags & RACK_HAS_FIN) 6162 rsm->r_flags &= ~RACK_HAS_FIN; 6163 /* Push bit must go to the right edge as well */ 6164 if (rsm->r_flags & RACK_HAD_PUSH) 6165 rsm->r_flags &= ~RACK_HAD_PUSH; 6166 /* Clone over the state of the hw_tls flag */ 6167 nrsm->r_hw_tls = rsm->r_hw_tls; 6168 /* 6169 * Now we need to find nrsm's new location in the mbuf chain 6170 * we basically calculate a new offset, which is soff + 6171 * how much is left in original rsm. Then we walk out the mbuf 6172 * chain to find the righ postion, it may be the same mbuf 6173 * or maybe not. 6174 */ 6175 KASSERT(((rsm->m != NULL) || 6176 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6177 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6178 if (rsm->m) 6179 rack_setup_offset_for_rsm(rsm, nrsm); 6180 } 6181 6182 static struct rack_sendmap * 6183 rack_merge_rsm(struct tcp_rack *rack, 6184 struct rack_sendmap *l_rsm, 6185 struct rack_sendmap *r_rsm) 6186 { 6187 /* 6188 * We are merging two ack'd RSM's, 6189 * the l_rsm is on the left (lower seq 6190 * values) and the r_rsm is on the right 6191 * (higher seq value). 
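 * (Illustrative example: an l_rsm covering sequence range
 * [1000, 2000) merged with an r_rsm covering [2000, 3000) leaves a
 * single map entry covering [1000, 3000).)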
The simplest way 6192 * to merge these is to move the right 6193 * one into the left. I don't think there 6194 * is any reason we need to try to find 6195 * the oldest (or last oldest retransmitted). 6196 */ 6197 struct rack_sendmap *rm; 6198 6199 rack_log_map_chg(rack->rc_tp, rack, NULL, 6200 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6201 l_rsm->r_end = r_rsm->r_end; 6202 if (l_rsm->r_dupack < r_rsm->r_dupack) 6203 l_rsm->r_dupack = r_rsm->r_dupack; 6204 if (r_rsm->r_rtr_bytes) 6205 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6206 if (r_rsm->r_in_tmap) { 6207 /* This really should not happen */ 6208 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6209 r_rsm->r_in_tmap = 0; 6210 } 6211 6212 /* Now the flags */ 6213 if (r_rsm->r_flags & RACK_HAS_FIN) 6214 l_rsm->r_flags |= RACK_HAS_FIN; 6215 if (r_rsm->r_flags & RACK_TLP) 6216 l_rsm->r_flags |= RACK_TLP; 6217 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6218 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6219 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6220 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6221 /* 6222 * If both are app-limited then let the 6223 * free lower the count. If right is app 6224 * limited and left is not, transfer. 6225 */ 6226 l_rsm->r_flags |= RACK_APP_LIMITED; 6227 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6228 if (r_rsm == rack->r_ctl.rc_first_appl) 6229 rack->r_ctl.rc_first_appl = l_rsm; 6230 } 6231 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6232 #ifdef INVARIANTS 6233 if (rm != r_rsm) { 6234 panic("removing head in rack:%p rsm:%p rm:%p", 6235 rack, r_rsm, rm); 6236 } 6237 #endif 6238 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6239 /* Transfer the split limit to the map we free */ 6240 r_rsm->r_limit_type = l_rsm->r_limit_type; 6241 l_rsm->r_limit_type = 0; 6242 } 6243 rack_free(rack, r_rsm); 6244 return (l_rsm); 6245 } 6246 6247 /* 6248 * TLP Timer, here we simply setup what segment we want to 6249 * have the TLP expire on, the normal rack_output() will then 6250 * send it out. 6251 * 6252 * We return 1, saying don't proceed with rack_output only 6253 * when all timers have been stopped (destroyed PCB?). 6254 */ 6255 static int 6256 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6257 { 6258 /* 6259 * Tail Loss Probe. 6260 */ 6261 struct rack_sendmap *rsm = NULL; 6262 struct rack_sendmap *insret; 6263 struct socket *so; 6264 uint32_t amm; 6265 uint32_t out, avail; 6266 int collapsed_win = 0; 6267 6268 if (tp->t_timers->tt_flags & TT_STOPPED) { 6269 return (1); 6270 } 6271 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6272 /* Its not time yet */ 6273 return (0); 6274 } 6275 if (ctf_progress_timeout_check(tp, true)) { 6276 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6277 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6278 return (1); 6279 } 6280 /* 6281 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6282 * need to figure out how to force a full MSS segment out. 
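 *
 * In outline (editorial summary of the code below): if new, unsent
 * data is available and at least one full segment of it fits in the
 * peer's window, we arrange to send one new full-sized segment;
 * otherwise we pick the highest (or, with rack_always_send_oldest,
 * the oldest) un-acked segment, splitting it down to one MSS if
 * necessary, and hand it to rack_output() via rc_tlpsend.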
6283 */ 6284 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6285 rack->r_ctl.retran_during_recovery = 0; 6286 rack->r_ctl.dsack_byte_cnt = 0; 6287 counter_u64_add(rack_tlp_tot, 1); 6288 if (rack->r_state && (rack->r_state != tp->t_state)) 6289 rack_set_state(tp, rack); 6290 so = tp->t_inpcb->inp_socket; 6291 avail = sbavail(&so->so_snd); 6292 out = tp->snd_max - tp->snd_una; 6293 if (out > tp->snd_wnd) { 6294 /* special case, we need a retransmission */ 6295 collapsed_win = 1; 6296 goto need_retran; 6297 } 6298 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6299 rack->r_ctl.dsack_persist--; 6300 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6301 rack->r_ctl.num_dsack = 0; 6302 } 6303 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6304 } 6305 if ((tp->t_flags & TF_GPUTINPROG) && 6306 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6307 /* 6308 * If this is the second in a row 6309 * TLP and we are doing a measurement 6310 * its time to abandon the measurement. 6311 * Something is likely broken on 6312 * the clients network and measuring a 6313 * broken network does us no good. 6314 */ 6315 tp->t_flags &= ~TF_GPUTINPROG; 6316 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6317 rack->r_ctl.rc_gp_srtt /*flex1*/, 6318 tp->gput_seq, 6319 0, 0, 18, __LINE__, NULL, 0); 6320 } 6321 /* 6322 * Check our send oldest always settings, and if 6323 * there is an oldest to send jump to the need_retran. 6324 */ 6325 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6326 goto need_retran; 6327 6328 if (avail > out) { 6329 /* New data is available */ 6330 amm = avail - out; 6331 if (amm > ctf_fixed_maxseg(tp)) { 6332 amm = ctf_fixed_maxseg(tp); 6333 if ((amm + out) > tp->snd_wnd) { 6334 /* We are rwnd limited */ 6335 goto need_retran; 6336 } 6337 } else if (amm < ctf_fixed_maxseg(tp)) { 6338 /* not enough to fill a MTU */ 6339 goto need_retran; 6340 } 6341 if (IN_FASTRECOVERY(tp->t_flags)) { 6342 /* Unlikely */ 6343 if (rack->rack_no_prr == 0) { 6344 if (out + amm <= tp->snd_wnd) { 6345 rack->r_ctl.rc_prr_sndcnt = amm; 6346 rack->r_ctl.rc_tlp_new_data = amm; 6347 rack_log_to_prr(rack, 4, 0); 6348 } 6349 } else 6350 goto need_retran; 6351 } else { 6352 /* Set the send-new override */ 6353 if (out + amm <= tp->snd_wnd) 6354 rack->r_ctl.rc_tlp_new_data = amm; 6355 else 6356 goto need_retran; 6357 } 6358 rack->r_ctl.rc_tlpsend = NULL; 6359 counter_u64_add(rack_tlp_newdata, 1); 6360 goto send; 6361 } 6362 need_retran: 6363 /* 6364 * Ok we need to arrange the last un-acked segment to be re-sent, or 6365 * optionally the first un-acked segment. 6366 */ 6367 if (collapsed_win == 0) { 6368 if (rack_always_send_oldest) 6369 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6370 else { 6371 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6372 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6373 rsm = rack_find_high_nonack(rack, rsm); 6374 } 6375 } 6376 if (rsm == NULL) { 6377 counter_u64_add(rack_tlp_does_nada, 1); 6378 #ifdef TCP_BLACKBOX 6379 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6380 #endif 6381 goto out; 6382 } 6383 } else { 6384 /* 6385 * We must find the last segment 6386 * that was acceptable by the client. 6387 */ 6388 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6389 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6390 /* Found one */ 6391 break; 6392 } 6393 } 6394 if (rsm == NULL) { 6395 /* None? 
if so send the first */ 6396 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6397 if (rsm == NULL) { 6398 counter_u64_add(rack_tlp_does_nada, 1); 6399 #ifdef TCP_BLACKBOX 6400 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6401 #endif 6402 goto out; 6403 } 6404 } 6405 } 6406 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6407 /* 6408 * We need to split this the last segment in two. 6409 */ 6410 struct rack_sendmap *nrsm; 6411 6412 nrsm = rack_alloc_full_limit(rack); 6413 if (nrsm == NULL) { 6414 /* 6415 * No memory to split, we will just exit and punt 6416 * off to the RXT timer. 6417 */ 6418 counter_u64_add(rack_tlp_does_nada, 1); 6419 goto out; 6420 } 6421 rack_clone_rsm(rack, nrsm, rsm, 6422 (rsm->r_end - ctf_fixed_maxseg(tp))); 6423 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6424 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6425 #ifdef INVARIANTS 6426 if (insret != NULL) { 6427 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6428 nrsm, insret, rack, rsm); 6429 } 6430 #endif 6431 if (rsm->r_in_tmap) { 6432 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6433 nrsm->r_in_tmap = 1; 6434 } 6435 rsm = nrsm; 6436 } 6437 rack->r_ctl.rc_tlpsend = rsm; 6438 send: 6439 /* Make sure output path knows we are doing a TLP */ 6440 *doing_tlp = 1; 6441 rack->r_timer_override = 1; 6442 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6443 return (0); 6444 out: 6445 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6446 return (0); 6447 } 6448 6449 /* 6450 * Delayed ack Timer, here we simply need to setup the 6451 * ACK_NOW flag and remove the DELACK flag. From there 6452 * the output routine will send the ack out. 6453 * 6454 * We only return 1, saying don't proceed, if all timers 6455 * are stopped (destroyed PCB?). 6456 */ 6457 static int 6458 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6459 { 6460 if (tp->t_timers->tt_flags & TT_STOPPED) { 6461 return (1); 6462 } 6463 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6464 tp->t_flags &= ~TF_DELACK; 6465 tp->t_flags |= TF_ACKNOW; 6466 KMOD_TCPSTAT_INC(tcps_delack); 6467 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6468 return (0); 6469 } 6470 6471 /* 6472 * Persists timer, here we simply send the 6473 * same thing as a keepalive will. 6474 * the one byte send. 6475 * 6476 * We only return 1, saying don't proceed, if all timers 6477 * are stopped (destroyed PCB?). 6478 */ 6479 static int 6480 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6481 { 6482 struct tcptemp *t_template; 6483 struct inpcb *inp; 6484 int32_t retval = 1; 6485 6486 inp = tp->t_inpcb; 6487 6488 if (tp->t_timers->tt_flags & TT_STOPPED) { 6489 return (1); 6490 } 6491 if (rack->rc_in_persist == 0) 6492 return (0); 6493 if (ctf_progress_timeout_check(tp, false)) { 6494 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6495 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6496 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6497 return (1); 6498 } 6499 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6500 /* 6501 * Persistence timer into zero window. Force a byte to be output, if 6502 * possible. 6503 */ 6504 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6505 /* 6506 * Hack: if the peer is dead/unreachable, we do not time out if the 6507 * window is closed. 
After a full backoff, drop the connection if 6508 * the idle time (no responses to probes) reaches the maximum 6509 * backoff that we would use if retransmitting. 6510 */ 6511 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6512 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6513 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6514 KMOD_TCPSTAT_INC(tcps_persistdrop); 6515 retval = 1; 6516 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6517 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6518 goto out; 6519 } 6520 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6521 tp->snd_una == tp->snd_max) 6522 rack_exit_persist(tp, rack, cts); 6523 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6524 /* 6525 * If the user has closed the socket then drop a persisting 6526 * connection after a much reduced timeout. 6527 */ 6528 if (tp->t_state > TCPS_CLOSE_WAIT && 6529 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6530 retval = 1; 6531 KMOD_TCPSTAT_INC(tcps_persistdrop); 6532 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6533 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6534 goto out; 6535 } 6536 t_template = tcpip_maketemplate(rack->rc_inp); 6537 if (t_template) { 6538 /* only set it if we were answered */ 6539 if (rack->forced_ack == 0) { 6540 rack->forced_ack = 1; 6541 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6542 } 6543 tcp_respond(tp, t_template->tt_ipgen, 6544 &t_template->tt_t, (struct mbuf *)NULL, 6545 tp->rcv_nxt, tp->snd_una - 1, 0); 6546 /* This sends an ack */ 6547 if (tp->t_flags & TF_DELACK) 6548 tp->t_flags &= ~TF_DELACK; 6549 free(t_template, M_TEMP); 6550 } 6551 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6552 tp->t_rxtshift++; 6553 out: 6554 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6555 rack_start_hpts_timer(rack, tp, cts, 6556 0, 0, 0); 6557 return (retval); 6558 } 6559 6560 /* 6561 * If a keepalive goes off, we had no other timers 6562 * happening. We always return 1 here since this 6563 * routine either drops the connection or sends 6564 * out a segment with respond. 6565 */ 6566 static int 6567 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6568 { 6569 struct tcptemp *t_template; 6570 struct inpcb *inp; 6571 6572 if (tp->t_timers->tt_flags & TT_STOPPED) { 6573 return (1); 6574 } 6575 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6576 inp = tp->t_inpcb; 6577 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6578 /* 6579 * Keep-alive timer went off; send something or drop connection if 6580 * idle for too long. 6581 */ 6582 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6583 if (tp->t_state < TCPS_ESTABLISHED) 6584 goto dropit; 6585 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6586 tp->t_state <= TCPS_CLOSING) { 6587 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6588 goto dropit; 6589 /* 6590 * Send a packet designed to force a response if the peer is 6591 * up and reachable: either an ACK if the connection is 6592 * still alive, or an RST if the peer has closed the 6593 * connection due to timeout or reboot. Using sequence 6594 * number tp->snd_una-1 causes the transmitted zero-length 6595 * segment to lie outside the receive window; by the 6596 * protocol spec, this requires the correspondent TCP to 6597 * respond. 
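 */

/*
 * A minimal sketch of the keep-alive decision described above, with
 * simplified inputs (the real code checks SO_KEEPALIVE, uses
 * TP_KEEPIDLE()/TP_MAXIDLE() and sends via tcp_respond()): after too
 * much idle time the connection is dropped, otherwise a zero-length
 * probe is sent with sequence snd_una - 1, which falls outside the
 * peer's receive window and therefore forces a response.
 */
#if 0	/* illustrative sketch, not part of the build */
enum ex_ka_action { EX_KA_PROBE, EX_KA_DROP };

static enum ex_ka_action
ex_keepalive_check(uint32_t idle_ticks, uint32_t keepidle, uint32_t maxidle,
    uint32_t snd_una, uint32_t *probe_seq)
{
	if (idle_ticks >= keepidle + maxidle)
		return (EX_KA_DROP);		/* peer silent for too long */
	*probe_seq = snd_una - 1;		/* one byte below the window */
	return (EX_KA_PROBE);			/* send the forced-ACK probe */
}
#endif

/*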
6598 */ 6599 KMOD_TCPSTAT_INC(tcps_keepprobe); 6600 t_template = tcpip_maketemplate(inp); 6601 if (t_template) { 6602 if (rack->forced_ack == 0) { 6603 rack->forced_ack = 1; 6604 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6605 } 6606 tcp_respond(tp, t_template->tt_ipgen, 6607 &t_template->tt_t, (struct mbuf *)NULL, 6608 tp->rcv_nxt, tp->snd_una - 1, 0); 6609 free(t_template, M_TEMP); 6610 } 6611 } 6612 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6613 return (1); 6614 dropit: 6615 KMOD_TCPSTAT_INC(tcps_keepdrops); 6616 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6617 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6618 return (1); 6619 } 6620 6621 /* 6622 * Retransmit helper function, clear up all the ack 6623 * flags and take care of important book keeping. 6624 */ 6625 static void 6626 rack_remxt_tmr(struct tcpcb *tp) 6627 { 6628 /* 6629 * The retransmit timer went off, all sack'd blocks must be 6630 * un-acked. 6631 */ 6632 struct rack_sendmap *rsm, *trsm = NULL; 6633 struct tcp_rack *rack; 6634 6635 rack = (struct tcp_rack *)tp->t_fb_ptr; 6636 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6637 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6638 if (rack->r_state && (rack->r_state != tp->t_state)) 6639 rack_set_state(tp, rack); 6640 /* 6641 * Ideally we would like to be able to 6642 * mark SACK-PASS on anything not acked here. 6643 * 6644 * However, if we do that we would burst out 6645 * all that data 1ms apart. This would be unwise, 6646 * so for now we will just let the normal rxt timer 6647 * and tlp timer take care of it. 6648 * 6649 * Also we really need to stick them back in sequence 6650 * order. This way we send in the proper order and any 6651 * sacks that come floating in will "re-ack" the data. 6652 * To do this we zap the tmap with an INIT and then 6653 * walk through and place every rsm in the RB tree 6654 * back in its seq ordered place. 6655 */ 6656 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6657 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6658 rsm->r_dupack = 0; 6659 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6660 /* We must re-add it back to the tlist */ 6661 if (trsm == NULL) { 6662 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6663 } else { 6664 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6665 } 6666 rsm->r_in_tmap = 1; 6667 trsm = rsm; 6668 if (rsm->r_flags & RACK_ACKED) 6669 rsm->r_flags |= RACK_WAS_ACKED; 6670 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6671 rsm->r_flags |= RACK_MUST_RXT; 6672 } 6673 /* Clear the count (we just un-acked them) */ 6674 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6675 rack->r_ctl.rc_sacked = 0; 6676 rack->r_ctl.rc_sacklast = NULL; 6677 rack->r_ctl.rc_agg_delayed = 0; 6678 rack->r_early = 0; 6679 rack->r_ctl.rc_agg_early = 0; 6680 rack->r_late = 0; 6681 /* Clear the tlp rtx mark */ 6682 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6683 if (rack->r_ctl.rc_resend != NULL) 6684 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6685 rack->r_ctl.rc_prr_sndcnt = 0; 6686 rack_log_to_prr(rack, 6, 0); 6687 rack->r_timer_override = 1; 6688 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6689 #ifdef NETFLIX_EXP_DETECTION 6690 || (rack->sack_attack_disable != 0) 6691 #endif 6692 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6693 /* 6694 * For non-sack customers new data 6695 * needs to go out as retransmits until 6696 * we retransmit up to snd_max. 
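 */

/*
 * A minimal sketch of the tmap rebuild done earlier in rack_remxt_tmr(),
 * assuming a simplified entry with plain next pointers instead of the
 * real TAILQ/RB macros (the ex_* names are illustrative only): every
 * entry is visited in sequence order, its ACK state is cleared so it is
 * eligible to retransmit, and it is appended to a fresh time-ordered list.
 */
#if 0	/* illustrative sketch, not part of the build */
struct ex_ent {
	struct ex_ent	*seq_next;	/* sequence-ordered chain (stands in for the RB tree) */
	struct ex_ent	*t_next;	/* time-ordered chain (stands in for rc_tmap) */
	uint16_t	 flags;
	uint8_t		 dupack;
};
#define	EX_ACKED	0x0001
#define	EX_MUST_RXT	0x0002

static struct ex_ent *
ex_rebuild_tmap(struct ex_ent *seq_head)
{
	struct ex_ent *e, *t_head = NULL, *t_tail = NULL;

	for (e = seq_head; e != NULL; e = e->seq_next) {
		e->dupack = 0;
		e->flags &= ~EX_ACKED;		/* every block is un-acked again */
		e->flags |= EX_MUST_RXT;	/* and must be retransmitted */
		e->t_next = NULL;
		if (t_tail == NULL)
			t_head = e;		/* first entry heads the list */
		else
			t_tail->t_next = e;	/* append, preserving sequence order */
		t_tail = e;
	}
	return (t_head);
}
#endif

/*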
6697 */ 6698 rack->r_must_retran = 1; 6699 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6700 rack->r_ctl.rc_sacked); 6701 } 6702 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6703 } 6704 6705 static void 6706 rack_convert_rtts(struct tcpcb *tp) 6707 { 6708 if (tp->t_srtt > 1) { 6709 uint32_t val, frac; 6710 6711 val = tp->t_srtt >> TCP_RTT_SHIFT; 6712 frac = tp->t_srtt & 0x1f; 6713 tp->t_srtt = TICKS_2_USEC(val); 6714 /* 6715 * frac is the fractional part of the srtt (if any) 6716 * but its in ticks and every bit represents 6717 * 1/32nd of a hz. 6718 */ 6719 if (frac) { 6720 if (hz == 1000) { 6721 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6722 } else { 6723 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6724 } 6725 tp->t_srtt += frac; 6726 } 6727 } 6728 if (tp->t_rttvar) { 6729 uint32_t val, frac; 6730 6731 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6732 frac = tp->t_rttvar & 0x1f; 6733 tp->t_rttvar = TICKS_2_USEC(val); 6734 /* 6735 * frac is the fractional part of the srtt (if any) 6736 * but its in ticks and every bit represents 6737 * 1/32nd of a hz. 6738 */ 6739 if (frac) { 6740 if (hz == 1000) { 6741 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6742 } else { 6743 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6744 } 6745 tp->t_rttvar += frac; 6746 } 6747 } 6748 tp->t_rxtcur = RACK_REXMTVAL(tp); 6749 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6750 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6751 } 6752 if (tp->t_rxtcur > rack_rto_max) { 6753 tp->t_rxtcur = rack_rto_max; 6754 } 6755 } 6756 6757 static void 6758 rack_cc_conn_init(struct tcpcb *tp) 6759 { 6760 struct tcp_rack *rack; 6761 uint32_t srtt; 6762 6763 rack = (struct tcp_rack *)tp->t_fb_ptr; 6764 srtt = tp->t_srtt; 6765 cc_conn_init(tp); 6766 /* 6767 * Now convert to rack's internal format, 6768 * if required. 6769 */ 6770 if ((srtt == 0) && (tp->t_srtt != 0)) 6771 rack_convert_rtts(tp); 6772 /* 6773 * We want a chance to stay in slowstart as 6774 * we create a connection. TCP spec says that 6775 * initially ssthresh is infinite. For our 6776 * purposes that is the snd_wnd. 6777 */ 6778 if (tp->snd_ssthresh < tp->snd_wnd) { 6779 tp->snd_ssthresh = tp->snd_wnd; 6780 } 6781 /* 6782 * We also want to assure a IW worth of 6783 * data can get inflight. 6784 */ 6785 if (rc_init_window(rack) < tp->snd_cwnd) 6786 tp->snd_cwnd = rc_init_window(rack); 6787 } 6788 6789 /* 6790 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6791 * we will setup to retransmit the lowest seq number outstanding. 6792 */ 6793 static int 6794 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6795 { 6796 int32_t rexmt; 6797 struct inpcb *inp; 6798 int32_t retval = 0; 6799 bool isipv6; 6800 6801 inp = tp->t_inpcb; 6802 if (tp->t_timers->tt_flags & TT_STOPPED) { 6803 return (1); 6804 } 6805 if ((tp->t_flags & TF_GPUTINPROG) && 6806 (tp->t_rxtshift)) { 6807 /* 6808 * We have had a second timeout 6809 * measurements on successive rxt's are not profitable. 6810 * It is unlikely to be of any use (the network is 6811 * broken or the client went away). 
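 */

/*
 * A minimal sketch of the tick-to-microsecond conversion done in
 * rack_convert_rtts() above, with the kernel constants written out as
 * plain numbers (TCP_RTT_SHIFT is 5, so the stored srtt is in units of
 * 1/32 tick).  For example, with hz = 1000 a stored srtt of 1349 is
 * 42 whole ticks plus 5/32 of a tick, i.e. 42000 + 156 = 42156 usec.
 */
#if 0	/* illustrative sketch, not part of the build */
static uint32_t
ex_srtt_ticks_to_usec(uint32_t srtt_fixed, uint32_t hz_val)
{
	uint32_t whole = srtt_fixed >> 5;	/* whole ticks */
	uint32_t frac = srtt_fixed & 0x1f;	/* 1/32-tick remainder */
	uint64_t usec;

	usec = (uint64_t)whole * 1000000 / hz_val;
	usec += (uint64_t)frac * 1000000 / ((uint64_t)hz_val * 32);
	return ((uint32_t)usec);
}
#endif

/*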
6812 */ 6813 tp->t_flags &= ~TF_GPUTINPROG; 6814 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6815 rack->r_ctl.rc_gp_srtt /*flex1*/, 6816 tp->gput_seq, 6817 0, 0, 18, __LINE__, NULL, 0); 6818 } 6819 if (ctf_progress_timeout_check(tp, false)) { 6820 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6821 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6822 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6823 return (1); 6824 } 6825 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6826 rack->r_ctl.retran_during_recovery = 0; 6827 rack->r_ctl.dsack_byte_cnt = 0; 6828 if (IN_FASTRECOVERY(tp->t_flags)) 6829 tp->t_flags |= TF_WASFRECOVERY; 6830 else 6831 tp->t_flags &= ~TF_WASFRECOVERY; 6832 if (IN_CONGRECOVERY(tp->t_flags)) 6833 tp->t_flags |= TF_WASCRECOVERY; 6834 else 6835 tp->t_flags &= ~TF_WASCRECOVERY; 6836 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6837 (tp->snd_una == tp->snd_max)) { 6838 /* Nothing outstanding .. nothing to do */ 6839 return (0); 6840 } 6841 if (rack->r_ctl.dsack_persist) { 6842 rack->r_ctl.dsack_persist--; 6843 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6844 rack->r_ctl.num_dsack = 0; 6845 } 6846 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6847 } 6848 /* 6849 * Rack can only run one timer at a time, so we cannot 6850 * run a KEEPINIT (gating SYN sending) and a retransmit 6851 * timer for the SYN. So if we are in a front state and 6852 * have a KEEPINIT timer we need to check the first transmit 6853 * against now to see if we have exceeded the KEEPINIT time 6854 * (if one is set). 6855 */ 6856 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6857 (TP_KEEPINIT(tp) != 0)) { 6858 struct rack_sendmap *rsm; 6859 6860 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6861 if (rsm) { 6862 /* Ok we have something outstanding to test keepinit with */ 6863 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6864 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6865 /* We have exceeded the KEEPINIT time */ 6866 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6867 goto drop_it; 6868 } 6869 } 6870 } 6871 /* 6872 * Retransmission timer went off. Message has not been acked within 6873 * retransmit interval. Back off to a longer retransmit interval 6874 * and retransmit one segment. 6875 */ 6876 rack_remxt_tmr(tp); 6877 if ((rack->r_ctl.rc_resend == NULL) || 6878 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6879 /* 6880 * If the rwnd collapsed on 6881 * the one we are retransmitting 6882 * it does not count against the 6883 * rxt count. 6884 */ 6885 tp->t_rxtshift++; 6886 } 6887 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6888 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6889 drop_it: 6890 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6891 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6892 retval = 1; 6893 tcp_set_inp_to_drop(rack->rc_inp, 6894 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 6895 goto out; 6896 } 6897 if (tp->t_state == TCPS_SYN_SENT) { 6898 /* 6899 * If the SYN was retransmitted, indicate CWND to be limited 6900 * to 1 segment in cc_conn_init(). 6901 */ 6902 tp->snd_cwnd = 1; 6903 } else if (tp->t_rxtshift == 1) { 6904 /* 6905 * first retransmit; record ssthresh and cwnd so they can be 6906 * recovered if this turns out to be a "bad" retransmit. A 6907 * retransmit is considered "bad" if an ACK for this segment 6908 * is received within RTT/2 interval; the assumption here is 6909 * that the ACK was already in flight. 
See "On Estimating 6910 * End-to-End Network Path Properties" by Allman and Paxson 6911 * for more details. 6912 */ 6913 tp->snd_cwnd_prev = tp->snd_cwnd; 6914 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6915 tp->snd_recover_prev = tp->snd_recover; 6916 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6917 tp->t_flags |= TF_PREVVALID; 6918 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6919 tp->t_flags &= ~TF_PREVVALID; 6920 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6921 if ((tp->t_state == TCPS_SYN_SENT) || 6922 (tp->t_state == TCPS_SYN_RECEIVED)) 6923 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6924 else 6925 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6926 6927 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6928 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6929 /* 6930 * We enter the path for PLMTUD if connection is established or, if 6931 * connection is FIN_WAIT_1 status, reason for the last is that if 6932 * amount of data we send is very small, we could send it in couple 6933 * of packets and process straight to FIN. In that case we won't 6934 * catch ESTABLISHED state. 6935 */ 6936 #ifdef INET6 6937 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; 6938 #else 6939 isipv6 = false; 6940 #endif 6941 if (((V_tcp_pmtud_blackhole_detect == 1) || 6942 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6943 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6944 ((tp->t_state == TCPS_ESTABLISHED) || 6945 (tp->t_state == TCPS_FIN_WAIT_1))) { 6946 /* 6947 * Idea here is that at each stage of mtu probe (usually, 6948 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6949 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6950 * should take care of that. 6951 */ 6952 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6953 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6954 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6955 tp->t_rxtshift % 2 == 0)) { 6956 /* 6957 * Enter Path MTU Black-hole Detection mechanism: - 6958 * Disable Path MTU Discovery (IP "DF" bit). - 6959 * Reduce MTU to lower value than what we negotiated 6960 * with peer. 6961 */ 6962 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6963 /* Record that we may have found a black hole. */ 6964 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6965 /* Keep track of previous MSS. */ 6966 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6967 } 6968 6969 /* 6970 * Reduce the MSS to blackhole value or to the 6971 * default in an attempt to retransmit. 6972 */ 6973 #ifdef INET6 6974 if (isipv6 && 6975 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6976 /* Use the sysctl tuneable blackhole MSS. */ 6977 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6978 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6979 } else if (isipv6) { 6980 /* Use the default MSS. */ 6981 tp->t_maxseg = V_tcp_v6mssdflt; 6982 /* 6983 * Disable Path MTU Discovery when we switch 6984 * to minmss. 6985 */ 6986 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6987 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6988 } 6989 #endif 6990 #if defined(INET6) && defined(INET) 6991 else 6992 #endif 6993 #ifdef INET 6994 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6995 /* Use the sysctl tuneable blackhole MSS. */ 6996 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6997 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6998 } else { 6999 /* Use the default MSS. */ 7000 tp->t_maxseg = V_tcp_mssdflt; 7001 /* 7002 * Disable Path MTU Discovery when we switch 7003 * to minmss. 
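 */

/*
 * A minimal sketch of the staged MSS clamp used by the blackhole
 * probing above: every second retransmit (shift 2, then 4) steps the
 * MSS down, first to the tunable blackhole MSS and then to the default
 * MSS.  The PLPMTU flag handling is omitted here and the two MSS
 * parameters stand in for the V_tcp_*pmtud_blackhole_mss and
 * V_tcp_*mssdflt sysctls.
 */
#if 0	/* illustrative sketch, not part of the build */
static uint32_t
ex_blackhole_step(uint32_t cur_mss, int rxtshift,
    uint32_t blackhole_mss, uint32_t default_mss)
{
	if (rxtshift < 2 || rxtshift >= 6 || (rxtshift % 2) != 0)
		return (cur_mss);		/* not a probing point */
	if (cur_mss > blackhole_mss)
		return (blackhole_mss);		/* first clamp: tunable blackhole MSS */
	return (default_mss);			/* second clamp: protocol default MSS */
}
#endif

/*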
7004 */ 7005 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7006 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7007 } 7008 #endif 7009 } else { 7010 /* 7011 * If further retransmissions are still unsuccessful 7012 * with a lowered MTU, maybe this isn't a blackhole 7013 * and we restore the previous MSS and blackhole 7014 * detection flags. The limit '6' is determined by 7015 * giving each probe stage (1448, 1188, 524) 2 7016 * chances to recover. 7017 */ 7018 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7019 (tp->t_rxtshift >= 6)) { 7020 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7021 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7022 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7023 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7024 } 7025 } 7026 } 7027 /* 7028 * Disable RFC1323 and SACK if we haven't got any response to 7029 * our third SYN to work-around some broken terminal servers 7030 * (most of which have hopefully been retired) that have bad VJ 7031 * header compression code which trashes TCP segments containing 7032 * unknown-to-them TCP options. 7033 */ 7034 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7035 (tp->t_rxtshift == 3)) 7036 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7037 /* 7038 * If we backed off this far, our srtt estimate is probably bogus. 7039 * Clobber it so we'll take the next rtt measurement as our srtt; 7040 * move the current srtt into rttvar to keep the current retransmit 7041 * times until then. 7042 */ 7043 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7044 #ifdef INET6 7045 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7046 in6_losing(tp->t_inpcb); 7047 else 7048 #endif 7049 in_losing(tp->t_inpcb); 7050 tp->t_rttvar += tp->t_srtt; 7051 tp->t_srtt = 0; 7052 } 7053 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7054 tp->snd_recover = tp->snd_max; 7055 tp->t_flags |= TF_ACKNOW; 7056 tp->t_rtttime = 0; 7057 rack_cong_signal(tp, CC_RTO, tp->snd_una); 7058 out: 7059 return (retval); 7060 } 7061 7062 static int 7063 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7064 { 7065 int32_t ret = 0; 7066 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7067 7068 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7069 (tp->t_flags & TF_GPUTINPROG)) { 7070 /* 7071 * We have a goodput in progress 7072 * and we have entered a late state. 7073 * Do we have enough data in the sb 7074 * to handle the GPUT request? 7075 */ 7076 uint32_t bytes; 7077 7078 bytes = tp->gput_ack - tp->gput_seq; 7079 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7080 bytes += tp->gput_seq - tp->snd_una; 7081 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 7082 /* 7083 * There are not enough bytes in the socket 7084 * buffer that have been sent to cover this 7085 * measurement. Cancel it. 7086 */ 7087 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7088 rack->r_ctl.rc_gp_srtt /*flex1*/, 7089 tp->gput_seq, 7090 0, 0, 18, __LINE__, NULL, 0); 7091 tp->t_flags &= ~TF_GPUTINPROG; 7092 } 7093 } 7094 if (timers == 0) { 7095 return (0); 7096 } 7097 if (tp->t_state == TCPS_LISTEN) { 7098 /* no timers on listen sockets */ 7099 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7100 return (0); 7101 return (1); 7102 } 7103 if ((timers & PACE_TMR_RACK) && 7104 rack->rc_on_min_to) { 7105 /* 7106 * For the rack timer when we 7107 * are on a min-timeout (which means rrr_conf = 3) 7108 * we don't want to check the timer. 
It may 7109 * be going off for a pace and thats ok we 7110 * want to send the retransmit (if its ready). 7111 * 7112 * If its on a normal rack timer (non-min) then 7113 * we will check if its expired. 7114 */ 7115 goto skip_time_check; 7116 } 7117 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7118 uint32_t left; 7119 7120 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7121 ret = -1; 7122 rack_log_to_processing(rack, cts, ret, 0); 7123 return (0); 7124 } 7125 if (hpts_calling == 0) { 7126 /* 7127 * A user send or queued mbuf (sack) has called us? We 7128 * return 0 and let the pacing guards 7129 * deal with it if they should or 7130 * should not cause a send. 7131 */ 7132 ret = -2; 7133 rack_log_to_processing(rack, cts, ret, 0); 7134 return (0); 7135 } 7136 /* 7137 * Ok our timer went off early and we are not paced false 7138 * alarm, go back to sleep. 7139 */ 7140 ret = -3; 7141 left = rack->r_ctl.rc_timer_exp - cts; 7142 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 7143 rack_log_to_processing(rack, cts, ret, left); 7144 return (1); 7145 } 7146 skip_time_check: 7147 rack->rc_tmr_stopped = 0; 7148 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7149 if (timers & PACE_TMR_DELACK) { 7150 ret = rack_timeout_delack(tp, rack, cts); 7151 } else if (timers & PACE_TMR_RACK) { 7152 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7153 rack->r_fast_output = 0; 7154 ret = rack_timeout_rack(tp, rack, cts); 7155 } else if (timers & PACE_TMR_TLP) { 7156 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7157 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7158 } else if (timers & PACE_TMR_RXT) { 7159 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7160 rack->r_fast_output = 0; 7161 ret = rack_timeout_rxt(tp, rack, cts); 7162 } else if (timers & PACE_TMR_PERSIT) { 7163 ret = rack_timeout_persist(tp, rack, cts); 7164 } else if (timers & PACE_TMR_KEEP) { 7165 ret = rack_timeout_keepalive(tp, rack, cts); 7166 } 7167 rack_log_to_processing(rack, cts, ret, timers); 7168 return (ret); 7169 } 7170 7171 static void 7172 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7173 { 7174 struct timeval tv; 7175 uint32_t us_cts, flags_on_entry; 7176 uint8_t hpts_removed = 0; 7177 7178 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7179 us_cts = tcp_get_usecs(&tv); 7180 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7181 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7182 ((tp->snd_max - tp->snd_una) == 0))) { 7183 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7184 hpts_removed = 1; 7185 /* If we were not delayed cancel out the flag. */ 7186 if ((tp->snd_max - tp->snd_una) == 0) 7187 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7188 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7189 } 7190 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7191 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7192 if (rack->rc_inp->inp_in_hpts && 7193 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7194 /* 7195 * Canceling timer's when we have no output being 7196 * paced. We also must remove ourselves from the 7197 * hpts. 
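 */

/*
 * A minimal sketch of the early-wakeup handling in rack_process_timers()
 * above: when the wheel fires before the recorded expiry, the remaining
 * time is computed and the connection is simply re-queued for that long
 * instead of running the timer handler.  The wrap-safe comparison below
 * mirrors what the TSTMP_LT() macro does.
 */
#if 0	/* illustrative sketch, not part of the build */
/* Returns the time still to wait, or 0 if the timer has expired. */
static uint32_t
ex_timer_remaining(uint32_t now, uint32_t timer_exp)
{
	if ((int32_t)(timer_exp - now) <= 0)
		return (0);		/* already expired, run the handler */
	return (timer_exp - now);	/* re-arm for what is left */
}
#endif

/*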
7198 */ 7199 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7200 hpts_removed = 1; 7201 } 7202 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7203 } 7204 if (hpts_removed == 0) 7205 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7206 } 7207 7208 static void 7209 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7210 { 7211 return; 7212 } 7213 7214 static int 7215 rack_stopall(struct tcpcb *tp) 7216 { 7217 struct tcp_rack *rack; 7218 rack = (struct tcp_rack *)tp->t_fb_ptr; 7219 rack->t_timers_stopped = 1; 7220 return (0); 7221 } 7222 7223 static void 7224 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7225 { 7226 return; 7227 } 7228 7229 static int 7230 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7231 { 7232 return (0); 7233 } 7234 7235 static void 7236 rack_stop_all_timers(struct tcpcb *tp) 7237 { 7238 struct tcp_rack *rack; 7239 7240 /* 7241 * Assure no timers are running. 7242 */ 7243 if (tcp_timer_active(tp, TT_PERSIST)) { 7244 /* We enter in persists, set the flag appropriately */ 7245 rack = (struct tcp_rack *)tp->t_fb_ptr; 7246 rack->rc_in_persist = 1; 7247 } 7248 tcp_timer_suspend(tp, TT_PERSIST); 7249 tcp_timer_suspend(tp, TT_REXMT); 7250 tcp_timer_suspend(tp, TT_KEEP); 7251 tcp_timer_suspend(tp, TT_DELACK); 7252 } 7253 7254 static void 7255 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7256 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7257 { 7258 int32_t idx; 7259 7260 rsm->r_rtr_cnt++; 7261 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7262 rsm->r_dupack = 0; 7263 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7264 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7265 rsm->r_flags |= RACK_OVERMAX; 7266 } 7267 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7268 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7269 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7270 } 7271 idx = rsm->r_rtr_cnt - 1; 7272 rsm->r_tim_lastsent[idx] = ts; 7273 /* 7274 * Here we don't add in the len of send, since its already 7275 * in snduna <->snd_max. 7276 */ 7277 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7278 rack->r_ctl.rc_sacked); 7279 if (rsm->r_flags & RACK_ACKED) { 7280 /* Problably MTU discovery messing with us */ 7281 rsm->r_flags &= ~RACK_ACKED; 7282 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7283 } 7284 if (rsm->r_in_tmap) { 7285 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7286 rsm->r_in_tmap = 0; 7287 } 7288 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7289 rsm->r_in_tmap = 1; 7290 if (rsm->r_flags & RACK_SACK_PASSED) { 7291 /* We have retransmitted due to the SACK pass */ 7292 rsm->r_flags &= ~RACK_SACK_PASSED; 7293 rsm->r_flags |= RACK_WAS_SACKPASS; 7294 } 7295 } 7296 7297 static uint32_t 7298 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7299 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7300 { 7301 /* 7302 * We (re-)transmitted starting at rsm->r_start for some length 7303 * (possibly less than r_end. 7304 */ 7305 struct rack_sendmap *nrsm, *insret; 7306 uint32_t c_end; 7307 int32_t len; 7308 7309 len = *lenp; 7310 c_end = rsm->r_start + len; 7311 if (SEQ_GEQ(c_end, rsm->r_end)) { 7312 /* 7313 * We retransmitted the whole piece or more than the whole 7314 * slopping into the next rsm. 
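 */

/*
 * A minimal sketch of the accounting described above (ignoring
 * sequence wraparound for clarity): a (re)transmission that starts at
 * a block's r_start may cover the whole block and spill into the next
 * one, in which case the spill-over length is handed back so the
 * caller can continue with the following block.  A transmission that
 * ends inside the block is fully consumed here; the split itself is
 * done by the caller.
 */
#if 0	/* illustrative sketch, not part of the build */
static uint32_t
ex_consume_retran(uint32_t start, uint32_t end, int32_t *lenp)
{
	uint32_t c_end = start + *lenp;

	if (c_end <= end) {
		*lenp = 0;		/* fits entirely in this block */
		return (0);		/* nothing spills over */
	}
	*lenp -= (end - start);		/* bytes that spill past this block */
	return (end);			/* resume at the next block's start */
}
#endif

/*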
7315 */ 7316 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7317 if (c_end == rsm->r_end) { 7318 *lenp = 0; 7319 return (0); 7320 } else { 7321 int32_t act_len; 7322 7323 /* Hangs over the end return whats left */ 7324 act_len = rsm->r_end - rsm->r_start; 7325 *lenp = (len - act_len); 7326 return (rsm->r_end); 7327 } 7328 /* We don't get out of this block. */ 7329 } 7330 /* 7331 * Here we retransmitted less than the whole thing which means we 7332 * have to split this into what was transmitted and what was not. 7333 */ 7334 nrsm = rack_alloc_full_limit(rack); 7335 if (nrsm == NULL) { 7336 /* 7337 * We can't get memory, so lets not proceed. 7338 */ 7339 *lenp = 0; 7340 return (0); 7341 } 7342 /* 7343 * So here we are going to take the original rsm and make it what we 7344 * retransmitted. nrsm will be the tail portion we did not 7345 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7346 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7347 * 1, 6 and the new piece will be 6, 11. 7348 */ 7349 rack_clone_rsm(rack, nrsm, rsm, c_end); 7350 nrsm->r_dupack = 0; 7351 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7352 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7353 #ifdef INVARIANTS 7354 if (insret != NULL) { 7355 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7356 nrsm, insret, rack, rsm); 7357 } 7358 #endif 7359 if (rsm->r_in_tmap) { 7360 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7361 nrsm->r_in_tmap = 1; 7362 } 7363 rsm->r_flags &= (~RACK_HAS_FIN); 7364 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7365 /* Log a split of rsm into rsm and nrsm */ 7366 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7367 *lenp = 0; 7368 return (0); 7369 } 7370 7371 static void 7372 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7373 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts, 7374 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7375 { 7376 struct tcp_rack *rack; 7377 struct rack_sendmap *rsm, *nrsm, *insret, fe; 7378 register uint32_t snd_max, snd_una; 7379 7380 /* 7381 * Add to the RACK log of packets in flight or retransmitted. If 7382 * there is a TS option we will use the TS echoed, if not we will 7383 * grab a TS. 7384 * 7385 * Retransmissions will increment the count and move the ts to its 7386 * proper place. Note that if options do not include TS's then we 7387 * won't be able to effectively use the ACK for an RTT on a retran. 7388 * 7389 * Notes about r_start and r_end. Lets consider a send starting at 7390 * sequence 1 for 10 bytes. In such an example the r_start would be 7391 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7392 * This means that r_end is actually the first sequence for the next 7393 * slot (11). 7394 * 7395 */ 7396 /* 7397 * If err is set what do we do XXXrrs? should we not add the thing? 7398 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7399 * i.e. proceed with add ** do this for now. 7400 */ 7401 INP_WLOCK_ASSERT(tp->t_inpcb); 7402 if (err) 7403 /* 7404 * We don't log errors -- we could but snd_max does not 7405 * advance in this case either. 
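 */

/*
 * A minimal sketch of the half-open [r_start, r_end) convention and
 * the split described in the comments above: splitting the block 1..11
 * at sequence 6 leaves the original covering 1..6 and the new block
 * covering 6..11, so no sequence number is counted twice and none is
 * lost.  The ex_range type is illustrative, not the real rack_sendmap.
 */
#if 0	/* illustrative sketch, not part of the build */
struct ex_range {
	uint32_t start;	/* first sequence in the block */
	uint32_t end;	/* first sequence *after* the block */
};

/* Split 'orig' at 'at'; 'tail' receives [at, end) and orig keeps [start, at). */
static void
ex_split_range(struct ex_range *orig, struct ex_range *tail, uint32_t at)
{
	tail->start = at;
	tail->end = orig->end;
	orig->end = at;
}
#endif

/*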
7406 */ 7407 return; 7408 7409 if (th_flags & TH_RST) { 7410 /* 7411 * We don't log resets and we return immediately from 7412 * sending 7413 */ 7414 return; 7415 } 7416 rack = (struct tcp_rack *)tp->t_fb_ptr; 7417 snd_una = tp->snd_una; 7418 snd_max = tp->snd_max; 7419 if (th_flags & (TH_SYN | TH_FIN)) { 7420 /* 7421 * The call to rack_log_output is made before bumping 7422 * snd_max. This means we can record one extra byte on a SYN 7423 * or FIN if seq_out is adding more on and a FIN is present 7424 * (and we are not resending). 7425 */ 7426 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7427 len++; 7428 if (th_flags & TH_FIN) 7429 len++; 7430 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7431 /* 7432 * The add/update as not been done for the FIN/SYN 7433 * yet. 7434 */ 7435 snd_max = tp->snd_nxt; 7436 } 7437 } 7438 if (SEQ_LEQ((seq_out + len), snd_una)) { 7439 /* Are sending an old segment to induce an ack (keep-alive)? */ 7440 return; 7441 } 7442 if (SEQ_LT(seq_out, snd_una)) { 7443 /* huh? should we panic? */ 7444 uint32_t end; 7445 7446 end = seq_out + len; 7447 seq_out = snd_una; 7448 if (SEQ_GEQ(end, seq_out)) 7449 len = end - seq_out; 7450 else 7451 len = 0; 7452 } 7453 if (len == 0) { 7454 /* We don't log zero window probes */ 7455 return; 7456 } 7457 rack->r_ctl.rc_time_last_sent = cts; 7458 if (IN_FASTRECOVERY(tp->t_flags)) { 7459 rack->r_ctl.rc_prr_out += len; 7460 } 7461 /* First question is it a retransmission or new? */ 7462 if (seq_out == snd_max) { 7463 /* Its new */ 7464 again: 7465 rsm = rack_alloc(rack); 7466 if (rsm == NULL) { 7467 /* 7468 * Hmm out of memory and the tcb got destroyed while 7469 * we tried to wait. 7470 */ 7471 return; 7472 } 7473 if (th_flags & TH_FIN) { 7474 rsm->r_flags = RACK_HAS_FIN|add_flag; 7475 } else { 7476 rsm->r_flags = add_flag; 7477 } 7478 if (hw_tls) 7479 rsm->r_hw_tls = 1; 7480 rsm->r_tim_lastsent[0] = cts; 7481 rsm->r_rtr_cnt = 1; 7482 rsm->r_rtr_bytes = 0; 7483 if (th_flags & TH_SYN) { 7484 /* The data space is one beyond snd_una */ 7485 rsm->r_flags |= RACK_HAS_SYN; 7486 } 7487 rsm->r_start = seq_out; 7488 rsm->r_end = rsm->r_start + len; 7489 rsm->r_dupack = 0; 7490 /* 7491 * save off the mbuf location that 7492 * sndmbuf_noadv returned (which is 7493 * where we started copying from).. 7494 */ 7495 rsm->m = s_mb; 7496 rsm->soff = s_moff; 7497 /* 7498 * Here we do add in the len of send, since its not yet 7499 * reflected in in snduna <->snd_max 7500 */ 7501 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7502 rack->r_ctl.rc_sacked) + 7503 (rsm->r_end - rsm->r_start)); 7504 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7505 if (rsm->m) { 7506 if (rsm->m->m_len <= rsm->soff) { 7507 /* 7508 * XXXrrs Question, will this happen? 7509 * 7510 * If sbsndptr is set at the correct place 7511 * then s_moff should always be somewhere 7512 * within rsm->m. But if the sbsndptr was 7513 * off then that won't be true. If it occurs 7514 * we need to walkout to the correct location. 
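 */

/*
 * A minimal sketch of the old-data trimming done above in
 * rack_log_output(): a send that starts below snd_una is clipped so
 * that only the not-yet-acked tail is recorded, and a send that falls
 * entirely at or below snd_una ends up with length zero and is not
 * logged at all.  The casts mirror the wrap-safe SEQ_LT/SEQ_GEQ macros.
 */
#if 0	/* illustrative sketch, not part of the build */
static uint32_t
ex_clip_below_una(uint32_t *seq_out, uint32_t len, uint32_t snd_una)
{
	uint32_t end = *seq_out + len;

	if ((int32_t)(*seq_out - snd_una) >= 0)
		return (len);		/* starts at or above snd_una, keep as is */
	*seq_out = snd_una;		/* move the start up to snd_una */
	if ((int32_t)(end - snd_una) > 0)
		return (end - snd_una);	/* keep only the part not yet acked */
	return (0);			/* entirely old data, nothing to record */
}
#endif

/*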
7515 */ 7516 struct mbuf *lm; 7517 7518 lm = rsm->m; 7519 while (lm->m_len <= rsm->soff) { 7520 rsm->soff -= lm->m_len; 7521 lm = lm->m_next; 7522 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7523 __func__, rack, s_moff, s_mb, rsm->soff)); 7524 } 7525 rsm->m = lm; 7526 counter_u64_add(rack_sbsndptr_wrong, 1); 7527 } else 7528 counter_u64_add(rack_sbsndptr_right, 1); 7529 rsm->orig_m_len = rsm->m->m_len; 7530 } else 7531 rsm->orig_m_len = 0; 7532 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7533 /* Log a new rsm */ 7534 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7535 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7536 #ifdef INVARIANTS 7537 if (insret != NULL) { 7538 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7539 nrsm, insret, rack, rsm); 7540 } 7541 #endif 7542 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7543 rsm->r_in_tmap = 1; 7544 /* 7545 * Special case detection, is there just a single 7546 * packet outstanding when we are not in recovery? 7547 * 7548 * If this is true mark it so. 7549 */ 7550 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7551 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7552 struct rack_sendmap *prsm; 7553 7554 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7555 if (prsm) 7556 prsm->r_one_out_nr = 1; 7557 } 7558 return; 7559 } 7560 /* 7561 * If we reach here its a retransmission and we need to find it. 7562 */ 7563 memset(&fe, 0, sizeof(fe)); 7564 more: 7565 if (hintrsm && (hintrsm->r_start == seq_out)) { 7566 rsm = hintrsm; 7567 hintrsm = NULL; 7568 } else { 7569 /* No hints sorry */ 7570 rsm = NULL; 7571 } 7572 if ((rsm) && (rsm->r_start == seq_out)) { 7573 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7574 if (len == 0) { 7575 return; 7576 } else { 7577 goto more; 7578 } 7579 } 7580 /* Ok it was not the last pointer go through it the hard way. */ 7581 refind: 7582 fe.r_start = seq_out; 7583 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7584 if (rsm) { 7585 if (rsm->r_start == seq_out) { 7586 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7587 if (len == 0) { 7588 return; 7589 } else { 7590 goto refind; 7591 } 7592 } 7593 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7594 /* Transmitted within this piece */ 7595 /* 7596 * Ok we must split off the front and then let the 7597 * update do the rest 7598 */ 7599 nrsm = rack_alloc_full_limit(rack); 7600 if (nrsm == NULL) { 7601 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7602 return; 7603 } 7604 /* 7605 * copy rsm to nrsm and then trim the front of rsm 7606 * to not include this part. 7607 */ 7608 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7609 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7610 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7611 #ifdef INVARIANTS 7612 if (insret != NULL) { 7613 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7614 nrsm, insret, rack, rsm); 7615 } 7616 #endif 7617 if (rsm->r_in_tmap) { 7618 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7619 nrsm->r_in_tmap = 1; 7620 } 7621 rsm->r_flags &= (~RACK_HAS_FIN); 7622 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7623 if (len == 0) { 7624 return; 7625 } else if (len > 0) 7626 goto refind; 7627 } 7628 } 7629 /* 7630 * Hmm not found in map did they retransmit both old and on into the 7631 * new? 
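 */

/*
 * A minimal sketch of the containment test used by the lookup above:
 * with half-open blocks, sequence s belongs to a block exactly when
 * start <= s < end.  The casts below reproduce what SEQ_GEQ/SEQ_LT do
 * so the comparison survives 32-bit sequence wraparound.
 */
#if 0	/* illustrative sketch, not part of the build */
static int
ex_block_contains(uint32_t start, uint32_t end, uint32_t s)
{
	return ((int32_t)(s - start) >= 0 && (int32_t)(s - end) < 0);
}
#endif

/*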
7632 */ 7633 if (seq_out == tp->snd_max) { 7634 goto again; 7635 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7636 #ifdef INVARIANTS 7637 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7638 seq_out, len, tp->snd_una, tp->snd_max); 7639 printf("Starting Dump of all rack entries\n"); 7640 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7641 printf("rsm:%p start:%u end:%u\n", 7642 rsm, rsm->r_start, rsm->r_end); 7643 } 7644 printf("Dump complete\n"); 7645 panic("seq_out not found rack:%p tp:%p", 7646 rack, tp); 7647 #endif 7648 } else { 7649 #ifdef INVARIANTS 7650 /* 7651 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7652 * flag) 7653 */ 7654 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7655 seq_out, len, tp->snd_max, tp); 7656 #endif 7657 } 7658 } 7659 7660 /* 7661 * Record one of the RTT updates from an ack into 7662 * our sample structure. 7663 */ 7664 7665 static void 7666 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7667 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7668 { 7669 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7670 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7671 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7672 } 7673 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7674 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7675 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7676 } 7677 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7678 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7679 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7680 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7681 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7682 } 7683 if ((confidence == 1) && 7684 ((rsm == NULL) || 7685 (rsm->r_just_ret) || 7686 (rsm->r_one_out_nr && 7687 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7688 /* 7689 * If the rsm had a just return 7690 * hit it then we can't trust the 7691 * rtt measurement for buffer deterimination 7692 * Note that a confidence of 2, indicates 7693 * SACK'd which overrides the r_just_ret or 7694 * the r_one_out_nr. If it was a CUM-ACK and 7695 * we had only two outstanding, but get an 7696 * ack for only 1. Then that also lowers our 7697 * confidence. 7698 */ 7699 confidence = 0; 7700 } 7701 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7702 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7703 if (rack->r_ctl.rack_rs.confidence == 0) { 7704 /* 7705 * We take anything with no current confidence 7706 * saved. 7707 */ 7708 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7709 rack->r_ctl.rack_rs.confidence = confidence; 7710 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7711 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7712 /* 7713 * Once we have a confident number, 7714 * we can update it with a smaller 7715 * value since this confident number 7716 * may include the DSACK time until 7717 * the next segment (the second one) arrived. 7718 */ 7719 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7720 rack->r_ctl.rack_rs.confidence = confidence; 7721 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7722 } 7723 } 7724 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7725 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7726 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7727 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7728 } 7729 7730 /* 7731 * Collect new round-trip time estimate 7732 * and update averages and current timeout. 
7733 */ 7734 static void 7735 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7736 { 7737 int32_t delta; 7738 uint32_t o_srtt, o_var; 7739 int32_t hrtt_up = 0; 7740 int32_t rtt; 7741 7742 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7743 /* No valid sample */ 7744 return; 7745 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7746 /* We are to use the lowest RTT seen in a single ack */ 7747 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7748 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7749 /* We are to use the highest RTT seen in a single ack */ 7750 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7751 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7752 /* We are to use the average RTT seen in a single ack */ 7753 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7754 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7755 } else { 7756 #ifdef INVARIANTS 7757 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7758 #endif 7759 return; 7760 } 7761 if (rtt == 0) 7762 rtt = 1; 7763 if (rack->rc_gp_rtt_set == 0) { 7764 /* 7765 * With no RTT we have to accept 7766 * even one we are not confident of. 7767 */ 7768 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7769 rack->rc_gp_rtt_set = 1; 7770 } else if (rack->r_ctl.rack_rs.confidence) { 7771 /* update the running gp srtt */ 7772 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7773 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7774 } 7775 if (rack->r_ctl.rack_rs.confidence) { 7776 /* 7777 * record the low and high for highly buffered path computation, 7778 * we only do this if we are confident (not a retransmission). 7779 */ 7780 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7781 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7782 hrtt_up = 1; 7783 } 7784 if (rack->rc_highly_buffered == 0) { 7785 /* 7786 * Currently once we declare a path has 7787 * highly buffered there is no going 7788 * back, which may be a problem... 7789 */ 7790 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7791 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7792 rack->r_ctl.rc_highest_us_rtt, 7793 rack->r_ctl.rc_lowest_us_rtt, 7794 RACK_RTTS_SEEHBP); 7795 rack->rc_highly_buffered = 1; 7796 } 7797 } 7798 } 7799 if ((rack->r_ctl.rack_rs.confidence) || 7800 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7801 /* 7802 * If we are highly confident of it <or> it was 7803 * never retransmitted we accept it as the last us_rtt. 7804 */ 7805 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7806 /* The lowest rtt can be set if its was not retransmited */ 7807 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7808 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7809 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7810 rack->r_ctl.rc_lowest_us_rtt = 1; 7811 } 7812 } 7813 o_srtt = tp->t_srtt; 7814 o_var = tp->t_rttvar; 7815 rack = (struct tcp_rack *)tp->t_fb_ptr; 7816 if (tp->t_srtt != 0) { 7817 /* 7818 * We keep a simple srtt in microseconds, like our rtt 7819 * measurement. We don't need to do any tricks with shifting 7820 * etc. Instead we just add in 1/8th of the new measurement 7821 * and subtract out 1/8 of the old srtt. We do the same with 7822 * the variance after finding the absolute value of the 7823 * difference between this sample and the current srtt. 
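 */

/*
 * A minimal sketch of the smoothing described above, kept in plain
 * microseconds with no fixed-point shifts: both the smoothed RTT and
 * the variance are exponentially weighted moving averages with a gain
 * of 1/8, and the variance input is |sample - srtt|.  Rounding differs
 * slightly from the shift-based kernel code.
 */
#if 0	/* illustrative sketch, not part of the build */
static void
ex_rtt_update(int32_t *srtt, int32_t *rttvar, int32_t rtt_sample)
{
	int32_t delta = *srtt - rtt_sample;	/* signed error vs. current estimate */

	if (delta < 0)
		delta = -delta;			/* variance uses the absolute error */
	*srtt += (rtt_sample - *srtt) / 8;	/* fold in 1/8 of the new sample */
	*rttvar += (delta - *rttvar) / 8;	/* same gain for the variance */
	if (*srtt <= 0)
		*srtt = 1;
	if (*rttvar <= 0)
		*rttvar = 1;
}
#endif

/*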
7824 */ 7825 delta = tp->t_srtt - rtt; 7826 /* Take off 1/8th of the current sRTT */ 7827 tp->t_srtt -= (tp->t_srtt >> 3); 7828 /* Add in 1/8th of the new RTT just measured */ 7829 tp->t_srtt += (rtt >> 3); 7830 if (tp->t_srtt <= 0) 7831 tp->t_srtt = 1; 7832 /* Now lets make the absolute value of the variance */ 7833 if (delta < 0) 7834 delta = -delta; 7835 /* Subtract out 1/8th */ 7836 tp->t_rttvar -= (tp->t_rttvar >> 3); 7837 /* Add in 1/8th of the new variance we just saw */ 7838 tp->t_rttvar += (delta >> 3); 7839 if (tp->t_rttvar <= 0) 7840 tp->t_rttvar = 1; 7841 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7842 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7843 } else { 7844 /* 7845 * No rtt measurement yet - use the unsmoothed rtt. Set the 7846 * variance to half the rtt (so our first retransmit happens 7847 * at 3*rtt). 7848 */ 7849 tp->t_srtt = rtt; 7850 tp->t_rttvar = rtt >> 1; 7851 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7852 } 7853 rack->rc_srtt_measure_made = 1; 7854 KMOD_TCPSTAT_INC(tcps_rttupdated); 7855 tp->t_rttupdated++; 7856 #ifdef STATS 7857 if (rack_stats_gets_ms_rtt == 0) { 7858 /* Send in the microsecond rtt used for rxt timeout purposes */ 7859 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7860 } else if (rack_stats_gets_ms_rtt == 1) { 7861 /* Send in the millisecond rtt used for rxt timeout purposes */ 7862 int32_t ms_rtt; 7863 7864 /* Round up */ 7865 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7866 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7867 } else if (rack_stats_gets_ms_rtt == 2) { 7868 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7869 int32_t ms_rtt; 7870 7871 /* Round up */ 7872 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7873 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7874 } else { 7875 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7876 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7877 } 7878 7879 #endif 7880 /* 7881 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7882 * way we do the smoothing, srtt and rttvar will each average +1/2 7883 * tick of bias. When we compute the retransmit timer, we want 1/2 7884 * tick of rounding and 1 extra tick because of +-1/2 tick 7885 * uncertainty in the firing of the timer. The bias will give us 7886 * exactly the 1.5 tick we need. But, because the bias is 7887 * statistical, we have to test that we don't drop below the minimum 7888 * feasible timer (which is 2 ticks). 7889 */ 7890 tp->t_rxtshift = 0; 7891 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7892 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7893 rack_log_rtt_sample(rack, rtt); 7894 tp->t_softerror = 0; 7895 } 7896 7897 7898 static void 7899 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7900 { 7901 /* 7902 * Apply to filter the inbound us-rtt at us_cts. 
7903 */ 7904 uint32_t old_rtt; 7905 7906 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7907 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7908 us_rtt, us_cts); 7909 if (rack->r_ctl.last_pacing_time && 7910 rack->rc_gp_dyn_mul && 7911 (rack->r_ctl.last_pacing_time > us_rtt)) 7912 rack->pacing_longer_than_rtt = 1; 7913 else 7914 rack->pacing_longer_than_rtt = 0; 7915 if (old_rtt > us_rtt) { 7916 /* We just hit a new lower rtt time */ 7917 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7918 __LINE__, RACK_RTTS_NEWRTT); 7919 /* 7920 * Only count it if its lower than what we saw within our 7921 * calculated range. 7922 */ 7923 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7924 if (rack_probertt_lower_within && 7925 rack->rc_gp_dyn_mul && 7926 (rack->use_fixed_rate == 0) && 7927 (rack->rc_always_pace)) { 7928 /* 7929 * We are seeing a new lower rtt very close 7930 * to the time that we would have entered probe-rtt. 7931 * This is probably due to the fact that a peer flow 7932 * has entered probe-rtt. Lets go in now too. 7933 */ 7934 uint32_t val; 7935 7936 val = rack_probertt_lower_within * rack_time_between_probertt; 7937 val /= 100; 7938 if ((rack->in_probe_rtt == 0) && 7939 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7940 rack_enter_probertt(rack, us_cts); 7941 } 7942 } 7943 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7944 } 7945 } 7946 } 7947 7948 static int 7949 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7950 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7951 { 7952 uint32_t us_rtt; 7953 int32_t i, all; 7954 uint32_t t, len_acked; 7955 7956 if ((rsm->r_flags & RACK_ACKED) || 7957 (rsm->r_flags & RACK_WAS_ACKED)) 7958 /* Already done */ 7959 return (0); 7960 if (rsm->r_no_rtt_allowed) { 7961 /* Not allowed */ 7962 return (0); 7963 } 7964 if (ack_type == CUM_ACKED) { 7965 if (SEQ_GT(th_ack, rsm->r_end)) { 7966 len_acked = rsm->r_end - rsm->r_start; 7967 all = 1; 7968 } else { 7969 len_acked = th_ack - rsm->r_start; 7970 all = 0; 7971 } 7972 } else { 7973 len_acked = rsm->r_end - rsm->r_start; 7974 all = 0; 7975 } 7976 if (rsm->r_rtr_cnt == 1) { 7977 7978 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7979 if ((int)t <= 0) 7980 t = 1; 7981 if (!tp->t_rttlow || tp->t_rttlow > t) 7982 tp->t_rttlow = t; 7983 if (!rack->r_ctl.rc_rack_min_rtt || 7984 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7985 rack->r_ctl.rc_rack_min_rtt = t; 7986 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7987 rack->r_ctl.rc_rack_min_rtt = 1; 7988 } 7989 } 7990 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7991 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7992 else 7993 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7994 if (us_rtt == 0) 7995 us_rtt = 1; 7996 if (CC_ALGO(tp)->rttsample != NULL) { 7997 /* Kick the RTT to the CC */ 7998 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7999 } 8000 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8001 if (ack_type == SACKED) { 8002 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8003 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8004 } else { 8005 /* 8006 * We need to setup what our confidence 8007 * is in this ack. 
8008 * 8009 * If the rsm was app limited and it is 8010 * less than a mss in length (the end 8011 * of the send) then we have a gap. If we 8012 * were app limited but say we were sending 8013 * multiple MSS's then we are more confident 8014 * int it. 8015 * 8016 * When we are not app-limited then we see if 8017 * the rsm is being included in the current 8018 * measurement, we tell this by the app_limited_needs_set 8019 * flag. 8020 * 8021 * Note that being cwnd blocked is not applimited 8022 * as well as the pacing delay between packets which 8023 * are sending only 1 or 2 MSS's also will show up 8024 * in the RTT. We probably need to examine this algorithm 8025 * a bit more and enhance it to account for the delay 8026 * between rsm's. We could do that by saving off the 8027 * pacing delay of each rsm (in an rsm) and then 8028 * factoring that in somehow though for now I am 8029 * not sure how :) 8030 */ 8031 int calc_conf = 0; 8032 8033 if (rsm->r_flags & RACK_APP_LIMITED) { 8034 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8035 calc_conf = 0; 8036 else 8037 calc_conf = 1; 8038 } else if (rack->app_limited_needs_set == 0) { 8039 calc_conf = 1; 8040 } else { 8041 calc_conf = 0; 8042 } 8043 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8044 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8045 calc_conf, rsm, rsm->r_rtr_cnt); 8046 } 8047 if ((rsm->r_flags & RACK_TLP) && 8048 (!IN_FASTRECOVERY(tp->t_flags))) { 8049 /* Segment was a TLP and our retrans matched */ 8050 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8051 rack->r_ctl.rc_rsm_start = tp->snd_max; 8052 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8053 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8054 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 8055 } 8056 } 8057 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8058 /* New more recent rack_tmit_time */ 8059 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8060 rack->rc_rack_rtt = t; 8061 } 8062 return (1); 8063 } 8064 /* 8065 * We clear the soft/rxtshift since we got an ack. 8066 * There is no assurance we will call the commit() function 8067 * so we need to clear these to avoid incorrect handling. 8068 */ 8069 tp->t_rxtshift = 0; 8070 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8071 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8072 tp->t_softerror = 0; 8073 if (to && (to->to_flags & TOF_TS) && 8074 (ack_type == CUM_ACKED) && 8075 (to->to_tsecr) && 8076 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8077 /* 8078 * Now which timestamp does it match? In this block the ACK 8079 * must be coming from a previous transmission. 8080 */ 8081 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8082 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8083 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8084 if ((int)t <= 0) 8085 t = 1; 8086 if (CC_ALGO(tp)->rttsample != NULL) { 8087 /* 8088 * Kick the RTT to the CC, here 8089 * we lie a bit in that we know the 8090 * retransmission is correct even though 8091 * we retransmitted. This is because 8092 * we match the timestamps. 
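 */

/*
 * A minimal sketch of the timestamp matching above: each recorded
 * transmission time is compared against the echoed timestamp, and only
 * a match on the most recent transmission is allowed to update the
 * RACK transmit time, since a match on an earlier one means the
 * retransmit was spurious.  The millisecond conversion below stands in
 * for rack_ts_to_msec().
 */
#if 0	/* illustrative sketch, not part of the build */
/* Returns the matching transmission index, or -1 if none matched. */
static int
ex_match_tsecr(const uint64_t *sent_usec, int cnt, uint32_t tsecr)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if ((uint32_t)(sent_usec[i] / 1000) == tsecr)
			return (i);	/* echoed TS names this transmission */
	}
	return (-1);
}
#endif

/*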
8093 */ 8094 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8095 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8096 else 8097 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8098 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 8099 } 8100 if ((i + 1) < rsm->r_rtr_cnt) { 8101 /* 8102 * The peer ack'd from our previous 8103 * transmission. We have a spurious 8104 * retransmission and thus we dont 8105 * want to update our rack_rtt. 8106 * 8107 * Hmm should there be a CC revert here? 8108 * 8109 */ 8110 return (0); 8111 } 8112 if (!tp->t_rttlow || tp->t_rttlow > t) 8113 tp->t_rttlow = t; 8114 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8115 rack->r_ctl.rc_rack_min_rtt = t; 8116 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8117 rack->r_ctl.rc_rack_min_rtt = 1; 8118 } 8119 } 8120 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8121 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8122 /* New more recent rack_tmit_time */ 8123 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8124 rack->rc_rack_rtt = t; 8125 } 8126 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 8127 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 8128 rsm->r_rtr_cnt); 8129 return (1); 8130 } 8131 } 8132 goto ts_not_found; 8133 } else { 8134 /* 8135 * Ok its a SACK block that we retransmitted. or a windows 8136 * machine without timestamps. We can tell nothing from the 8137 * time-stamp since its not there or the time the peer last 8138 * recieved a segment that moved forward its cum-ack point. 8139 */ 8140 ts_not_found: 8141 i = rsm->r_rtr_cnt - 1; 8142 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8143 if ((int)t <= 0) 8144 t = 1; 8145 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8146 /* 8147 * We retransmitted and the ack came back in less 8148 * than the smallest rtt we have observed. We most 8149 * likely did an improper retransmit as outlined in 8150 * 6.2 Step 2 point 2 in the rack-draft so we 8151 * don't want to update our rack_rtt. We in 8152 * theory (in future) might want to think about reverting our 8153 * cwnd state but we won't for now. 8154 */ 8155 return (0); 8156 } else if (rack->r_ctl.rc_rack_min_rtt) { 8157 /* 8158 * We retransmitted it and the retransmit did the 8159 * job. 8160 */ 8161 if (!rack->r_ctl.rc_rack_min_rtt || 8162 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8163 rack->r_ctl.rc_rack_min_rtt = t; 8164 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8165 rack->r_ctl.rc_rack_min_rtt = 1; 8166 } 8167 } 8168 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8169 /* New more recent rack_tmit_time */ 8170 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8171 rack->rc_rack_rtt = t; 8172 } 8173 return (1); 8174 } 8175 } 8176 return (0); 8177 } 8178 8179 /* 8180 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
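 */

/*
 * A minimal sketch of that walk, using a simplified time-ordered list
 * (the ex_* names are illustrative only): every entry sent before the
 * newly SACKed one is marked as passed, and the walk stops early at
 * the first entry already marked, since everything older was handled
 * on a previous pass.
 */
#if 0	/* illustrative sketch, not part of the build */
struct ex_tent {
	struct ex_tent	*prev;		/* previous (older) send in time order */
	int		 acked;
	int		 sack_passed;
};

static void
ex_mark_sack_passed(struct ex_tent *sacked)
{
	struct ex_tent *e;

	for (e = sacked->prev; e != NULL; e = e->prev) {
		if (e->acked)
			continue;	/* already acknowledged, nothing to mark */
		if (e->sack_passed)
			break;		/* older entries were marked on an earlier pass */
		e->sack_passed = 1;	/* a later send has now been SACKed past this one */
	}
}
#endif

/*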
8181 */ 8182 static void 8183 rack_log_sack_passed(struct tcpcb *tp, 8184 struct tcp_rack *rack, struct rack_sendmap *rsm) 8185 { 8186 struct rack_sendmap *nrsm; 8187 8188 nrsm = rsm; 8189 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8190 rack_head, r_tnext) { 8191 if (nrsm == rsm) { 8192 /* Skip orginal segment he is acked */ 8193 continue; 8194 } 8195 if (nrsm->r_flags & RACK_ACKED) { 8196 /* 8197 * Skip ack'd segments, though we 8198 * should not see these, since tmap 8199 * should not have ack'd segments. 8200 */ 8201 continue; 8202 } 8203 if (nrsm->r_flags & RACK_SACK_PASSED) { 8204 /* 8205 * We found one that is already marked 8206 * passed, we have been here before and 8207 * so all others below this are marked. 8208 */ 8209 break; 8210 } 8211 nrsm->r_flags |= RACK_SACK_PASSED; 8212 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8213 } 8214 } 8215 8216 static void 8217 rack_need_set_test(struct tcpcb *tp, 8218 struct tcp_rack *rack, 8219 struct rack_sendmap *rsm, 8220 tcp_seq th_ack, 8221 int line, 8222 int use_which) 8223 { 8224 8225 if ((tp->t_flags & TF_GPUTINPROG) && 8226 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8227 /* 8228 * We were app limited, and this ack 8229 * butts up or goes beyond the point where we want 8230 * to start our next measurement. We need 8231 * to record the new gput_ts as here and 8232 * possibly update the start sequence. 8233 */ 8234 uint32_t seq, ts; 8235 8236 if (rsm->r_rtr_cnt > 1) { 8237 /* 8238 * This is a retransmit, can we 8239 * really make any assessment at this 8240 * point? We are not really sure of 8241 * the timestamp, is it this or the 8242 * previous transmission? 8243 * 8244 * Lets wait for something better that 8245 * is not retransmitted. 8246 */ 8247 return; 8248 } 8249 seq = tp->gput_seq; 8250 ts = tp->gput_ts; 8251 rack->app_limited_needs_set = 0; 8252 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8253 /* Do we start at a new end? */ 8254 if ((use_which == RACK_USE_BEG) && 8255 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8256 /* 8257 * When we get an ACK that just eats 8258 * up some of the rsm, we set RACK_USE_BEG 8259 * since whats at r_start (i.e. th_ack) 8260 * is left unacked and thats where the 8261 * measurement not starts. 8262 */ 8263 tp->gput_seq = rsm->r_start; 8264 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8265 } 8266 if ((use_which == RACK_USE_END) && 8267 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8268 /* 8269 * We use the end when the cumack 8270 * is moving forward and completely 8271 * deleting the rsm passed so basically 8272 * r_end holds th_ack. 8273 * 8274 * For SACK's we also want to use the end 8275 * since this piece just got sacked and 8276 * we want to target anything after that 8277 * in our measurement. 8278 */ 8279 tp->gput_seq = rsm->r_end; 8280 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8281 } 8282 if (use_which == RACK_USE_END_OR_THACK) { 8283 /* 8284 * special case for ack moving forward, 8285 * not a sack, we need to move all the 8286 * way up to where this ack cum-ack moves 8287 * to. 8288 */ 8289 if (SEQ_GT(th_ack, rsm->r_end)) 8290 tp->gput_seq = th_ack; 8291 else 8292 tp->gput_seq = rsm->r_end; 8293 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8294 } 8295 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8296 /* 8297 * We moved beyond this guy's range, re-calculate 8298 * the new end point. 
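 * The measurement end point (gput_ack) is re-anchored at the new
 * gput_seq: with no goodput estimate yet we fall back to the
 * initial window (never less than MIN_GP_WIN segments), otherwise
 * we use the normal measurement window for this connection.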
8299 */ 8300 if (rack->rc_gp_filled == 0) { 8301 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8302 } else { 8303 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8304 } 8305 } 8306 /* 8307 * We are moving the goal post, we may be able to clear the 8308 * measure_saw_probe_rtt flag. 8309 */ 8310 if ((rack->in_probe_rtt == 0) && 8311 (rack->measure_saw_probe_rtt) && 8312 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8313 rack->measure_saw_probe_rtt = 0; 8314 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8315 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8316 if (rack->rc_gp_filled && 8317 ((tp->gput_ack - tp->gput_seq) < 8318 max(rc_init_window(rack), (MIN_GP_WIN * 8319 ctf_fixed_maxseg(tp))))) { 8320 uint32_t ideal_amount; 8321 8322 ideal_amount = rack_get_measure_window(tp, rack); 8323 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8324 /* 8325 * There is no sense of continuing this measurement 8326 * because its too small to gain us anything we 8327 * trust. Skip it and that way we can start a new 8328 * measurement quicker. 8329 */ 8330 tp->t_flags &= ~TF_GPUTINPROG; 8331 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8332 0, 0, 0, 6, __LINE__, NULL, 0); 8333 } else { 8334 /* 8335 * Reset the window further out. 8336 */ 8337 tp->gput_ack = tp->gput_seq + ideal_amount; 8338 } 8339 } 8340 } 8341 } 8342 8343 static inline int 8344 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8345 { 8346 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8347 /* Behind our TLP definition or right at */ 8348 return (0); 8349 } 8350 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8351 /* The start is beyond or right at our end of TLP definition */ 8352 return (0); 8353 } 8354 /* It has to be a sub-part of the original TLP recorded */ 8355 return (1); 8356 } 8357 8358 8359 static uint32_t 8360 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8361 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8362 { 8363 uint32_t start, end, changed = 0; 8364 struct rack_sendmap stack_map; 8365 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 8366 int32_t used_ref = 1; 8367 int moved = 0; 8368 8369 start = sack->start; 8370 end = sack->end; 8371 rsm = *prsm; 8372 memset(&fe, 0, sizeof(fe)); 8373 do_rest_ofb: 8374 if ((rsm == NULL) || 8375 (SEQ_LT(end, rsm->r_start)) || 8376 (SEQ_GEQ(start, rsm->r_end)) || 8377 (SEQ_LT(start, rsm->r_start))) { 8378 /* 8379 * We are not in the right spot, 8380 * find the correct spot in the tree. 8381 */ 8382 used_ref = 0; 8383 fe.r_start = start; 8384 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8385 moved++; 8386 } 8387 if (rsm == NULL) { 8388 /* TSNH */ 8389 goto out; 8390 } 8391 /* Ok we have an ACK for some piece of this rsm */ 8392 if (rsm->r_start != start) { 8393 if ((rsm->r_flags & RACK_ACKED) == 0) { 8394 /* 8395 * Before any splitting or hookery is 8396 * done is it a TLP of interest i.e. rxt? 8397 */ 8398 if ((rsm->r_flags & RACK_TLP) && 8399 (rsm->r_rtr_cnt > 1)) { 8400 /* 8401 * We are splitting a rxt TLP, check 8402 * if we need to save off the start/end 8403 */ 8404 if (rack->rc_last_tlp_acked_set && 8405 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8406 /* 8407 * We already turned this on since we are inside 8408 * the previous one was a partially sack now we 8409 * are getting another one (maybe all of it). 
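 * I.e. the running TLP block [last_tlp_acked_start, last_tlp_acked_end]
 * is still being filled in by successive SACKs; below we only widen
 * it when this rsm extends past either recorded edge.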
8410 * 8411 */ 8412 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8413 /* 8414 * Lets make sure we have all of it though. 8415 */ 8416 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8417 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8418 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8419 rack->r_ctl.last_tlp_acked_end); 8420 } 8421 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8422 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8423 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8424 rack->r_ctl.last_tlp_acked_end); 8425 } 8426 } else { 8427 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8428 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8429 rack->rc_last_tlp_past_cumack = 0; 8430 rack->rc_last_tlp_acked_set = 1; 8431 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8432 } 8433 } 8434 /** 8435 * Need to split this in two pieces the before and after, 8436 * the before remains in the map, the after must be 8437 * added. In other words we have: 8438 * rsm |--------------| 8439 * sackblk |-------> 8440 * rsm will become 8441 * rsm |---| 8442 * and nrsm will be the sacked piece 8443 * nrsm |----------| 8444 * 8445 * But before we start down that path lets 8446 * see if the sack spans over on top of 8447 * the next guy and it is already sacked. 8448 * 8449 */ 8450 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8451 if (next && (next->r_flags & RACK_ACKED) && 8452 SEQ_GEQ(end, next->r_start)) { 8453 /** 8454 * So the next one is already acked, and 8455 * we can thus by hookery use our stack_map 8456 * to reflect the piece being sacked and 8457 * then adjust the two tree entries moving 8458 * the start and ends around. So we start like: 8459 * rsm |------------| (not-acked) 8460 * next |-----------| (acked) 8461 * sackblk |--------> 8462 * We want to end like so: 8463 * rsm |------| (not-acked) 8464 * next |-----------------| (acked) 8465 * nrsm |-----| 8466 * Where nrsm is a temporary stack piece we 8467 * use to update all the gizmos. 8468 */ 8469 /* Copy up our fudge block */ 8470 nrsm = &stack_map; 8471 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8472 /* Now adjust our tree blocks */ 8473 rsm->r_end = start; 8474 next->r_start = start; 8475 /* Now we must adjust back where next->m is */ 8476 rack_setup_offset_for_rsm(rsm, next); 8477 8478 /* We don't need to adjust rsm, it did not change */ 8479 /* Clear out the dup ack count of the remainder */ 8480 rsm->r_dupack = 0; 8481 rsm->r_just_ret = 0; 8482 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8483 /* Now lets make sure our fudge block is right */ 8484 nrsm->r_start = start; 8485 /* Now lets update all the stats and such */ 8486 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8487 if (rack->app_limited_needs_set) 8488 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8489 changed += (nrsm->r_end - nrsm->r_start); 8490 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8491 if (nrsm->r_flags & RACK_SACK_PASSED) { 8492 counter_u64_add(rack_reorder_seen, 1); 8493 rack->r_ctl.rc_reorder_ts = cts; 8494 } 8495 /* 8496 * Now we want to go up from rsm (the 8497 * one left un-acked) to the next one 8498 * in the tmap. We do this so when 8499 * we walk backwards we include marking 8500 * sack-passed on rsm (The one passed in 8501 * is skipped since it is generally called 8502 * on something sacked before removing it 8503 * from the tmap). 
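 * Starting the walk at TAILQ_NEXT(rsm) matters because
 * rack_log_sack_passed() skips the entry it is handed; by handing
 * it the next entry, rsm itself (which stays in the tmap in this
 * merge case) does get marked SACK_PASSED as we walk back.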
8504 */ 8505 if (rsm->r_in_tmap) { 8506 nrsm = TAILQ_NEXT(rsm, r_tnext); 8507 /* 8508 * Now that we have the next 8509 * one walk backwards from there. 8510 */ 8511 if (nrsm && nrsm->r_in_tmap) 8512 rack_log_sack_passed(tp, rack, nrsm); 8513 } 8514 /* Now are we done? */ 8515 if (SEQ_LT(end, next->r_end) || 8516 (end == next->r_end)) { 8517 /* Done with block */ 8518 goto out; 8519 } 8520 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8521 counter_u64_add(rack_sack_used_next_merge, 1); 8522 /* Postion for the next block */ 8523 start = next->r_end; 8524 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8525 if (rsm == NULL) 8526 goto out; 8527 } else { 8528 /** 8529 * We can't use any hookery here, so we 8530 * need to split the map. We enter like 8531 * so: 8532 * rsm |--------| 8533 * sackblk |-----> 8534 * We will add the new block nrsm and 8535 * that will be the new portion, and then 8536 * fall through after reseting rsm. So we 8537 * split and look like this: 8538 * rsm |----| 8539 * sackblk |-----> 8540 * nrsm |---| 8541 * We then fall through reseting 8542 * rsm to nrsm, so the next block 8543 * picks it up. 8544 */ 8545 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8546 if (nrsm == NULL) { 8547 /* 8548 * failed XXXrrs what can we do but loose the sack 8549 * info? 8550 */ 8551 goto out; 8552 } 8553 counter_u64_add(rack_sack_splits, 1); 8554 rack_clone_rsm(rack, nrsm, rsm, start); 8555 rsm->r_just_ret = 0; 8556 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8557 #ifdef INVARIANTS 8558 if (insret != NULL) { 8559 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8560 nrsm, insret, rack, rsm); 8561 } 8562 #endif 8563 if (rsm->r_in_tmap) { 8564 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8565 nrsm->r_in_tmap = 1; 8566 } 8567 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8568 rsm->r_flags &= (~RACK_HAS_FIN); 8569 /* Position us to point to the new nrsm that starts the sack blk */ 8570 rsm = nrsm; 8571 } 8572 } else { 8573 /* Already sacked this piece */ 8574 counter_u64_add(rack_sack_skipped_acked, 1); 8575 moved++; 8576 if (end == rsm->r_end) { 8577 /* Done with block */ 8578 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8579 goto out; 8580 } else if (SEQ_LT(end, rsm->r_end)) { 8581 /* A partial sack to a already sacked block */ 8582 moved++; 8583 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8584 goto out; 8585 } else { 8586 /* 8587 * The end goes beyond this guy 8588 * repostion the start to the 8589 * next block. 8590 */ 8591 start = rsm->r_end; 8592 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8593 if (rsm == NULL) 8594 goto out; 8595 } 8596 } 8597 } 8598 if (SEQ_GEQ(end, rsm->r_end)) { 8599 /** 8600 * The end of this block is either beyond this guy or right 8601 * at this guy. I.e.: 8602 * rsm --- |-----| 8603 * end |-----| 8604 * <or> 8605 * end |---------| 8606 */ 8607 if ((rsm->r_flags & RACK_ACKED) == 0) { 8608 /* 8609 * Is it a TLP of interest? 8610 */ 8611 if ((rsm->r_flags & RACK_TLP) && 8612 (rsm->r_rtr_cnt > 1)) { 8613 /* 8614 * We are splitting a rxt TLP, check 8615 * if we need to save off the start/end 8616 */ 8617 if (rack->rc_last_tlp_acked_set && 8618 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8619 /* 8620 * We already turned this on since we are inside 8621 * the previous one was a partially sack now we 8622 * are getting another one (maybe all of it). 
8623 */ 8624 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8625 /* 8626 * Lets make sure we have all of it though. 8627 */ 8628 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8629 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8630 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8631 rack->r_ctl.last_tlp_acked_end); 8632 } 8633 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8634 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8635 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8636 rack->r_ctl.last_tlp_acked_end); 8637 } 8638 } else { 8639 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8640 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8641 rack->rc_last_tlp_past_cumack = 0; 8642 rack->rc_last_tlp_acked_set = 1; 8643 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8644 } 8645 } 8646 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8647 changed += (rsm->r_end - rsm->r_start); 8648 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8649 if (rsm->r_in_tmap) /* should be true */ 8650 rack_log_sack_passed(tp, rack, rsm); 8651 /* Is Reordering occuring? */ 8652 if (rsm->r_flags & RACK_SACK_PASSED) { 8653 rsm->r_flags &= ~RACK_SACK_PASSED; 8654 counter_u64_add(rack_reorder_seen, 1); 8655 rack->r_ctl.rc_reorder_ts = cts; 8656 } 8657 if (rack->app_limited_needs_set) 8658 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8659 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8660 rsm->r_flags |= RACK_ACKED; 8661 if (rsm->r_in_tmap) { 8662 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8663 rsm->r_in_tmap = 0; 8664 } 8665 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8666 } else { 8667 counter_u64_add(rack_sack_skipped_acked, 1); 8668 moved++; 8669 } 8670 if (end == rsm->r_end) { 8671 /* This block only - done, setup for next */ 8672 goto out; 8673 } 8674 /* 8675 * There is more not coverend by this rsm move on 8676 * to the next block in the RB tree. 8677 */ 8678 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8679 start = rsm->r_end; 8680 rsm = nrsm; 8681 if (rsm == NULL) 8682 goto out; 8683 goto do_rest_ofb; 8684 } 8685 /** 8686 * The end of this sack block is smaller than 8687 * our rsm i.e.: 8688 * rsm --- |-----| 8689 * end |--| 8690 */ 8691 if ((rsm->r_flags & RACK_ACKED) == 0) { 8692 /* 8693 * Is it a TLP of interest? 8694 */ 8695 if ((rsm->r_flags & RACK_TLP) && 8696 (rsm->r_rtr_cnt > 1)) { 8697 /* 8698 * We are splitting a rxt TLP, check 8699 * if we need to save off the start/end 8700 */ 8701 if (rack->rc_last_tlp_acked_set && 8702 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8703 /* 8704 * We already turned this on since we are inside 8705 * the previous one was a partially sack now we 8706 * are getting another one (maybe all of it). 8707 */ 8708 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8709 /* 8710 * Lets make sure we have all of it though. 
8711 */ 8712 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8713 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8714 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8715 rack->r_ctl.last_tlp_acked_end); 8716 } 8717 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8718 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8719 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8720 rack->r_ctl.last_tlp_acked_end); 8721 } 8722 } else { 8723 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8724 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8725 rack->rc_last_tlp_past_cumack = 0; 8726 rack->rc_last_tlp_acked_set = 1; 8727 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8728 } 8729 } 8730 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8731 if (prev && 8732 (prev->r_flags & RACK_ACKED)) { 8733 /** 8734 * Goal, we want the right remainder of rsm to shrink 8735 * in place and span from (rsm->r_start = end) to rsm->r_end. 8736 * We want to expand prev to go all the way 8737 * to prev->r_end <- end. 8738 * so in the tree we have before: 8739 * prev |--------| (acked) 8740 * rsm |-------| (non-acked) 8741 * sackblk |-| 8742 * We churn it so we end up with 8743 * prev |----------| (acked) 8744 * rsm |-----| (non-acked) 8745 * nrsm |-| (temporary) 8746 * 8747 * Note if either prev/rsm is a TLP we don't 8748 * do this. 8749 */ 8750 nrsm = &stack_map; 8751 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8752 prev->r_end = end; 8753 rsm->r_start = end; 8754 /* Now adjust nrsm (stack copy) to be 8755 * the one that is the small 8756 * piece that was "sacked". 8757 */ 8758 nrsm->r_end = end; 8759 rsm->r_dupack = 0; 8760 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8761 /* 8762 * Now that the rsm has had its start moved forward 8763 * lets go ahead and get its new place in the world. 8764 */ 8765 rack_setup_offset_for_rsm(prev, rsm); 8766 /* 8767 * Now nrsm is our new little piece 8768 * that is acked (which was merged 8769 * to prev). Update the rtt and changed 8770 * based on that. Also check for reordering. 8771 */ 8772 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8773 if (rack->app_limited_needs_set) 8774 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8775 changed += (nrsm->r_end - nrsm->r_start); 8776 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8777 if (nrsm->r_flags & RACK_SACK_PASSED) { 8778 counter_u64_add(rack_reorder_seen, 1); 8779 rack->r_ctl.rc_reorder_ts = cts; 8780 } 8781 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8782 rsm = prev; 8783 counter_u64_add(rack_sack_used_prev_merge, 1); 8784 } else { 8785 /** 8786 * This is the case where our previous 8787 * block is not acked either, so we must 8788 * split the block in two. 8789 */ 8790 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8791 if (nrsm == NULL) { 8792 /* failed rrs what can we do but loose the sack info? */ 8793 goto out; 8794 } 8795 if ((rsm->r_flags & RACK_TLP) && 8796 (rsm->r_rtr_cnt > 1)) { 8797 /* 8798 * We are splitting a rxt TLP, check 8799 * if we need to save off the start/end 8800 */ 8801 if (rack->rc_last_tlp_acked_set && 8802 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8803 /* 8804 * We already turned this on since this block is inside 8805 * the previous one was a partially sack now we 8806 * are getting another one (maybe all of it). 
8807 */ 8808 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8809 /* 8810 * Lets make sure we have all of it though. 8811 */ 8812 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8813 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8814 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8815 rack->r_ctl.last_tlp_acked_end); 8816 } 8817 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8818 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8819 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8820 rack->r_ctl.last_tlp_acked_end); 8821 } 8822 } else { 8823 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8824 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8825 rack->rc_last_tlp_acked_set = 1; 8826 rack->rc_last_tlp_past_cumack = 0; 8827 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8828 } 8829 } 8830 /** 8831 * In this case nrsm becomes 8832 * nrsm->r_start = end; 8833 * nrsm->r_end = rsm->r_end; 8834 * which is un-acked. 8835 * <and> 8836 * rsm->r_end = nrsm->r_start; 8837 * i.e. the remaining un-acked 8838 * piece is left on the left 8839 * hand side. 8840 * 8841 * So we start like this 8842 * rsm |----------| (not acked) 8843 * sackblk |---| 8844 * build it so we have 8845 * rsm |---| (acked) 8846 * nrsm |------| (not acked) 8847 */ 8848 counter_u64_add(rack_sack_splits, 1); 8849 rack_clone_rsm(rack, nrsm, rsm, end); 8850 rsm->r_flags &= (~RACK_HAS_FIN); 8851 rsm->r_just_ret = 0; 8852 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8853 #ifdef INVARIANTS 8854 if (insret != NULL) { 8855 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8856 nrsm, insret, rack, rsm); 8857 } 8858 #endif 8859 if (rsm->r_in_tmap) { 8860 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8861 nrsm->r_in_tmap = 1; 8862 } 8863 nrsm->r_dupack = 0; 8864 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8865 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8866 changed += (rsm->r_end - rsm->r_start); 8867 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8868 if (rsm->r_in_tmap) /* should be true */ 8869 rack_log_sack_passed(tp, rack, rsm); 8870 /* Is Reordering occuring? */ 8871 if (rsm->r_flags & RACK_SACK_PASSED) { 8872 rsm->r_flags &= ~RACK_SACK_PASSED; 8873 counter_u64_add(rack_reorder_seen, 1); 8874 rack->r_ctl.rc_reorder_ts = cts; 8875 } 8876 if (rack->app_limited_needs_set) 8877 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8878 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8879 rsm->r_flags |= RACK_ACKED; 8880 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8881 if (rsm->r_in_tmap) { 8882 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8883 rsm->r_in_tmap = 0; 8884 } 8885 } 8886 } else if (start != end){ 8887 /* 8888 * The block was already acked. 8889 */ 8890 counter_u64_add(rack_sack_skipped_acked, 1); 8891 moved++; 8892 } 8893 out: 8894 if (rsm && 8895 ((rsm->r_flags & RACK_TLP) == 0) && 8896 (rsm->r_flags & RACK_ACKED)) { 8897 /* 8898 * Now can we merge where we worked 8899 * with either the previous or 8900 * next block? 
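 * Collapsing adjacent fully-SACKed entries into one keeps the
 * sendmap tree small. TLP entries are left un-merged (the loops
 * below stop at them), which keeps their boundaries distinct for
 * the TLP/DSACK tracking done above.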
8901 */ 8902 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8903 while (next) { 8904 if (next->r_flags & RACK_TLP) 8905 break; 8906 if (next->r_flags & RACK_ACKED) { 8907 /* yep this and next can be merged */ 8908 rsm = rack_merge_rsm(rack, rsm, next); 8909 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8910 } else 8911 break; 8912 } 8913 /* Now what about the previous? */ 8914 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8915 while (prev) { 8916 if (prev->r_flags & RACK_TLP) 8917 break; 8918 if (prev->r_flags & RACK_ACKED) { 8919 /* yep the previous and this can be merged */ 8920 rsm = rack_merge_rsm(rack, prev, rsm); 8921 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8922 } else 8923 break; 8924 } 8925 } 8926 if (used_ref == 0) { 8927 counter_u64_add(rack_sack_proc_all, 1); 8928 } else { 8929 counter_u64_add(rack_sack_proc_short, 1); 8930 } 8931 /* Save off the next one for quick reference. */ 8932 if (rsm) 8933 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8934 else 8935 nrsm = NULL; 8936 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8937 /* Pass back the moved. */ 8938 *moved_two = moved; 8939 return (changed); 8940 } 8941 8942 static void inline 8943 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8944 { 8945 struct rack_sendmap *tmap; 8946 8947 tmap = NULL; 8948 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8949 /* Its no longer sacked, mark it so */ 8950 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8951 #ifdef INVARIANTS 8952 if (rsm->r_in_tmap) { 8953 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8954 rack, rsm, rsm->r_flags); 8955 } 8956 #endif 8957 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8958 /* Rebuild it into our tmap */ 8959 if (tmap == NULL) { 8960 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8961 tmap = rsm; 8962 } else { 8963 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8964 tmap = rsm; 8965 } 8966 tmap->r_in_tmap = 1; 8967 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8968 } 8969 /* 8970 * Now lets possibly clear the sack filter so we start 8971 * recognizing sacks that cover this area. 8972 */ 8973 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8974 8975 } 8976 8977 static void 8978 rack_do_decay(struct tcp_rack *rack) 8979 { 8980 struct timeval res; 8981 8982 #define timersub(tvp, uvp, vvp) \ 8983 do { \ 8984 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8985 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8986 if ((vvp)->tv_usec < 0) { \ 8987 (vvp)->tv_sec--; \ 8988 (vvp)->tv_usec += 1000000; \ 8989 } \ 8990 } while (0) 8991 8992 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8993 #undef timersub 8994 8995 rack->r_ctl.input_pkt++; 8996 if ((rack->rc_in_persist) || 8997 (res.tv_sec >= 1) || 8998 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8999 /* 9000 * Check for decay of non-SAD, 9001 * we want all SAD detection metrics to 9002 * decay 1/4 per second (or more) passed. 9003 */ 9004 uint32_t pkt_delta; 9005 9006 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 9007 /* Update our saved tracking values */ 9008 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 9009 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 9010 /* Now do we escape without decay? 
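 * Idle connections and connections with a low inbound packet rate
 * are exempted below; for everything else the SACK-attack detection
 * counters are decayed so stale history does not dominate the
 * ack/sack and move ratios computed in rack_do_detection().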
*/ 9011 #ifdef NETFLIX_EXP_DETECTION 9012 if (rack->rc_in_persist || 9013 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 9014 (pkt_delta < tcp_sad_low_pps)){ 9015 /* 9016 * We don't decay idle connections 9017 * or ones that have a low input pps. 9018 */ 9019 return; 9020 } 9021 /* Decay the counters */ 9022 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 9023 tcp_sad_decay_val); 9024 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 9025 tcp_sad_decay_val); 9026 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 9027 tcp_sad_decay_val); 9028 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 9029 tcp_sad_decay_val); 9030 #endif 9031 } 9032 } 9033 9034 static void 9035 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 9036 { 9037 struct rack_sendmap *rsm, *rm; 9038 9039 /* 9040 * The ACK point is advancing to th_ack, we must drop off 9041 * the packets in the rack log and calculate any eligble 9042 * RTT's. 9043 */ 9044 rack->r_wanted_output = 1; 9045 9046 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 9047 if ((rack->rc_last_tlp_acked_set == 1)&& 9048 (rack->rc_last_tlp_past_cumack == 1) && 9049 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 9050 /* 9051 * We have reached the point where our last rack 9052 * tlp retransmit sequence is ahead of the cum-ack. 9053 * This can only happen when the cum-ack moves all 9054 * the way around (its been a full 2^^31+1 bytes 9055 * or more since we sent a retransmitted TLP). Lets 9056 * turn off the valid flag since its not really valid. 9057 * 9058 * Note since sack's also turn on this event we have 9059 * a complication, we have to wait to age it out until 9060 * the cum-ack is by the TLP before checking which is 9061 * what the next else clause does. 9062 */ 9063 rack_log_dsack_event(rack, 9, __LINE__, 9064 rack->r_ctl.last_tlp_acked_start, 9065 rack->r_ctl.last_tlp_acked_end); 9066 rack->rc_last_tlp_acked_set = 0; 9067 rack->rc_last_tlp_past_cumack = 0; 9068 } else if ((rack->rc_last_tlp_acked_set == 1) && 9069 (rack->rc_last_tlp_past_cumack == 0) && 9070 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 9071 /* 9072 * It is safe to start aging TLP's out. 9073 */ 9074 rack->rc_last_tlp_past_cumack = 1; 9075 } 9076 /* We do the same for the tlp send seq as well */ 9077 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9078 (rack->rc_last_sent_tlp_past_cumack == 1) && 9079 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 9080 rack_log_dsack_event(rack, 9, __LINE__, 9081 rack->r_ctl.last_sent_tlp_seq, 9082 (rack->r_ctl.last_sent_tlp_seq + 9083 rack->r_ctl.last_sent_tlp_len)); 9084 rack->rc_last_sent_tlp_seq_valid = 0; 9085 rack->rc_last_sent_tlp_past_cumack = 0; 9086 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9087 (rack->rc_last_sent_tlp_past_cumack == 0) && 9088 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 9089 /* 9090 * It is safe to start aging TLP's send. 9091 */ 9092 rack->rc_last_sent_tlp_past_cumack = 1; 9093 } 9094 more: 9095 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9096 if (rsm == NULL) { 9097 if ((th_ack - 1) == tp->iss) { 9098 /* 9099 * For the SYN incoming case we will not 9100 * have called tcp_output for the sending of 9101 * the SYN, so there will be no map. All 9102 * other cases should probably be a panic. 
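 * The SYN consumed sequence space before any sendmap entry existed,
 * and a sent FIN may likewise be unmapped; anything else means the
 * map has diverged from snd_una/snd_max, hence the INVARIANTS panic
 * below.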
9103 */ 9104 return; 9105 } 9106 if (tp->t_flags & TF_SENTFIN) { 9107 /* if we sent a FIN we often will not have map */ 9108 return; 9109 } 9110 #ifdef INVARIANTS 9111 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 9112 tp, 9113 tp->t_state, th_ack, rack, 9114 tp->snd_una, tp->snd_max, tp->snd_nxt); 9115 #endif 9116 return; 9117 } 9118 if (SEQ_LT(th_ack, rsm->r_start)) { 9119 /* Huh map is missing this */ 9120 #ifdef INVARIANTS 9121 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9122 rsm->r_start, 9123 th_ack, tp->t_state, rack->r_state); 9124 #endif 9125 return; 9126 } 9127 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9128 9129 /* Now was it a retransmitted TLP? */ 9130 if ((rsm->r_flags & RACK_TLP) && 9131 (rsm->r_rtr_cnt > 1)) { 9132 /* 9133 * Yes, this rsm was a TLP and retransmitted, remember that 9134 * since if a DSACK comes back on this we don't want 9135 * to think of it as a reordered segment. This may 9136 * get updated again with possibly even other TLPs 9137 * in flight, but thats ok. Only when we don't send 9138 * a retransmitted TLP for 1/2 the sequences space 9139 * will it get turned off (above). 9140 */ 9141 if (rack->rc_last_tlp_acked_set && 9142 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9143 /* 9144 * We already turned this on since the end matches, 9145 * the previous one was a partially ack now we 9146 * are getting another one (maybe all of it). 9147 */ 9148 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9149 /* 9150 * Lets make sure we have all of it though. 9151 */ 9152 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9153 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9154 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9155 rack->r_ctl.last_tlp_acked_end); 9156 } 9157 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9158 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9159 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9160 rack->r_ctl.last_tlp_acked_end); 9161 } 9162 } else { 9163 rack->rc_last_tlp_past_cumack = 1; 9164 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9165 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9166 rack->rc_last_tlp_acked_set = 1; 9167 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9168 } 9169 } 9170 /* Now do we consume the whole thing? */ 9171 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9172 /* Its all consumed. */ 9173 uint32_t left; 9174 uint8_t newly_acked; 9175 9176 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9177 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9178 rsm->r_rtr_bytes = 0; 9179 /* Record the time of highest cumack sent */ 9180 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9181 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9182 #ifdef INVARIANTS 9183 if (rm != rsm) { 9184 panic("removing head in rack:%p rsm:%p rm:%p", 9185 rack, rsm, rm); 9186 } 9187 #endif 9188 if (rsm->r_in_tmap) { 9189 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9190 rsm->r_in_tmap = 0; 9191 } 9192 newly_acked = 1; 9193 if (rsm->r_flags & RACK_ACKED) { 9194 /* 9195 * It was acked on the scoreboard -- remove 9196 * it from total 9197 */ 9198 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9199 newly_acked = 0; 9200 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9201 /* 9202 * There are segments ACKED on the 9203 * scoreboard further up. We are seeing 9204 * reordering. 
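 * A cum-ack arriving for a SACK_PASSED segment means the data we
 * thought was lost was merely reordered. Record the reorder time
 * and, if we entered recovery without sending anything new since,
 * remember that the congestion response may have to be reverted.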
9205 */ 9206 rsm->r_flags &= ~RACK_SACK_PASSED; 9207 counter_u64_add(rack_reorder_seen, 1); 9208 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9209 rsm->r_flags |= RACK_ACKED; 9210 rack->r_ctl.rc_reorder_ts = cts; 9211 if (rack->r_ent_rec_ns) { 9212 /* 9213 * We have sent no more, and we saw an sack 9214 * then ack arrive. 9215 */ 9216 rack->r_might_revert = 1; 9217 } 9218 } 9219 if ((rsm->r_flags & RACK_TO_REXT) && 9220 (tp->t_flags & TF_RCVD_TSTMP) && 9221 (to->to_flags & TOF_TS) && 9222 (to->to_tsecr != 0) && 9223 (tp->t_flags & TF_PREVVALID)) { 9224 /* 9225 * We can use the timestamp to see 9226 * if this retransmission was from the 9227 * first transmit. If so we made a mistake. 9228 */ 9229 tp->t_flags &= ~TF_PREVVALID; 9230 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9231 /* The first transmit is what this ack is for */ 9232 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 9233 } 9234 } 9235 left = th_ack - rsm->r_end; 9236 if (rack->app_limited_needs_set && newly_acked) 9237 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9238 /* Free back to zone */ 9239 rack_free(rack, rsm); 9240 if (left) { 9241 goto more; 9242 } 9243 /* Check for reneging */ 9244 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9245 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9246 /* 9247 * The peer has moved snd_una up to 9248 * the edge of this send, i.e. one 9249 * that it had previously acked. The only 9250 * way that can be true if the peer threw 9251 * away data (space issues) that it had 9252 * previously sacked (else it would have 9253 * given us snd_una up to (rsm->r_end). 9254 * We need to undo the acked markings here. 9255 * 9256 * Note we have to look to make sure th_ack is 9257 * our rsm->r_start in case we get an old ack 9258 * where th_ack is behind snd_una. 9259 */ 9260 rack_peer_reneges(rack, rsm, th_ack); 9261 } 9262 return; 9263 } 9264 if (rsm->r_flags & RACK_ACKED) { 9265 /* 9266 * It was acked on the scoreboard -- remove it from 9267 * total for the part being cum-acked. 9268 */ 9269 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9270 } 9271 /* 9272 * Clear the dup ack count for 9273 * the piece that remains. 9274 */ 9275 rsm->r_dupack = 0; 9276 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9277 if (rsm->r_rtr_bytes) { 9278 /* 9279 * It was retransmitted adjust the 9280 * sack holes for what was acked. 9281 */ 9282 int ack_am; 9283 9284 ack_am = (th_ack - rsm->r_start); 9285 if (ack_am >= rsm->r_rtr_bytes) { 9286 rack->r_ctl.rc_holes_rxt -= ack_am; 9287 rsm->r_rtr_bytes -= ack_am; 9288 } 9289 } 9290 /* 9291 * Update where the piece starts and record 9292 * the time of send of highest cumack sent. 9293 */ 9294 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9295 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9296 /* Now we need to move our offset forward too */ 9297 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9298 /* Fix up the orig_m_len and possibly the mbuf offset */ 9299 rack_adjust_orig_mlen(rsm); 9300 } 9301 rsm->soff += (th_ack - rsm->r_start); 9302 rsm->r_start = th_ack; 9303 /* Now do we need to move the mbuf fwd too? 
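 * soff is the offset of r_start within the rsm's current mbuf;
 * after trimming the head of the rsm it may now point past that
 * mbuf, so walk down the chain until it lands inside one and then
 * refresh orig_m_len to the new mbuf's length.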
*/ 9304 if (rsm->m) { 9305 while (rsm->soff >= rsm->m->m_len) { 9306 rsm->soff -= rsm->m->m_len; 9307 rsm->m = rsm->m->m_next; 9308 KASSERT((rsm->m != NULL), 9309 (" nrsm:%p hit at soff:%u null m", 9310 rsm, rsm->soff)); 9311 } 9312 rsm->orig_m_len = rsm->m->m_len; 9313 } 9314 if (rack->app_limited_needs_set) 9315 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9316 } 9317 9318 static void 9319 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9320 { 9321 struct rack_sendmap *rsm; 9322 int sack_pass_fnd = 0; 9323 9324 if (rack->r_might_revert) { 9325 /* 9326 * Ok we have reordering, have not sent anything, we 9327 * might want to revert the congestion state if nothing 9328 * further has SACK_PASSED on it. Lets check. 9329 * 9330 * We also get here when we have DSACKs come in for 9331 * all the data that we FR'd. Note that a rxt or tlp 9332 * timer clears this from happening. 9333 */ 9334 9335 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9336 if (rsm->r_flags & RACK_SACK_PASSED) { 9337 sack_pass_fnd = 1; 9338 break; 9339 } 9340 } 9341 if (sack_pass_fnd == 0) { 9342 /* 9343 * We went into recovery 9344 * incorrectly due to reordering! 9345 */ 9346 int orig_cwnd; 9347 9348 rack->r_ent_rec_ns = 0; 9349 orig_cwnd = tp->snd_cwnd; 9350 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec; 9351 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9352 tp->snd_recover = tp->snd_una; 9353 rack_log_to_prr(rack, 14, orig_cwnd); 9354 EXIT_RECOVERY(tp->t_flags); 9355 } 9356 rack->r_might_revert = 0; 9357 } 9358 } 9359 9360 #ifdef NETFLIX_EXP_DETECTION 9361 static void 9362 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9363 { 9364 if ((rack->do_detection || tcp_force_detection) && 9365 tcp_sack_to_ack_thresh && 9366 tcp_sack_to_move_thresh && 9367 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9368 /* 9369 * We have thresholds set to find 9370 * possible attackers and disable sack. 9371 * Check them. 9372 */ 9373 uint64_t ackratio, moveratio, movetotal; 9374 9375 /* Log detecting */ 9376 rack_log_sad(rack, 1); 9377 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9378 ackratio *= (uint64_t)(1000); 9379 if (rack->r_ctl.ack_count) 9380 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9381 else { 9382 /* We really should not hit here */ 9383 ackratio = 1000; 9384 } 9385 if ((rack->sack_attack_disable == 0) && 9386 (ackratio > rack_highest_sack_thresh_seen)) 9387 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9388 movetotal = rack->r_ctl.sack_moved_extra; 9389 movetotal += rack->r_ctl.sack_noextra_move; 9390 moveratio = rack->r_ctl.sack_moved_extra; 9391 moveratio *= (uint64_t)1000; 9392 if (movetotal) 9393 moveratio /= movetotal; 9394 else { 9395 /* No moves, thats pretty good */ 9396 moveratio = 0; 9397 } 9398 if ((rack->sack_attack_disable == 0) && 9399 (moveratio > rack_highest_move_thresh_seen)) 9400 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9401 if (rack->sack_attack_disable == 0) { 9402 if ((ackratio > tcp_sack_to_ack_thresh) && 9403 (moveratio > tcp_sack_to_move_thresh)) { 9404 /* Disable sack processing */ 9405 rack->sack_attack_disable = 1; 9406 if (rack->r_rep_attack == 0) { 9407 rack->r_rep_attack = 1; 9408 counter_u64_add(rack_sack_attacks_detected, 1); 9409 } 9410 if (tcp_attack_on_turns_on_logging) { 9411 /* 9412 * Turn on logging, used for debugging 9413 * false positives. 
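 * In addition to disabling SACK processing we clamp the cwnd to the
 * current flight size, saving the old value; if the detection is
 * later judged a false positive the saved cwnd is restored in the
 * reversal path below.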
9414 */ 9415 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9416 } 9417 /* Clamp the cwnd at flight size */ 9418 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9419 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9420 rack_log_sad(rack, 2); 9421 } 9422 } else { 9423 /* We are sack-disabled check for false positives */ 9424 if ((ackratio <= tcp_restoral_thresh) || 9425 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9426 rack->sack_attack_disable = 0; 9427 rack_log_sad(rack, 3); 9428 /* Restart counting */ 9429 rack->r_ctl.sack_count = 0; 9430 rack->r_ctl.sack_moved_extra = 0; 9431 rack->r_ctl.sack_noextra_move = 1; 9432 rack->r_ctl.ack_count = max(1, 9433 (bytes_this_ack / segsiz)); 9434 9435 if (rack->r_rep_reverse == 0) { 9436 rack->r_rep_reverse = 1; 9437 counter_u64_add(rack_sack_attacks_reversed, 1); 9438 } 9439 /* Restore the cwnd */ 9440 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9441 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9442 } 9443 } 9444 } 9445 } 9446 #endif 9447 9448 static int 9449 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9450 { 9451 9452 uint32_t am, l_end; 9453 int was_tlp = 0; 9454 9455 if (SEQ_GT(end, start)) 9456 am = end - start; 9457 else 9458 am = 0; 9459 if ((rack->rc_last_tlp_acked_set ) && 9460 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9461 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9462 /* 9463 * The DSACK is because of a TLP which we don't 9464 * do anything with the reordering window over since 9465 * it was not reordering that caused the DSACK but 9466 * our previous retransmit TLP. 9467 */ 9468 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9469 was_tlp = 1; 9470 goto skip_dsack_round; 9471 } 9472 if (rack->rc_last_sent_tlp_seq_valid) { 9473 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9474 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9475 (SEQ_LEQ(end, l_end))) { 9476 /* 9477 * This dsack is from the last sent TLP, ignore it 9478 * for reordering purposes. 9479 */ 9480 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9481 was_tlp = 1; 9482 goto skip_dsack_round; 9483 } 9484 } 9485 if (rack->rc_dsack_round_seen == 0) { 9486 rack->rc_dsack_round_seen = 1; 9487 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9488 rack->r_ctl.num_dsack++; 9489 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9490 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9491 } 9492 skip_dsack_round: 9493 /* 9494 * We keep track of how many DSACK blocks we get 9495 * after a recovery incident. 9496 */ 9497 rack->r_ctl.dsack_byte_cnt += am; 9498 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9499 rack->r_ctl.retran_during_recovery && 9500 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9501 /* 9502 * False recovery most likely culprit is reordering. If 9503 * nothing else is missing we need to revert. 
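 * Having DSACKed at least as many bytes as we retransmitted during
 * the recovery episode implies those retransmissions were spurious;
 * rack_handle_might_revert() will undo the cwnd/ssthresh reduction
 * provided nothing is still marked SACK_PASSED.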
9504 */ 9505 rack->r_might_revert = 1; 9506 rack_handle_might_revert(rack->rc_tp, rack); 9507 rack->r_might_revert = 0; 9508 rack->r_ctl.retran_during_recovery = 0; 9509 rack->r_ctl.dsack_byte_cnt = 0; 9510 } 9511 return (was_tlp); 9512 } 9513 9514 static void 9515 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9516 { 9517 /* Deal with changed and PRR here (in recovery only) */ 9518 uint32_t pipe, snd_una; 9519 9520 rack->r_ctl.rc_prr_delivered += changed; 9521 9522 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9523 /* 9524 * It is all outstanding, we are application limited 9525 * and thus we don't need more room to send anything. 9526 * Note we use tp->snd_una here and not th_ack because 9527 * the data as yet not been cut from the sb. 9528 */ 9529 rack->r_ctl.rc_prr_sndcnt = 0; 9530 return; 9531 } 9532 /* Compute prr_sndcnt */ 9533 if (SEQ_GT(tp->snd_una, th_ack)) { 9534 snd_una = tp->snd_una; 9535 } else { 9536 snd_una = th_ack; 9537 } 9538 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9539 if (pipe > tp->snd_ssthresh) { 9540 long sndcnt; 9541 9542 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9543 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9544 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9545 else { 9546 rack->r_ctl.rc_prr_sndcnt = 0; 9547 rack_log_to_prr(rack, 9, 0); 9548 sndcnt = 0; 9549 } 9550 sndcnt++; 9551 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9552 sndcnt -= rack->r_ctl.rc_prr_out; 9553 else 9554 sndcnt = 0; 9555 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9556 rack_log_to_prr(rack, 10, 0); 9557 } else { 9558 uint32_t limit; 9559 9560 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9561 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9562 else 9563 limit = 0; 9564 if (changed > limit) 9565 limit = changed; 9566 limit += ctf_fixed_maxseg(tp); 9567 if (tp->snd_ssthresh > pipe) { 9568 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9569 rack_log_to_prr(rack, 11, 0); 9570 } else { 9571 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9572 rack_log_to_prr(rack, 12, 0); 9573 } 9574 } 9575 } 9576 9577 static void 9578 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9579 { 9580 uint32_t changed; 9581 struct tcp_rack *rack; 9582 struct rack_sendmap *rsm; 9583 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9584 register uint32_t th_ack; 9585 int32_t i, j, k, num_sack_blks = 0; 9586 uint32_t cts, acked, ack_point, sack_changed = 0; 9587 int loop_start = 0, moved_two = 0; 9588 uint32_t tsused; 9589 9590 9591 INP_WLOCK_ASSERT(tp->t_inpcb); 9592 if (th->th_flags & TH_RST) { 9593 /* We don't log resets */ 9594 return; 9595 } 9596 rack = (struct tcp_rack *)tp->t_fb_ptr; 9597 cts = tcp_get_usecs(NULL); 9598 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9599 changed = 0; 9600 th_ack = th->th_ack; 9601 if (rack->sack_attack_disable == 0) 9602 rack_do_decay(rack); 9603 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9604 /* 9605 * You only get credit for 9606 * MSS and greater (and you get extra 9607 * credit for larger cum-ack moves). 9608 */ 9609 int ac; 9610 9611 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9612 rack->r_ctl.ack_count += ac; 9613 counter_u64_add(rack_ack_total, ac); 9614 } 9615 if (rack->r_ctl.ack_count > 0xfff00000) { 9616 /* 9617 * reduce the number to keep us under 9618 * a uint32_t. 
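 * Halving the ack and sack counters together preserves (roughly)
 * the sack-to-ack ratio the attack-detection heuristics look at
 * while keeping both values comfortably inside a uint32_t.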
9619 */ 9620 rack->r_ctl.ack_count /= 2; 9621 rack->r_ctl.sack_count /= 2; 9622 } 9623 if (SEQ_GT(th_ack, tp->snd_una)) { 9624 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9625 tp->t_acktime = ticks; 9626 } 9627 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9628 changed = th_ack - rsm->r_start; 9629 if (changed) { 9630 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9631 } 9632 if ((to->to_flags & TOF_SACK) == 0) { 9633 /* We are done nothing left and no sack. */ 9634 rack_handle_might_revert(tp, rack); 9635 /* 9636 * For cases where we struck a dup-ack 9637 * with no SACK, add to the changes so 9638 * PRR will work right. 9639 */ 9640 if (dup_ack_struck && (changed == 0)) { 9641 changed += ctf_fixed_maxseg(rack->rc_tp); 9642 } 9643 goto out; 9644 } 9645 /* Sack block processing */ 9646 if (SEQ_GT(th_ack, tp->snd_una)) 9647 ack_point = th_ack; 9648 else 9649 ack_point = tp->snd_una; 9650 for (i = 0; i < to->to_nsacks; i++) { 9651 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9652 &sack, sizeof(sack)); 9653 sack.start = ntohl(sack.start); 9654 sack.end = ntohl(sack.end); 9655 if (SEQ_GT(sack.end, sack.start) && 9656 SEQ_GT(sack.start, ack_point) && 9657 SEQ_LT(sack.start, tp->snd_max) && 9658 SEQ_GT(sack.end, ack_point) && 9659 SEQ_LEQ(sack.end, tp->snd_max)) { 9660 sack_blocks[num_sack_blks] = sack; 9661 num_sack_blks++; 9662 } else if (SEQ_LEQ(sack.start, th_ack) && 9663 SEQ_LEQ(sack.end, th_ack)) { 9664 int was_tlp; 9665 9666 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9667 /* 9668 * Its a D-SACK block. 9669 */ 9670 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9671 } 9672 } 9673 if (rack->rc_dsack_round_seen) { 9674 /* Is the dsack roound over? */ 9675 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9676 /* Yes it is */ 9677 rack->rc_dsack_round_seen = 0; 9678 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9679 } 9680 } 9681 /* 9682 * Sort the SACK blocks so we can update the rack scoreboard with 9683 * just one pass. 9684 */ 9685 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9686 num_sack_blks, th->th_ack); 9687 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9688 if (num_sack_blks == 0) { 9689 /* Nothing to sack (DSACKs?) */ 9690 goto out_with_totals; 9691 } 9692 if (num_sack_blks < 2) { 9693 /* Only one, we don't need to sort */ 9694 goto do_sack_work; 9695 } 9696 /* Sort the sacks */ 9697 for (i = 0; i < num_sack_blks; i++) { 9698 for (j = i + 1; j < num_sack_blks; j++) { 9699 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9700 sack = sack_blocks[i]; 9701 sack_blocks[i] = sack_blocks[j]; 9702 sack_blocks[j] = sack; 9703 } 9704 } 9705 } 9706 /* 9707 * Now are any of the sack block ends the same (yes some 9708 * implementations send these)? 9709 */ 9710 again: 9711 if (num_sack_blks == 0) 9712 goto out_with_totals; 9713 if (num_sack_blks > 1) { 9714 for (i = 0; i < num_sack_blks; i++) { 9715 for (j = i + 1; j < num_sack_blks; j++) { 9716 if (sack_blocks[i].end == sack_blocks[j].end) { 9717 /* 9718 * Ok these two have the same end we 9719 * want the smallest end and then 9720 * throw away the larger and start 9721 * again. 
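 * More precisely: the two ends are equal, so we keep whichever
 * start is smaller (the block covering more data) and collapse the
 * other one out of the array before rescanning.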
9722 */ 9723 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9724 /* 9725 * The second block covers 9726 * more area use that 9727 */ 9728 sack_blocks[i].start = sack_blocks[j].start; 9729 } 9730 /* 9731 * Now collapse out the dup-sack and 9732 * lower the count 9733 */ 9734 for (k = (j + 1); k < num_sack_blks; k++) { 9735 sack_blocks[j].start = sack_blocks[k].start; 9736 sack_blocks[j].end = sack_blocks[k].end; 9737 j++; 9738 } 9739 num_sack_blks--; 9740 goto again; 9741 } 9742 } 9743 } 9744 } 9745 do_sack_work: 9746 /* 9747 * First lets look to see if 9748 * we have retransmitted and 9749 * can use the transmit next? 9750 */ 9751 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9752 if (rsm && 9753 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9754 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9755 /* 9756 * We probably did the FR and the next 9757 * SACK in continues as we would expect. 9758 */ 9759 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9760 if (acked) { 9761 rack->r_wanted_output = 1; 9762 changed += acked; 9763 sack_changed += acked; 9764 } 9765 if (num_sack_blks == 1) { 9766 /* 9767 * This is what we would expect from 9768 * a normal implementation to happen 9769 * after we have retransmitted the FR, 9770 * i.e the sack-filter pushes down 9771 * to 1 block and the next to be retransmitted 9772 * is the sequence in the sack block (has more 9773 * are acked). Count this as ACK'd data to boost 9774 * up the chances of recovering any false positives. 9775 */ 9776 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9777 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9778 counter_u64_add(rack_express_sack, 1); 9779 if (rack->r_ctl.ack_count > 0xfff00000) { 9780 /* 9781 * reduce the number to keep us under 9782 * a uint32_t. 9783 */ 9784 rack->r_ctl.ack_count /= 2; 9785 rack->r_ctl.sack_count /= 2; 9786 } 9787 goto out_with_totals; 9788 } else { 9789 /* 9790 * Start the loop through the 9791 * rest of blocks, past the first block. 9792 */ 9793 moved_two = 0; 9794 loop_start = 1; 9795 } 9796 } 9797 /* Its a sack of some sort */ 9798 rack->r_ctl.sack_count++; 9799 if (rack->r_ctl.sack_count > 0xfff00000) { 9800 /* 9801 * reduce the number to keep us under 9802 * a uint32_t. 9803 */ 9804 rack->r_ctl.ack_count /= 2; 9805 rack->r_ctl.sack_count /= 2; 9806 } 9807 counter_u64_add(rack_sack_total, 1); 9808 if (rack->sack_attack_disable) { 9809 /* An attacker disablement is in place */ 9810 if (num_sack_blks > 1) { 9811 rack->r_ctl.sack_count += (num_sack_blks - 1); 9812 rack->r_ctl.sack_moved_extra++; 9813 counter_u64_add(rack_move_some, 1); 9814 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9815 rack->r_ctl.sack_moved_extra /= 2; 9816 rack->r_ctl.sack_noextra_move /= 2; 9817 } 9818 } 9819 goto out; 9820 } 9821 rsm = rack->r_ctl.rc_sacklast; 9822 for (i = loop_start; i < num_sack_blks; i++) { 9823 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9824 if (acked) { 9825 rack->r_wanted_output = 1; 9826 changed += acked; 9827 sack_changed += acked; 9828 } 9829 if (moved_two) { 9830 /* 9831 * If we did not get a SACK for at least a MSS and 9832 * had to move at all, or if we moved more than our 9833 * threshold, it counts against the "extra" move. 9834 */ 9835 rack->r_ctl.sack_moved_extra += moved_two; 9836 counter_u64_add(rack_move_some, 1); 9837 } else { 9838 /* 9839 * else we did not have to move 9840 * any more than we would expect. 
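 * The "moves" counted here reflect how far rack_proc_sack_blk() had
 * to walk past its cached starting point; the split between extra
 * and no-extra moves feeds the move ratio used by the attack
 * detection logic.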
9841 */ 9842 rack->r_ctl.sack_noextra_move++; 9843 counter_u64_add(rack_move_none, 1); 9844 } 9845 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9846 /* 9847 * If the SACK was not a full MSS then 9848 * we add to sack_count the number of 9849 * MSS's (or possibly more than 9850 * a MSS if its a TSO send) we had to skip by. 9851 */ 9852 rack->r_ctl.sack_count += moved_two; 9853 counter_u64_add(rack_sack_total, moved_two); 9854 } 9855 /* 9856 * Now we need to setup for the next 9857 * round. First we make sure we won't 9858 * exceed the size of our uint32_t on 9859 * the various counts, and then clear out 9860 * moved_two. 9861 */ 9862 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9863 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9864 rack->r_ctl.sack_moved_extra /= 2; 9865 rack->r_ctl.sack_noextra_move /= 2; 9866 } 9867 if (rack->r_ctl.sack_count > 0xfff00000) { 9868 rack->r_ctl.ack_count /= 2; 9869 rack->r_ctl.sack_count /= 2; 9870 } 9871 moved_two = 0; 9872 } 9873 out_with_totals: 9874 if (num_sack_blks > 1) { 9875 /* 9876 * You get an extra stroke if 9877 * you have more than one sack-blk, this 9878 * could be where we are skipping forward 9879 * and the sack-filter is still working, or 9880 * it could be an attacker constantly 9881 * moving us. 9882 */ 9883 rack->r_ctl.sack_moved_extra++; 9884 counter_u64_add(rack_move_some, 1); 9885 } 9886 out: 9887 #ifdef NETFLIX_EXP_DETECTION 9888 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9889 #endif 9890 if (changed) { 9891 /* Something changed cancel the rack timer */ 9892 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9893 } 9894 tsused = tcp_get_usecs(NULL); 9895 rsm = tcp_rack_output(tp, rack, tsused); 9896 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9897 rsm) { 9898 /* Enter recovery */ 9899 rack->r_ctl.rc_rsm_start = rsm->r_start; 9900 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 9901 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 9902 entered_recovery = 1; 9903 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9904 /* 9905 * When we enter recovery we need to assure we send 9906 * one packet. 9907 */ 9908 if (rack->rack_no_prr == 0) { 9909 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9910 rack_log_to_prr(rack, 8, 0); 9911 } 9912 rack->r_timer_override = 1; 9913 rack->r_early = 0; 9914 rack->r_ctl.rc_agg_early = 0; 9915 } else if (IN_FASTRECOVERY(tp->t_flags) && 9916 rsm && 9917 (rack->r_rr_config == 3)) { 9918 /* 9919 * Assure we can output and we get no 9920 * remembered pace time except the retransmit. 9921 */ 9922 rack->r_timer_override = 1; 9923 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9924 rack->r_ctl.rc_resend = rsm; 9925 } 9926 if (IN_FASTRECOVERY(tp->t_flags) && 9927 (rack->rack_no_prr == 0) && 9928 (entered_recovery == 0)) { 9929 rack_update_prr(tp, rack, changed, th_ack); 9930 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9931 ((rack->rc_inp->inp_in_hpts == 0) && 9932 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9933 /* 9934 * If you are pacing output you don't want 9935 * to override. 
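 * We only get here when no pacer slot is pending (not on the hpts
 * and PACE_PKT_OUTPUT clear), so forcing an immediate output for
 * the PRR-allowed data will not disturb an in-progress pacing gap.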
9936 */ 9937 rack->r_early = 0; 9938 rack->r_ctl.rc_agg_early = 0; 9939 rack->r_timer_override = 1; 9940 } 9941 } 9942 } 9943 9944 static void 9945 rack_strike_dupack(struct tcp_rack *rack) 9946 { 9947 struct rack_sendmap *rsm; 9948 9949 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9950 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9951 rsm = TAILQ_NEXT(rsm, r_tnext); 9952 } 9953 if (rsm && (rsm->r_dupack < 0xff)) { 9954 rsm->r_dupack++; 9955 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9956 struct timeval tv; 9957 uint32_t cts; 9958 /* 9959 * Here we see if we need to retransmit. For 9960 * a SACK type connection if enough time has passed 9961 * we will get a return of the rsm. For a non-sack 9962 * connection we will get the rsm returned if the 9963 * dupack value is 3 or more. 9964 */ 9965 cts = tcp_get_usecs(&tv); 9966 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9967 if (rack->r_ctl.rc_resend != NULL) { 9968 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9969 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9970 rack->rc_tp->snd_una); 9971 } 9972 rack->r_wanted_output = 1; 9973 rack->r_timer_override = 1; 9974 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9975 } 9976 } else { 9977 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9978 } 9979 } 9980 } 9981 9982 static void 9983 rack_check_bottom_drag(struct tcpcb *tp, 9984 struct tcp_rack *rack, 9985 struct socket *so, int32_t acked) 9986 { 9987 uint32_t segsiz, minseg; 9988 9989 segsiz = ctf_fixed_maxseg(tp); 9990 minseg = segsiz; 9991 9992 if (tp->snd_max == tp->snd_una) { 9993 /* 9994 * We are doing dynamic pacing and we are way 9995 * under. Basically everything got acked while 9996 * we were still waiting on the pacer to expire. 9997 * 9998 * This means we need to boost the b/w in 9999 * addition to any earlier boosting of 10000 * the multipler. 10001 */ 10002 rack->rc_dragged_bottom = 1; 10003 rack_validate_multipliers_at_or_above100(rack); 10004 /* 10005 * Lets use the segment bytes acked plus 10006 * the lowest RTT seen as the basis to 10007 * form a b/w estimate. This will be off 10008 * due to the fact that the true estimate 10009 * should be around 1/2 the time of the RTT 10010 * but we can settle for that. 10011 */ 10012 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 10013 acked) { 10014 uint64_t bw, calc_bw, rtt; 10015 10016 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 10017 if (rtt == 0) { 10018 /* no us sample is there a ms one? */ 10019 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 10020 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 10021 } else { 10022 goto no_measurement; 10023 } 10024 } 10025 bw = acked; 10026 calc_bw = bw * 1000000; 10027 calc_bw /= rtt; 10028 if (rack->r_ctl.last_max_bw && 10029 (rack->r_ctl.last_max_bw < calc_bw)) { 10030 /* 10031 * If we have a last calculated max bw 10032 * enforce it. 10033 */ 10034 calc_bw = rack->r_ctl.last_max_bw; 10035 } 10036 /* now plop it in */ 10037 if (rack->rc_gp_filled == 0) { 10038 if (calc_bw > ONE_POINT_TWO_MEG) { 10039 /* 10040 * If we have no measurement 10041 * don't let us set in more than 10042 * 1.2Mbps. If we are still too 10043 * low after pacing with this we 10044 * will hopefully have a max b/w 10045 * available to sanity check things. 
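 * As a rough example (numbers purely illustrative): 14600
 * bytes acked against a 20000 usec lowest RTT gives
 * calc_bw = 14600 * 1000000 / 20000 = 730000 bytes/sec,
 * which is the value the clamp just below is applied to.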
10046 */ 10047 calc_bw = ONE_POINT_TWO_MEG; 10048 } 10049 rack->r_ctl.rc_rtt_diff = 0; 10050 rack->r_ctl.gp_bw = calc_bw; 10051 rack->rc_gp_filled = 1; 10052 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10053 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10054 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10055 } else if (calc_bw > rack->r_ctl.gp_bw) { 10056 rack->r_ctl.rc_rtt_diff = 0; 10057 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10058 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10059 rack->r_ctl.gp_bw = calc_bw; 10060 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10061 } else 10062 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10063 if ((rack->gp_ready == 0) && 10064 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 10065 /* We have enough measurements now */ 10066 rack->gp_ready = 1; 10067 rack_set_cc_pacing(rack); 10068 if (rack->defer_options) 10069 rack_apply_deferred_options(rack); 10070 } 10071 /* 10072 * For acks over 1mss we do a extra boost to simulate 10073 * where we would get 2 acks (we want 110 for the mul). 10074 */ 10075 if (acked > segsiz) 10076 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10077 } else { 10078 /* 10079 * zero rtt possibly?, settle for just an old increase. 10080 */ 10081 no_measurement: 10082 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10083 } 10084 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 10085 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 10086 minseg)) && 10087 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 10088 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 10089 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 10090 (segsiz * rack_req_segs))) { 10091 /* 10092 * We are doing dynamic GP pacing and 10093 * we have everything except 1MSS or less 10094 * bytes left out. We are still pacing away. 10095 * And there is data that could be sent, This 10096 * means we are inserting delayed ack time in 10097 * our measurements because we are pacing too slow. 10098 */ 10099 rack_validate_multipliers_at_or_above100(rack); 10100 rack->rc_dragged_bottom = 1; 10101 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10102 } 10103 } 10104 10105 10106 10107 static void 10108 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 10109 { 10110 /* 10111 * The fast output path is enabled and we 10112 * have moved the cumack forward. Lets see if 10113 * we can expand forward the fast path length by 10114 * that amount. What we would ideally like to 10115 * do is increase the number of bytes in the 10116 * fast path block (left_to_send) by the 10117 * acked amount. However we have to gate that 10118 * by two factors: 10119 * 1) The amount outstanding and the rwnd of the peer 10120 * (i.e. we don't want to exceed the rwnd of the peer). 10121 * <and> 10122 * 2) The amount of data left in the socket buffer (i.e. 10123 * we can't send beyond what is in the buffer). 10124 * 10125 * Note that this does not take into account any increase 10126 * in the cwnd. We will only extend the fast path by 10127 * what was acked. 
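 * As an illustrative example: with left_to_send at 5000 bytes
 * and 3000 bytes newly acked, the proposed total of 8000 is
 * only installed if both (sbavail - outstanding) and
 * (snd_wnd - outstanding) are at least 8000 bytes; otherwise
 * left_to_send is left untouched.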
10128 */ 10129 uint32_t new_total, gating_val; 10130 10131 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 10132 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 10133 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 10134 if (new_total <= gating_val) { 10135 /* We can increase left_to_send by the acked amount */ 10136 counter_u64_add(rack_extended_rfo, 1); 10137 rack->r_ctl.fsb.left_to_send = new_total; 10138 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 10139 ("rack:%p left_to_send:%u sbavail:%u out:%u", 10140 rack, rack->r_ctl.fsb.left_to_send, 10141 sbavail(&rack->rc_inp->inp_socket->so_snd), 10142 (tp->snd_max - tp->snd_una))); 10143 10144 } 10145 } 10146 10147 static void 10148 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 10149 { 10150 /* 10151 * Here any sendmap entry that points to the 10152 * beginning mbuf must be adjusted to the correct 10153 * offset. This must be called with: 10154 * 1) The socket buffer locked 10155 * 2) snd_una adjusted to its new postion. 10156 * 10157 * Note that (2) implies rack_ack_received has also 10158 * been called. 10159 * 10160 * We grab the first mbuf in the socket buffer and 10161 * then go through the front of the sendmap, recalculating 10162 * the stored offset for any sendmap entry that has 10163 * that mbuf. We must use the sb functions to do this 10164 * since its possible an add was done has well as 10165 * the subtraction we may have just completed. This should 10166 * not be a penalty though, since we just referenced the sb 10167 * to go in and trim off the mbufs that we freed (of course 10168 * there will be a penalty for the sendmap references though). 10169 */ 10170 struct mbuf *m; 10171 struct rack_sendmap *rsm; 10172 10173 SOCKBUF_LOCK_ASSERT(sb); 10174 m = sb->sb_mb; 10175 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 10176 if ((rsm == NULL) || (m == NULL)) { 10177 /* Nothing outstanding */ 10178 return; 10179 } 10180 while (rsm->m && (rsm->m == m)) { 10181 /* one to adjust */ 10182 #ifdef INVARIANTS 10183 struct mbuf *tm; 10184 uint32_t soff; 10185 10186 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 10187 if (rsm->orig_m_len != m->m_len) { 10188 rack_adjust_orig_mlen(rsm); 10189 } 10190 if (rsm->soff != soff) { 10191 /* 10192 * This is not a fatal error, we anticipate it 10193 * might happen (the else code), so we count it here 10194 * so that under invariant we can see that it really 10195 * does happen. 10196 */ 10197 counter_u64_add(rack_adjust_map_bw, 1); 10198 } 10199 rsm->m = tm; 10200 rsm->soff = soff; 10201 if (tm) 10202 rsm->orig_m_len = rsm->m->m_len; 10203 else 10204 rsm->orig_m_len = 0; 10205 #else 10206 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10207 if (rsm->m) 10208 rsm->orig_m_len = rsm->m->m_len; 10209 else 10210 rsm->orig_m_len = 0; 10211 #endif 10212 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10213 rsm); 10214 if (rsm == NULL) 10215 break; 10216 } 10217 } 10218 10219 /* 10220 * Return value of 1, we do not need to call rack_process_data(). 10221 * return value of 0, rack_process_data can be called. 10222 * For ret_val if its 0 the TCP is locked, if its non-zero 10223 * its unlocked and probably unsafe to touch the TCB. 
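 * Callers typically invoke this as
 *	if (rack_process_ack(...))
 *		return (ret_val);
 * so that a non-zero ret_val propagates upward without the
 * TCB being touched again.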
10224 */ 10225 static int 10226 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10227 struct tcpcb *tp, struct tcpopt *to, 10228 uint32_t tiwin, int32_t tlen, 10229 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10230 { 10231 int32_t ourfinisacked = 0; 10232 int32_t nsegs, acked_amount; 10233 int32_t acked; 10234 struct mbuf *mfree; 10235 struct tcp_rack *rack; 10236 int32_t under_pacing = 0; 10237 int32_t recovery = 0; 10238 10239 rack = (struct tcp_rack *)tp->t_fb_ptr; 10240 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10241 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10242 &rack->r_ctl.challenge_ack_ts, 10243 &rack->r_ctl.challenge_ack_cnt); 10244 rack->r_wanted_output = 1; 10245 return (1); 10246 } 10247 if (rack->gp_ready && 10248 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10249 under_pacing = 1; 10250 } 10251 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10252 int in_rec, dup_ack_struck = 0; 10253 10254 in_rec = IN_FASTRECOVERY(tp->t_flags); 10255 if (rack->rc_in_persist) { 10256 tp->t_rxtshift = 0; 10257 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10258 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10259 } 10260 if ((th->th_ack == tp->snd_una) && 10261 (tiwin == tp->snd_wnd) && 10262 ((to->to_flags & TOF_SACK) == 0)) { 10263 rack_strike_dupack(rack); 10264 dup_ack_struck = 1; 10265 } 10266 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10267 } 10268 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10269 /* 10270 * Old ack, behind (or duplicate to) the last one rcv'd 10271 * Note: We mark reordering is occuring if its 10272 * less than and we have not closed our window. 10273 */ 10274 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10275 counter_u64_add(rack_reorder_seen, 1); 10276 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10277 } 10278 return (0); 10279 } 10280 /* 10281 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10282 * something we sent. 10283 */ 10284 if (tp->t_flags & TF_NEEDSYN) { 10285 /* 10286 * T/TCP: Connection was half-synchronized, and our SYN has 10287 * been ACK'd (so connection is now fully synchronized). Go 10288 * to non-starred state, increment snd_una for ACK of SYN, 10289 * and check if we can do window scaling. 10290 */ 10291 tp->t_flags &= ~TF_NEEDSYN; 10292 tp->snd_una++; 10293 /* Do window scaling? */ 10294 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10295 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10296 tp->rcv_scale = tp->request_r_scale; 10297 /* Send window already scaled. */ 10298 } 10299 } 10300 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10301 INP_WLOCK_ASSERT(tp->t_inpcb); 10302 10303 acked = BYTES_THIS_ACK(tp, th); 10304 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10305 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10306 /* 10307 * If we just performed our first retransmit, and the ACK arrives 10308 * within our recovery window, then it was a mistake to do the 10309 * retransmit in the first place. Recover our original cwnd and 10310 * ssthresh, and proceed to transmit where we left off. 
10311 */ 10312 if ((tp->t_flags & TF_PREVVALID) && 10313 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10314 tp->t_flags &= ~TF_PREVVALID; 10315 if (tp->t_rxtshift == 1 && 10316 (int)(ticks - tp->t_badrxtwin) < 0) 10317 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10318 } 10319 if (acked) { 10320 /* assure we are not backed off */ 10321 tp->t_rxtshift = 0; 10322 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10323 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10324 rack->rc_tlp_in_progress = 0; 10325 rack->r_ctl.rc_tlp_cnt_out = 0; 10326 /* 10327 * If it is the RXT timer we want to 10328 * stop it, so we can restart a TLP. 10329 */ 10330 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10331 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10332 #ifdef NETFLIX_HTTP_LOGGING 10333 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10334 #endif 10335 } 10336 /* 10337 * If we have a timestamp reply, update smoothed round trip time. If 10338 * no timestamp is present but transmit timer is running and timed 10339 * sequence number was acked, update smoothed round trip time. Since 10340 * we now have an rtt measurement, cancel the timer backoff (cf., 10341 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10342 * timer. 10343 * 10344 * Some boxes send broken timestamp replies during the SYN+ACK 10345 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10346 * and blow up the retransmit timer. 10347 */ 10348 /* 10349 * If all outstanding data is acked, stop retransmit timer and 10350 * remember to restart (more output or persist). If there is more 10351 * data to be acked, restart retransmit timer, using current 10352 * (possibly backed-off) value. 10353 */ 10354 if (acked == 0) { 10355 if (ofia) 10356 *ofia = ourfinisacked; 10357 return (0); 10358 } 10359 if (IN_RECOVERY(tp->t_flags)) { 10360 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10361 (SEQ_LT(th->th_ack, tp->snd_max))) { 10362 tcp_rack_partialack(tp); 10363 } else { 10364 rack_post_recovery(tp, th->th_ack); 10365 recovery = 1; 10366 } 10367 } 10368 /* 10369 * Let the congestion control algorithm update congestion control 10370 * related information. This typically means increasing the 10371 * congestion window. 10372 */ 10373 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10374 SOCKBUF_LOCK(&so->so_snd); 10375 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10376 tp->snd_wnd -= acked_amount; 10377 mfree = sbcut_locked(&so->so_snd, acked_amount); 10378 if ((sbused(&so->so_snd) == 0) && 10379 (acked > acked_amount) && 10380 (tp->t_state >= TCPS_FIN_WAIT_1) && 10381 (tp->t_flags & TF_SENTFIN)) { 10382 /* 10383 * We must be sure our fin 10384 * was sent and acked (we can be 10385 * in FIN_WAIT_1 without having 10386 * sent the fin). 10387 */ 10388 ourfinisacked = 1; 10389 } 10390 tp->snd_una = th->th_ack; 10391 if (acked_amount && sbavail(&so->so_snd)) 10392 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10393 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10394 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10395 sowwakeup_locked(so); 10396 m_freem(mfree); 10397 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10398 tp->snd_recover = tp->snd_una; 10399 10400 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10401 tp->snd_nxt = tp->snd_una; 10402 } 10403 if (under_pacing && 10404 (rack->use_fixed_rate == 0) && 10405 (rack->in_probe_rtt == 0) && 10406 rack->rc_gp_dyn_mul && 10407 rack->rc_always_pace) { 10408 /* Check if we are dragging bottom */ 10409 rack_check_bottom_drag(tp, rack, so, acked); 10410 } 10411 if (tp->snd_una == tp->snd_max) { 10412 /* Nothing left outstanding */ 10413 tp->t_flags &= ~TF_PREVVALID; 10414 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10415 rack->r_ctl.retran_during_recovery = 0; 10416 rack->r_ctl.dsack_byte_cnt = 0; 10417 if (rack->r_ctl.rc_went_idle_time == 0) 10418 rack->r_ctl.rc_went_idle_time = 1; 10419 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10420 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10421 tp->t_acktime = 0; 10422 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10423 /* Set need output so persist might get set */ 10424 rack->r_wanted_output = 1; 10425 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10426 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10427 (sbavail(&so->so_snd) == 0) && 10428 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10429 /* 10430 * The socket was gone and the 10431 * peer sent data (now or in the past), time to 10432 * reset him. 10433 */ 10434 *ret_val = 1; 10435 /* tcp_close will kill the inp pre-log the Reset */ 10436 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10437 tp = tcp_close(tp); 10438 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10439 return (1); 10440 } 10441 } 10442 if (ofia) 10443 *ofia = ourfinisacked; 10444 return (0); 10445 } 10446 10447 static void 10448 rack_collapsed_window(struct tcp_rack *rack) 10449 { 10450 /* 10451 * Now we must walk the 10452 * send map and divide the 10453 * ones left stranded. These 10454 * guys can't cause us to abort 10455 * the connection and are really 10456 * "unsent". However if a buggy 10457 * client actually did keep some 10458 * of the data i.e. collapsed the win 10459 * and refused to ack and then opened 10460 * the win and acked that data. We would 10461 * get into an ack war, the simplier 10462 * method then of just pretending we 10463 * did not send those segments something 10464 * won't work. 10465 */ 10466 struct rack_sendmap *rsm, *nrsm, fe, *insret; 10467 tcp_seq max_seq; 10468 10469 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10470 memset(&fe, 0, sizeof(fe)); 10471 fe.r_start = max_seq; 10472 /* Find the first seq past or at maxseq */ 10473 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10474 if (rsm == NULL) { 10475 /* Nothing to do strange */ 10476 rack->rc_has_collapsed = 0; 10477 return; 10478 } 10479 /* 10480 * Now do we need to split at 10481 * the collapse point? 10482 */ 10483 if (SEQ_GT(max_seq, rsm->r_start)) { 10484 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10485 if (nrsm == NULL) { 10486 /* We can't get a rsm, mark all? 
*/ 10487 nrsm = rsm; 10488 goto no_split; 10489 } 10490 /* Clone it */ 10491 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10492 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10493 #ifdef INVARIANTS 10494 if (insret != NULL) { 10495 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10496 nrsm, insret, rack, rsm); 10497 } 10498 #endif 10499 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10500 if (rsm->r_in_tmap) { 10501 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10502 nrsm->r_in_tmap = 1; 10503 } 10504 /* 10505 * Set in the new RSM as the 10506 * collapsed starting point 10507 */ 10508 rsm = nrsm; 10509 } 10510 no_split: 10511 counter_u64_add(rack_collapsed_win, 1); 10512 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10513 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10514 } 10515 rack->rc_has_collapsed = 1; 10516 } 10517 10518 static void 10519 rack_un_collapse_window(struct tcp_rack *rack) 10520 { 10521 struct rack_sendmap *rsm; 10522 10523 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10524 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10525 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10526 else 10527 break; 10528 } 10529 rack->rc_has_collapsed = 0; 10530 } 10531 10532 static void 10533 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10534 int32_t tlen, int32_t tfo_syn) 10535 { 10536 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10537 if (rack->rc_dack_mode && 10538 (tlen > 500) && 10539 (rack->rc_dack_toggle == 1)) { 10540 goto no_delayed_ack; 10541 } 10542 rack_timer_cancel(tp, rack, 10543 rack->r_ctl.rc_rcvtime, __LINE__); 10544 tp->t_flags |= TF_DELACK; 10545 } else { 10546 no_delayed_ack: 10547 rack->r_wanted_output = 1; 10548 tp->t_flags |= TF_ACKNOW; 10549 if (rack->rc_dack_mode) { 10550 if (tp->t_flags & TF_DELACK) 10551 rack->rc_dack_toggle = 1; 10552 else 10553 rack->rc_dack_toggle = 0; 10554 } 10555 } 10556 } 10557 10558 static void 10559 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10560 { 10561 /* 10562 * If fast output is in progress, lets validate that 10563 * the new window did not shrink on us and make it 10564 * so fast output should end. 10565 */ 10566 if (rack->r_fast_output) { 10567 uint32_t out; 10568 10569 /* 10570 * Calculate what we will send if left as is 10571 * and compare that to our send window. 10572 */ 10573 out = ctf_outstanding(tp); 10574 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10575 /* ok we have an issue */ 10576 if (out >= tp->snd_wnd) { 10577 /* Turn off fast output the window is met or collapsed */ 10578 rack->r_fast_output = 0; 10579 } else { 10580 /* we have some room left */ 10581 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10582 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10583 /* If not at least 1 full segment never mind */ 10584 rack->r_fast_output = 0; 10585 } 10586 } 10587 } 10588 } 10589 } 10590 10591 10592 /* 10593 * Return value of 1, the TCB is unlocked and most 10594 * likely gone, return value of 0, the TCP is still 10595 * locked. 10596 */ 10597 static int 10598 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10599 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10600 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10601 { 10602 /* 10603 * Update window information. Don't look at window if no ACK: TAC's 10604 * send garbage on first SYN. 
10605 */ 10606 int32_t nsegs; 10607 int32_t tfo_syn; 10608 struct tcp_rack *rack; 10609 10610 rack = (struct tcp_rack *)tp->t_fb_ptr; 10611 INP_WLOCK_ASSERT(tp->t_inpcb); 10612 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10613 if ((thflags & TH_ACK) && 10614 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10615 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10616 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10617 /* keep track of pure window updates */ 10618 if (tlen == 0 && 10619 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10620 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10621 tp->snd_wnd = tiwin; 10622 rack_validate_fo_sendwin_up(tp, rack); 10623 tp->snd_wl1 = th->th_seq; 10624 tp->snd_wl2 = th->th_ack; 10625 if (tp->snd_wnd > tp->max_sndwnd) 10626 tp->max_sndwnd = tp->snd_wnd; 10627 rack->r_wanted_output = 1; 10628 } else if (thflags & TH_ACK) { 10629 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10630 tp->snd_wnd = tiwin; 10631 rack_validate_fo_sendwin_up(tp, rack); 10632 tp->snd_wl1 = th->th_seq; 10633 tp->snd_wl2 = th->th_ack; 10634 } 10635 } 10636 if (tp->snd_wnd < ctf_outstanding(tp)) 10637 /* The peer collapsed the window */ 10638 rack_collapsed_window(rack); 10639 else if (rack->rc_has_collapsed) 10640 rack_un_collapse_window(rack); 10641 /* Was persist timer active and now we have window space? */ 10642 if ((rack->rc_in_persist != 0) && 10643 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10644 rack->r_ctl.rc_pace_min_segs))) { 10645 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10646 tp->snd_nxt = tp->snd_max; 10647 /* Make sure we output to start the timer */ 10648 rack->r_wanted_output = 1; 10649 } 10650 /* Do we enter persists? */ 10651 if ((rack->rc_in_persist == 0) && 10652 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10653 TCPS_HAVEESTABLISHED(tp->t_state) && 10654 (tp->snd_max == tp->snd_una) && 10655 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10656 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10657 /* 10658 * Here the rwnd is less than 10659 * the pacing size, we are established, 10660 * nothing is outstanding, and there is 10661 * data to send. Enter persists. 10662 */ 10663 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10664 } 10665 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10666 m_freem(m); 10667 return (0); 10668 } 10669 /* 10670 * don't process the URG bit, ignore them drag 10671 * along the up. 10672 */ 10673 tp->rcv_up = tp->rcv_nxt; 10674 INP_WLOCK_ASSERT(tp->t_inpcb); 10675 10676 /* 10677 * Process the segment text, merging it into the TCP sequencing 10678 * queue, and arranging for acknowledgment of receipt if necessary. 10679 * This process logically involves adjusting tp->rcv_wnd as data is 10680 * presented to the user (this happens in tcp_usrreq.c, case 10681 * PRU_RCVD). If a FIN has already been received on this connection 10682 * then we just ignore the text. 10683 */ 10684 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10685 IS_FASTOPEN(tp->t_flags)); 10686 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10687 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10688 tcp_seq save_start = th->th_seq; 10689 tcp_seq save_rnxt = tp->rcv_nxt; 10690 int save_tlen = tlen; 10691 10692 m_adj(m, drop_hdrlen); /* delayed header drop */ 10693 /* 10694 * Insert segment which includes th into TCP reassembly 10695 * queue with control block tp. Set thflags to whether 10696 * reassembly now includes a segment with FIN. 
This handles 10697 * the common case inline (segment is the next to be 10698 * received on an established connection, and the queue is 10699 * empty), avoiding linkage into and removal from the queue 10700 * and repetition of various conversions. Set DELACK for 10701 * segments received in order, but ack immediately when 10702 * segments are out of order (so fast retransmit can work). 10703 */ 10704 if (th->th_seq == tp->rcv_nxt && 10705 SEGQ_EMPTY(tp) && 10706 (TCPS_HAVEESTABLISHED(tp->t_state) || 10707 tfo_syn)) { 10708 #ifdef NETFLIX_SB_LIMITS 10709 u_int mcnt, appended; 10710 10711 if (so->so_rcv.sb_shlim) { 10712 mcnt = m_memcnt(m); 10713 appended = 0; 10714 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10715 CFO_NOSLEEP, NULL) == false) { 10716 counter_u64_add(tcp_sb_shlim_fails, 1); 10717 m_freem(m); 10718 return (0); 10719 } 10720 } 10721 #endif 10722 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10723 tp->rcv_nxt += tlen; 10724 if (tlen && 10725 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10726 (tp->t_fbyte_in == 0)) { 10727 tp->t_fbyte_in = ticks; 10728 if (tp->t_fbyte_in == 0) 10729 tp->t_fbyte_in = 1; 10730 if (tp->t_fbyte_out && tp->t_fbyte_in) 10731 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10732 } 10733 thflags = th->th_flags & TH_FIN; 10734 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10735 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10736 SOCKBUF_LOCK(&so->so_rcv); 10737 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10738 m_freem(m); 10739 } else 10740 #ifdef NETFLIX_SB_LIMITS 10741 appended = 10742 #endif 10743 sbappendstream_locked(&so->so_rcv, m, 0); 10744 10745 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10746 /* NB: sorwakeup_locked() does an implicit unlock. */ 10747 sorwakeup_locked(so); 10748 #ifdef NETFLIX_SB_LIMITS 10749 if (so->so_rcv.sb_shlim && appended != mcnt) 10750 counter_fo_release(so->so_rcv.sb_shlim, 10751 mcnt - appended); 10752 #endif 10753 } else { 10754 /* 10755 * XXX: Due to the header drop above "th" is 10756 * theoretically invalid by now. Fortunately 10757 * m_adj() doesn't actually frees any mbufs when 10758 * trimming from the head. 10759 */ 10760 tcp_seq temp = save_start; 10761 10762 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10763 tp->t_flags |= TF_ACKNOW; 10764 if (tp->t_flags & TF_WAKESOR) { 10765 tp->t_flags &= ~TF_WAKESOR; 10766 /* NB: sorwakeup_locked() does an implicit unlock. */ 10767 sorwakeup_locked(so); 10768 } 10769 } 10770 if ((tp->t_flags & TF_SACK_PERMIT) && 10771 (save_tlen > 0) && 10772 TCPS_HAVEESTABLISHED(tp->t_state)) { 10773 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10774 /* 10775 * DSACK actually handled in the fastpath 10776 * above. 10777 */ 10778 RACK_OPTS_INC(tcp_sack_path_1); 10779 tcp_update_sack_list(tp, save_start, 10780 save_start + save_tlen); 10781 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10782 if ((tp->rcv_numsacks >= 1) && 10783 (tp->sackblks[0].end == save_start)) { 10784 /* 10785 * Partial overlap, recorded at todrop 10786 * above. 10787 */ 10788 RACK_OPTS_INC(tcp_sack_path_2a); 10789 tcp_update_sack_list(tp, 10790 tp->sackblks[0].start, 10791 tp->sackblks[0].end); 10792 } else { 10793 RACK_OPTS_INC(tcp_sack_path_2b); 10794 tcp_update_dsack_list(tp, save_start, 10795 save_start + save_tlen); 10796 } 10797 } else if (tlen >= save_tlen) { 10798 /* Update of sackblks. 
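 * We only reach this arm when rcv_nxt did not advance past its
 * pre-reassembly value, i.e. the segment duplicated data we
 * already hold, so the whole original range is reported as a
 * D-SACK.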
*/ 10799 RACK_OPTS_INC(tcp_sack_path_3); 10800 tcp_update_dsack_list(tp, save_start, 10801 save_start + save_tlen); 10802 } else if (tlen > 0) { 10803 RACK_OPTS_INC(tcp_sack_path_4); 10804 tcp_update_dsack_list(tp, save_start, 10805 save_start + tlen); 10806 } 10807 } 10808 } else { 10809 m_freem(m); 10810 thflags &= ~TH_FIN; 10811 } 10812 10813 /* 10814 * If FIN is received ACK the FIN and let the user know that the 10815 * connection is closing. 10816 */ 10817 if (thflags & TH_FIN) { 10818 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10819 /* The socket upcall is handled by socantrcvmore. */ 10820 socantrcvmore(so); 10821 /* 10822 * If connection is half-synchronized (ie NEEDSYN 10823 * flag on) then delay ACK, so it may be piggybacked 10824 * when SYN is sent. Otherwise, since we received a 10825 * FIN then no more input can be expected, send ACK 10826 * now. 10827 */ 10828 if (tp->t_flags & TF_NEEDSYN) { 10829 rack_timer_cancel(tp, rack, 10830 rack->r_ctl.rc_rcvtime, __LINE__); 10831 tp->t_flags |= TF_DELACK; 10832 } else { 10833 tp->t_flags |= TF_ACKNOW; 10834 } 10835 tp->rcv_nxt++; 10836 } 10837 switch (tp->t_state) { 10838 /* 10839 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10840 * CLOSE_WAIT state. 10841 */ 10842 case TCPS_SYN_RECEIVED: 10843 tp->t_starttime = ticks; 10844 /* FALLTHROUGH */ 10845 case TCPS_ESTABLISHED: 10846 rack_timer_cancel(tp, rack, 10847 rack->r_ctl.rc_rcvtime, __LINE__); 10848 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10849 break; 10850 10851 /* 10852 * If still in FIN_WAIT_1 STATE FIN has not been 10853 * acked so enter the CLOSING state. 10854 */ 10855 case TCPS_FIN_WAIT_1: 10856 rack_timer_cancel(tp, rack, 10857 rack->r_ctl.rc_rcvtime, __LINE__); 10858 tcp_state_change(tp, TCPS_CLOSING); 10859 break; 10860 10861 /* 10862 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10863 * starting the time-wait timer, turning off the 10864 * other standard timers. 10865 */ 10866 case TCPS_FIN_WAIT_2: 10867 rack_timer_cancel(tp, rack, 10868 rack->r_ctl.rc_rcvtime, __LINE__); 10869 tcp_twstart(tp); 10870 return (1); 10871 } 10872 } 10873 /* 10874 * Return any desired output. 10875 */ 10876 if ((tp->t_flags & TF_ACKNOW) || 10877 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10878 rack->r_wanted_output = 1; 10879 } 10880 INP_WLOCK_ASSERT(tp->t_inpcb); 10881 return (0); 10882 } 10883 10884 /* 10885 * Here nothing is really faster, its just that we 10886 * have broken out the fast-data path also just like 10887 * the fast-ack. 10888 */ 10889 static int 10890 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10891 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10892 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10893 { 10894 int32_t nsegs; 10895 int32_t newsize = 0; /* automatic sockbuf scaling */ 10896 struct tcp_rack *rack; 10897 #ifdef NETFLIX_SB_LIMITS 10898 u_int mcnt, appended; 10899 #endif 10900 #ifdef TCPDEBUG 10901 /* 10902 * The size of tcp_saveipgen must be the size of the max ip header, 10903 * now IPv6. 10904 */ 10905 u_char tcp_saveipgen[IP6_HDR_LEN]; 10906 struct tcphdr tcp_savetcp; 10907 short ostate = 0; 10908 10909 #endif 10910 /* 10911 * If last ACK falls within this segment's sequence numbers, record 10912 * the timestamp. NOTE that the test is modified according to the 10913 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
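 * Before any of that, the checks below confirm this really is
 * the pure in-sequence data case: next expected sequence,
 * nothing being retransmitted, an unchanged window, no pending
 * SYN/FIN state, a non-regressing timestamp, an ACK of nothing
 * new and room in the receive buffer. Any miss returns 0 and
 * sends the segment to the slow path.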
10914 */ 10915 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10916 return (0); 10917 } 10918 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10919 return (0); 10920 } 10921 if (tiwin && tiwin != tp->snd_wnd) { 10922 return (0); 10923 } 10924 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10925 return (0); 10926 } 10927 if (__predict_false((to->to_flags & TOF_TS) && 10928 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10929 return (0); 10930 } 10931 if (__predict_false((th->th_ack != tp->snd_una))) { 10932 return (0); 10933 } 10934 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10935 return (0); 10936 } 10937 if ((to->to_flags & TOF_TS) != 0 && 10938 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10939 tp->ts_recent_age = tcp_ts_getticks(); 10940 tp->ts_recent = to->to_tsval; 10941 } 10942 rack = (struct tcp_rack *)tp->t_fb_ptr; 10943 /* 10944 * This is a pure, in-sequence data packet with nothing on the 10945 * reassembly queue and we have enough buffer space to take it. 10946 */ 10947 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10948 10949 #ifdef NETFLIX_SB_LIMITS 10950 if (so->so_rcv.sb_shlim) { 10951 mcnt = m_memcnt(m); 10952 appended = 0; 10953 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10954 CFO_NOSLEEP, NULL) == false) { 10955 counter_u64_add(tcp_sb_shlim_fails, 1); 10956 m_freem(m); 10957 return (1); 10958 } 10959 } 10960 #endif 10961 /* Clean receiver SACK report if present */ 10962 if (tp->rcv_numsacks) 10963 tcp_clean_sackreport(tp); 10964 KMOD_TCPSTAT_INC(tcps_preddat); 10965 tp->rcv_nxt += tlen; 10966 if (tlen && 10967 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10968 (tp->t_fbyte_in == 0)) { 10969 tp->t_fbyte_in = ticks; 10970 if (tp->t_fbyte_in == 0) 10971 tp->t_fbyte_in = 1; 10972 if (tp->t_fbyte_out && tp->t_fbyte_in) 10973 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10974 } 10975 /* 10976 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10977 */ 10978 tp->snd_wl1 = th->th_seq; 10979 /* 10980 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10981 */ 10982 tp->rcv_up = tp->rcv_nxt; 10983 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10984 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10985 #ifdef TCPDEBUG 10986 if (so->so_options & SO_DEBUG) 10987 tcp_trace(TA_INPUT, ostate, tp, 10988 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10989 #endif 10990 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10991 10992 /* Add data to socket buffer. */ 10993 SOCKBUF_LOCK(&so->so_rcv); 10994 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10995 m_freem(m); 10996 } else { 10997 /* 10998 * Set new socket buffer size. Give up when limit is 10999 * reached. 11000 */ 11001 if (newsize) 11002 if (!sbreserve_locked(&so->so_rcv, 11003 newsize, so, NULL)) 11004 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 11005 m_adj(m, drop_hdrlen); /* delayed header drop */ 11006 #ifdef NETFLIX_SB_LIMITS 11007 appended = 11008 #endif 11009 sbappendstream_locked(&so->so_rcv, m, 0); 11010 ctf_calc_rwin(so, tp); 11011 } 11012 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 11013 /* NB: sorwakeup_locked() does an implicit unlock. */ 11014 sorwakeup_locked(so); 11015 #ifdef NETFLIX_SB_LIMITS 11016 if (so->so_rcv.sb_shlim && mcnt != appended) 11017 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 11018 #endif 11019 rack_handle_delayed_ack(tp, rack, tlen, 0); 11020 if (tp->snd_una == tp->snd_max) 11021 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 11022 return (1); 11023 } 11024 11025 /* 11026 * This subfunction is used to try to highly optimize the 11027 * fast path. 
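 * (It is the pure-ACK analogue of rack_do_fastnewdata() above.)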
We again allow window updates that are 11028 * in sequence to remain in the fast-path. We also add 11029 * in the __predict's to attempt to help the compiler. 11030 * Note that if we return a 0, then we can *not* process 11031 * it and the caller should push the packet into the 11032 * slow-path. 11033 */ 11034 static int 11035 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11036 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11037 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 11038 { 11039 int32_t acked; 11040 int32_t nsegs; 11041 #ifdef TCPDEBUG 11042 /* 11043 * The size of tcp_saveipgen must be the size of the max ip header, 11044 * now IPv6. 11045 */ 11046 u_char tcp_saveipgen[IP6_HDR_LEN]; 11047 struct tcphdr tcp_savetcp; 11048 short ostate = 0; 11049 #endif 11050 int32_t under_pacing = 0; 11051 struct tcp_rack *rack; 11052 11053 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11054 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11055 return (0); 11056 } 11057 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11058 /* Above what we have sent? */ 11059 return (0); 11060 } 11061 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11062 /* We are retransmitting */ 11063 return (0); 11064 } 11065 if (__predict_false(tiwin == 0)) { 11066 /* zero window */ 11067 return (0); 11068 } 11069 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11070 /* We need a SYN or a FIN, unlikely.. */ 11071 return (0); 11072 } 11073 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11074 /* Timestamp is behind .. old ack with seq wrap? */ 11075 return (0); 11076 } 11077 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11078 /* Still recovering */ 11079 return (0); 11080 } 11081 rack = (struct tcp_rack *)tp->t_fb_ptr; 11082 if (rack->r_ctl.rc_sacked) { 11083 /* We have sack holes on our scoreboard */ 11084 return (0); 11085 } 11086 /* Ok if we reach here, we can process a fast-ack */ 11087 if (rack->gp_ready && 11088 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11089 under_pacing = 1; 11090 } 11091 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11092 rack_log_ack(tp, to, th, 0, 0); 11093 /* Did the window get updated? */ 11094 if (tiwin != tp->snd_wnd) { 11095 tp->snd_wnd = tiwin; 11096 rack_validate_fo_sendwin_up(tp, rack); 11097 tp->snd_wl1 = th->th_seq; 11098 if (tp->snd_wnd > tp->max_sndwnd) 11099 tp->max_sndwnd = tp->snd_wnd; 11100 } 11101 /* Do we exit persists? */ 11102 if ((rack->rc_in_persist != 0) && 11103 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11104 rack->r_ctl.rc_pace_min_segs))) { 11105 rack_exit_persist(tp, rack, cts); 11106 } 11107 /* Do we enter persists? */ 11108 if ((rack->rc_in_persist == 0) && 11109 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11110 TCPS_HAVEESTABLISHED(tp->t_state) && 11111 (tp->snd_max == tp->snd_una) && 11112 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 11113 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 11114 /* 11115 * Here the rwnd is less than 11116 * the pacing size, we are established, 11117 * nothing is outstanding, and there is 11118 * data to send. Enter persists. 11119 */ 11120 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11121 } 11122 /* 11123 * If last ACK falls within this segment's sequence numbers, record 11124 * the timestamp. NOTE that the test is modified according to the 11125 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
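 * All of the fast-ack preconditions above have held by the time
 * we get here, so what remains is pure ACK handling: timestamp
 * bookkeeping, congestion control via rack_ack_received(),
 * trimming the acked data from the send buffer and cancelling
 * or re-arming timers.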
11126 */ 11127 if ((to->to_flags & TOF_TS) != 0 && 11128 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11129 tp->ts_recent_age = tcp_ts_getticks(); 11130 tp->ts_recent = to->to_tsval; 11131 } 11132 /* 11133 * This is a pure ack for outstanding data. 11134 */ 11135 KMOD_TCPSTAT_INC(tcps_predack); 11136 11137 /* 11138 * "bad retransmit" recovery. 11139 */ 11140 if ((tp->t_flags & TF_PREVVALID) && 11141 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11142 tp->t_flags &= ~TF_PREVVALID; 11143 if (tp->t_rxtshift == 1 && 11144 (int)(ticks - tp->t_badrxtwin) < 0) 11145 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 11146 } 11147 /* 11148 * Recalculate the transmit timer / rtt. 11149 * 11150 * Some boxes send broken timestamp replies during the SYN+ACK 11151 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11152 * and blow up the retransmit timer. 11153 */ 11154 acked = BYTES_THIS_ACK(tp, th); 11155 11156 #ifdef TCP_HHOOK 11157 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11158 hhook_run_tcp_est_in(tp, th, to); 11159 #endif 11160 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11161 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11162 if (acked) { 11163 struct mbuf *mfree; 11164 11165 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11166 SOCKBUF_LOCK(&so->so_snd); 11167 mfree = sbcut_locked(&so->so_snd, acked); 11168 tp->snd_una = th->th_ack; 11169 /* Note we want to hold the sb lock through the sendmap adjust */ 11170 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11171 /* Wake up the socket if we have room to write more */ 11172 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11173 sowwakeup_locked(so); 11174 m_freem(mfree); 11175 tp->t_rxtshift = 0; 11176 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11177 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11178 rack->rc_tlp_in_progress = 0; 11179 rack->r_ctl.rc_tlp_cnt_out = 0; 11180 /* 11181 * If it is the RXT timer we want to 11182 * stop it, so we can restart a TLP. 11183 */ 11184 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11185 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11186 #ifdef NETFLIX_HTTP_LOGGING 11187 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11188 #endif 11189 } 11190 /* 11191 * Let the congestion control algorithm update congestion control 11192 * related information. This typically means increasing the 11193 * congestion window. 11194 */ 11195 if (tp->snd_wnd < ctf_outstanding(tp)) { 11196 /* The peer collapsed the window */ 11197 rack_collapsed_window(rack); 11198 } else if (rack->rc_has_collapsed) 11199 rack_un_collapse_window(rack); 11200 11201 /* 11202 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11203 */ 11204 tp->snd_wl2 = th->th_ack; 11205 tp->t_dupacks = 0; 11206 m_freem(m); 11207 /* ND6_HINT(tp); *//* Some progress has been made. */ 11208 11209 /* 11210 * If all outstanding data are acked, stop retransmit timer, 11211 * otherwise restart timer using current (possibly backed-off) 11212 * value. If process is waiting for space, wakeup/selwakeup/signal. 11213 * If data are ready to send, let tcp_output decide between more 11214 * output or persist. 
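 * In this stack that decision is signalled through
 * r_wanted_output and the rack timer state rather than by a
 * direct call to the output routine from here.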
11215 */ 11216 #ifdef TCPDEBUG 11217 if (so->so_options & SO_DEBUG) 11218 tcp_trace(TA_INPUT, ostate, tp, 11219 (void *)tcp_saveipgen, 11220 &tcp_savetcp, 0); 11221 #endif 11222 if (under_pacing && 11223 (rack->use_fixed_rate == 0) && 11224 (rack->in_probe_rtt == 0) && 11225 rack->rc_gp_dyn_mul && 11226 rack->rc_always_pace) { 11227 /* Check if we are dragging bottom */ 11228 rack_check_bottom_drag(tp, rack, so, acked); 11229 } 11230 if (tp->snd_una == tp->snd_max) { 11231 tp->t_flags &= ~TF_PREVVALID; 11232 rack->r_ctl.retran_during_recovery = 0; 11233 rack->r_ctl.dsack_byte_cnt = 0; 11234 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11235 if (rack->r_ctl.rc_went_idle_time == 0) 11236 rack->r_ctl.rc_went_idle_time = 1; 11237 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11238 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11239 tp->t_acktime = 0; 11240 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11241 } 11242 if (acked && rack->r_fast_output) 11243 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11244 if (sbavail(&so->so_snd)) { 11245 rack->r_wanted_output = 1; 11246 } 11247 return (1); 11248 } 11249 11250 /* 11251 * Return value of 1, the TCB is unlocked and most 11252 * likely gone, return value of 0, the TCP is still 11253 * locked. 11254 */ 11255 static int 11256 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11257 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11258 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11259 { 11260 int32_t ret_val = 0; 11261 int32_t todrop; 11262 int32_t ourfinisacked = 0; 11263 struct tcp_rack *rack; 11264 11265 ctf_calc_rwin(so, tp); 11266 /* 11267 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11268 * SYN, drop the input. if seg contains a RST, then drop the 11269 * connection. if seg does not contain SYN, then drop it. Otherwise 11270 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11271 * tp->irs if seg contains ack then advance tp->snd_una if seg 11272 * contains an ECE and ECN support is enabled, the stream is ECN 11273 * capable. if SYN has been acked change to ESTABLISHED else 11274 * SYN_RCVD state arrange for segment to be acked (eventually) 11275 * continue processing rest of data/controls. 11276 */ 11277 if ((thflags & TH_ACK) && 11278 (SEQ_LEQ(th->th_ack, tp->iss) || 11279 SEQ_GT(th->th_ack, tp->snd_max))) { 11280 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11281 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11282 return (1); 11283 } 11284 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11285 TCP_PROBE5(connect__refused, NULL, tp, 11286 mtod(m, const char *), tp, th); 11287 tp = tcp_drop(tp, ECONNREFUSED); 11288 ctf_do_drop(m, tp); 11289 return (1); 11290 } 11291 if (thflags & TH_RST) { 11292 ctf_do_drop(m, tp); 11293 return (1); 11294 } 11295 if (!(thflags & TH_SYN)) { 11296 ctf_do_drop(m, tp); 11297 return (1); 11298 } 11299 tp->irs = th->th_seq; 11300 tcp_rcvseqinit(tp); 11301 rack = (struct tcp_rack *)tp->t_fb_ptr; 11302 if (thflags & TH_ACK) { 11303 int tfo_partial = 0; 11304 11305 KMOD_TCPSTAT_INC(tcps_connects); 11306 soisconnected(so); 11307 #ifdef MAC 11308 mac_socketpeer_set_from_mbuf(m, so); 11309 #endif 11310 /* Do window scaling on this connection? 
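 * Only if both sides agreed: we requested the option and the
 * peer's SYN-ACK carried it as well.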
*/ 11311 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11312 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11313 tp->rcv_scale = tp->request_r_scale; 11314 } 11315 tp->rcv_adv += min(tp->rcv_wnd, 11316 TCP_MAXWIN << tp->rcv_scale); 11317 /* 11318 * If not all the data that was sent in the TFO SYN 11319 * has been acked, resend the remainder right away. 11320 */ 11321 if (IS_FASTOPEN(tp->t_flags) && 11322 (tp->snd_una != tp->snd_max)) { 11323 tp->snd_nxt = th->th_ack; 11324 tfo_partial = 1; 11325 } 11326 /* 11327 * If there's data, delay ACK; if there's also a FIN ACKNOW 11328 * will be turned on later. 11329 */ 11330 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11331 rack_timer_cancel(tp, rack, 11332 rack->r_ctl.rc_rcvtime, __LINE__); 11333 tp->t_flags |= TF_DELACK; 11334 } else { 11335 rack->r_wanted_output = 1; 11336 tp->t_flags |= TF_ACKNOW; 11337 rack->rc_dack_toggle = 0; 11338 } 11339 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 11340 (V_tcp_do_ecn == 1)) { 11341 tp->t_flags2 |= TF2_ECN_PERMIT; 11342 KMOD_TCPSTAT_INC(tcps_ecn_shs); 11343 } 11344 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11345 /* 11346 * We advance snd_una for the 11347 * fast open case. If th_ack is 11348 * acknowledging data beyond 11349 * snd_una we can't just call 11350 * ack-processing since the 11351 * data stream in our send-map 11352 * will start at snd_una + 1 (one 11353 * beyond the SYN). If its just 11354 * equal we don't need to do that 11355 * and there is no send_map. 11356 */ 11357 tp->snd_una++; 11358 } 11359 /* 11360 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11361 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11362 */ 11363 tp->t_starttime = ticks; 11364 if (tp->t_flags & TF_NEEDFIN) { 11365 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11366 tp->t_flags &= ~TF_NEEDFIN; 11367 thflags &= ~TH_SYN; 11368 } else { 11369 tcp_state_change(tp, TCPS_ESTABLISHED); 11370 TCP_PROBE5(connect__established, NULL, tp, 11371 mtod(m, const char *), tp, th); 11372 rack_cc_conn_init(tp); 11373 } 11374 } else { 11375 /* 11376 * Received initial SYN in SYN-SENT[*] state => simultaneous 11377 * open. If segment contains CC option and there is a 11378 * cached CC, apply TAO test. If it succeeds, connection is * 11379 * half-synchronized. Otherwise, do 3-way handshake: 11380 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11381 * there was no CC option, clear cached CC value. 11382 */ 11383 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11384 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11385 } 11386 INP_WLOCK_ASSERT(tp->t_inpcb); 11387 /* 11388 * Advance th->th_seq to correspond to first data byte. If data, 11389 * trim to stay within window, dropping FIN if necessary. 11390 */ 11391 th->th_seq++; 11392 if (tlen > tp->rcv_wnd) { 11393 todrop = tlen - tp->rcv_wnd; 11394 m_adj(m, -todrop); 11395 tlen = tp->rcv_wnd; 11396 thflags &= ~TH_FIN; 11397 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11398 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11399 } 11400 tp->snd_wl1 = th->th_seq - 1; 11401 tp->rcv_up = th->th_seq; 11402 /* 11403 * Client side of transaction: already sent SYN and data. If the 11404 * remote host used T/TCP to validate the SYN, our data will be 11405 * ACK'd; if so, enter normal data segment processing in the middle 11406 * of step 5, ack processing. Otherwise, goto step 6. 
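 * ("step 5" and "step 6" refer to the SEGMENT ARRIVES
 * processing steps of RFC 793, section 3.9: check the ACK
 * field, then check the URG bit.)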
11407 */ 11408 if (thflags & TH_ACK) { 11409 /* For syn-sent we need to possibly update the rtt */ 11410 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11411 uint32_t t, mcts; 11412 11413 mcts = tcp_ts_getticks(); 11414 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11415 if (!tp->t_rttlow || tp->t_rttlow > t) 11416 tp->t_rttlow = t; 11417 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11418 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11419 tcp_rack_xmit_timer_commit(rack, tp); 11420 } 11421 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11422 return (ret_val); 11423 /* We may have changed to FIN_WAIT_1 above */ 11424 if (tp->t_state == TCPS_FIN_WAIT_1) { 11425 /* 11426 * In FIN_WAIT_1 STATE in addition to the processing 11427 * for the ESTABLISHED state if our FIN is now 11428 * acknowledged then enter FIN_WAIT_2. 11429 */ 11430 if (ourfinisacked) { 11431 /* 11432 * If we can't receive any more data, then 11433 * closing user can proceed. Starting the 11434 * timer is contrary to the specification, 11435 * but if we don't get a FIN we'll hang 11436 * forever. 11437 * 11438 * XXXjl: we should release the tp also, and 11439 * use a compressed state. 11440 */ 11441 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11442 soisdisconnected(so); 11443 tcp_timer_activate(tp, TT_2MSL, 11444 (tcp_fast_finwait2_recycle ? 11445 tcp_finwait2_timeout : 11446 TP_MAXIDLE(tp))); 11447 } 11448 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11449 } 11450 } 11451 } 11452 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11453 tiwin, thflags, nxt_pkt)); 11454 } 11455 11456 /* 11457 * Return value of 1, the TCB is unlocked and most 11458 * likely gone, return value of 0, the TCP is still 11459 * locked. 11460 */ 11461 static int 11462 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11463 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11464 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11465 { 11466 struct tcp_rack *rack; 11467 int32_t ret_val = 0; 11468 int32_t ourfinisacked = 0; 11469 11470 ctf_calc_rwin(so, tp); 11471 if ((thflags & TH_ACK) && 11472 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11473 SEQ_GT(th->th_ack, tp->snd_max))) { 11474 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11475 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11476 return (1); 11477 } 11478 rack = (struct tcp_rack *)tp->t_fb_ptr; 11479 if (IS_FASTOPEN(tp->t_flags)) { 11480 /* 11481 * When a TFO connection is in SYN_RECEIVED, the 11482 * only valid packets are the initial SYN, a 11483 * retransmit/copy of the initial SYN (possibly with 11484 * a subset of the original data), a valid ACK, a 11485 * FIN, or a RST. 
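 * A SYN|ACK in particular is never valid here (it would mean
 * the peer also considers itself the active opener), so it is
 * answered with a reset just below.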
11486 */ 11487 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11488 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11489 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11490 return (1); 11491 } else if (thflags & TH_SYN) { 11492 /* non-initial SYN is ignored */ 11493 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11494 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11495 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11496 ctf_do_drop(m, NULL); 11497 return (0); 11498 } 11499 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11500 ctf_do_drop(m, NULL); 11501 return (0); 11502 } 11503 } 11504 if ((thflags & TH_RST) || 11505 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11506 return (ctf_process_rst(m, th, so, tp)); 11507 /* 11508 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11509 * it's less than ts_recent, drop it. 11510 */ 11511 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11512 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11513 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11514 return (ret_val); 11515 } 11516 /* 11517 * In the SYN-RECEIVED state, validate that the packet belongs to 11518 * this connection before trimming the data to fit the receive 11519 * window. Check the sequence number versus IRS since we know the 11520 * sequence numbers haven't wrapped. This is a partial fix for the 11521 * "LAND" DoS attack. 11522 */ 11523 if (SEQ_LT(th->th_seq, tp->irs)) { 11524 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11525 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11526 return (1); 11527 } 11528 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11529 &rack->r_ctl.challenge_ack_ts, 11530 &rack->r_ctl.challenge_ack_cnt)) { 11531 return (ret_val); 11532 } 11533 /* 11534 * If last ACK falls within this segment's sequence numbers, record 11535 * its timestamp. NOTE: 1) That the test incorporates suggestions 11536 * from the latest proposal of the tcplw@cray.com list (Braden 11537 * 1993/04/26). 2) That updating only on newer timestamps interferes 11538 * with our earlier PAWS tests, so this check should be solely 11539 * predicated on the sequence space of this segment. 3) That we 11540 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11541 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11542 * SEG.Len, This modified check allows us to overcome RFC1323's 11543 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11544 * p.869. In such cases, we can still calculate the RTT correctly 11545 * when RCV.NXT == Last.ACK.Sent. 11546 */ 11547 if ((to->to_flags & TOF_TS) != 0 && 11548 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11549 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11550 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11551 tp->ts_recent_age = tcp_ts_getticks(); 11552 tp->ts_recent = to->to_tsval; 11553 } 11554 tp->snd_wnd = tiwin; 11555 rack_validate_fo_sendwin_up(tp, rack); 11556 /* 11557 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11558 * is on (half-synchronized state), then queue data for later 11559 * processing; else drop segment and return. 11560 */ 11561 if ((thflags & TH_ACK) == 0) { 11562 if (IS_FASTOPEN(tp->t_flags)) { 11563 rack_cc_conn_init(tp); 11564 } 11565 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11566 tiwin, thflags, nxt_pkt)); 11567 } 11568 KMOD_TCPSTAT_INC(tcps_connects); 11569 soisconnected(so); 11570 /* Do window scaling? 
*/ 11571 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11572 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11573 tp->rcv_scale = tp->request_r_scale; 11574 } 11575 /* 11576 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11577 * FIN-WAIT-1 11578 */ 11579 tp->t_starttime = ticks; 11580 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11581 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11582 tp->t_tfo_pending = NULL; 11583 } 11584 if (tp->t_flags & TF_NEEDFIN) { 11585 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11586 tp->t_flags &= ~TF_NEEDFIN; 11587 } else { 11588 tcp_state_change(tp, TCPS_ESTABLISHED); 11589 TCP_PROBE5(accept__established, NULL, tp, 11590 mtod(m, const char *), tp, th); 11591 /* 11592 * TFO connections call cc_conn_init() during SYN 11593 * processing. Calling it again here for such connections 11594 * is not harmless as it would undo the snd_cwnd reduction 11595 * that occurs when a TFO SYN|ACK is retransmitted. 11596 */ 11597 if (!IS_FASTOPEN(tp->t_flags)) 11598 rack_cc_conn_init(tp); 11599 } 11600 /* 11601 * Account for the ACK of our SYN prior to 11602 * regular ACK processing below, except for 11603 * simultaneous SYN, which is handled later. 11604 */ 11605 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11606 tp->snd_una++; 11607 /* 11608 * If segment contains data or ACK, will call tcp_reass() later; if 11609 * not, do so now to pass queued data to user. 11610 */ 11611 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11612 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11613 (struct mbuf *)0); 11614 if (tp->t_flags & TF_WAKESOR) { 11615 tp->t_flags &= ~TF_WAKESOR; 11616 /* NB: sorwakeup_locked() does an implicit unlock. */ 11617 sorwakeup_locked(so); 11618 } 11619 } 11620 tp->snd_wl1 = th->th_seq - 1; 11621 /* For syn-recv we need to possibly update the rtt */ 11622 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11623 uint32_t t, mcts; 11624 11625 mcts = tcp_ts_getticks(); 11626 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11627 if (!tp->t_rttlow || tp->t_rttlow > t) 11628 tp->t_rttlow = t; 11629 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11630 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11631 tcp_rack_xmit_timer_commit(rack, tp); 11632 } 11633 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11634 return (ret_val); 11635 } 11636 if (tp->t_state == TCPS_FIN_WAIT_1) { 11637 /* We could have went to FIN_WAIT_1 (or EST) above */ 11638 /* 11639 * In FIN_WAIT_1 STATE in addition to the processing for the 11640 * ESTABLISHED state if our FIN is now acknowledged then 11641 * enter FIN_WAIT_2. 11642 */ 11643 if (ourfinisacked) { 11644 /* 11645 * If we can't receive any more data, then closing 11646 * user can proceed. Starting the timer is contrary 11647 * to the specification, but if we don't get a FIN 11648 * we'll hang forever. 11649 * 11650 * XXXjl: we should release the tp also, and use a 11651 * compressed state. 11652 */ 11653 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11654 soisdisconnected(so); 11655 tcp_timer_activate(tp, TT_2MSL, 11656 (tcp_fast_finwait2_recycle ? 11657 tcp_finwait2_timeout : 11658 TP_MAXIDLE(tp))); 11659 } 11660 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11661 } 11662 } 11663 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11664 tiwin, thflags, nxt_pkt)); 11665 } 11666 11667 /* 11668 * Return value of 1, the TCB is unlocked and most 11669 * likely gone, return value of 0, the TCP is still 11670 * locked. 
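 * This is the per-state handler for ESTABLISHED. The two
 * header-prediction fast paths (rack_fastack() and
 * rack_do_fastnewdata()) are tried first; anything they refuse
 * falls through to the common slow-path checks below.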
11671 */ 11672 static int 11673 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11674 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11675 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11676 { 11677 int32_t ret_val = 0; 11678 struct tcp_rack *rack; 11679 11680 /* 11681 * Header prediction: check for the two common cases of a 11682 * uni-directional data xfer. If the packet has no control flags, 11683 * is in-sequence, the window didn't change and we're not 11684 * retransmitting, it's a candidate. If the length is zero and the 11685 * ack moved forward, we're the sender side of the xfer. Just free 11686 * the data acked & wake any higher level process that was blocked 11687 * waiting for space. If the length is non-zero and the ack didn't 11688 * move, we're the receiver side. If we're getting packets in-order 11689 * (the reassembly queue is empty), add the data toc The socket 11690 * buffer and note that we need a delayed ack. Make sure that the 11691 * hidden state-flags are also off. Since we check for 11692 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11693 */ 11694 rack = (struct tcp_rack *)tp->t_fb_ptr; 11695 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11696 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11697 __predict_true(SEGQ_EMPTY(tp)) && 11698 __predict_true(th->th_seq == tp->rcv_nxt)) { 11699 if (tlen == 0) { 11700 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11701 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11702 return (0); 11703 } 11704 } else { 11705 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11706 tiwin, nxt_pkt, iptos)) { 11707 return (0); 11708 } 11709 } 11710 } 11711 ctf_calc_rwin(so, tp); 11712 11713 if ((thflags & TH_RST) || 11714 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11715 return (ctf_process_rst(m, th, so, tp)); 11716 11717 /* 11718 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11719 * synchronized state. 11720 */ 11721 if (thflags & TH_SYN) { 11722 ctf_challenge_ack(m, th, tp, &ret_val); 11723 return (ret_val); 11724 } 11725 /* 11726 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11727 * it's less than ts_recent, drop it. 11728 */ 11729 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11730 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11731 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11732 return (ret_val); 11733 } 11734 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11735 &rack->r_ctl.challenge_ack_ts, 11736 &rack->r_ctl.challenge_ack_cnt)) { 11737 return (ret_val); 11738 } 11739 /* 11740 * If last ACK falls within this segment's sequence numbers, record 11741 * its timestamp. NOTE: 1) That the test incorporates suggestions 11742 * from the latest proposal of the tcplw@cray.com list (Braden 11743 * 1993/04/26). 2) That updating only on newer timestamps interferes 11744 * with our earlier PAWS tests, so this check should be solely 11745 * predicated on the sequence space of this segment. 3) That we 11746 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11747 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11748 * SEG.Len, This modified check allows us to overcome RFC1323's 11749 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11750 * p.869. In such cases, we can still calculate the RTT correctly 11751 * when RCV.NXT == Last.ACK.Sent. 
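	 * Worked example: a pure ACK (SEG.Len == 0, no SYN or FIN) arriving
	 * with SEG.SEQ == Last.ACK.Sent fails the strict RFC1323 test
	 * Last.ACK.Sent < SEG.SEQ + SEG.Len, but passes the <= form used
	 * below, so ts_recent is still refreshed and timestamp based RTT
	 * samples keep flowing.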
11752 */ 11753 if ((to->to_flags & TOF_TS) != 0 && 11754 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11755 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11756 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11757 tp->ts_recent_age = tcp_ts_getticks(); 11758 tp->ts_recent = to->to_tsval; 11759 } 11760 /* 11761 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11762 * is on (half-synchronized state), then queue data for later 11763 * processing; else drop segment and return. 11764 */ 11765 if ((thflags & TH_ACK) == 0) { 11766 if (tp->t_flags & TF_NEEDSYN) { 11767 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11768 tiwin, thflags, nxt_pkt)); 11769 11770 } else if (tp->t_flags & TF_ACKNOW) { 11771 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11772 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11773 return (ret_val); 11774 } else { 11775 ctf_do_drop(m, NULL); 11776 return (0); 11777 } 11778 } 11779 /* 11780 * Ack processing. 11781 */ 11782 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11783 return (ret_val); 11784 } 11785 if (sbavail(&so->so_snd)) { 11786 if (ctf_progress_timeout_check(tp, true)) { 11787 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11788 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11789 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11790 return (1); 11791 } 11792 } 11793 /* State changes only happen in rack_process_data() */ 11794 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11795 tiwin, thflags, nxt_pkt)); 11796 } 11797 11798 /* 11799 * Return value of 1, the TCB is unlocked and most 11800 * likely gone, return value of 0, the TCP is still 11801 * locked. 11802 */ 11803 static int 11804 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11805 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11806 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11807 { 11808 int32_t ret_val = 0; 11809 struct tcp_rack *rack; 11810 11811 rack = (struct tcp_rack *)tp->t_fb_ptr; 11812 ctf_calc_rwin(so, tp); 11813 if ((thflags & TH_RST) || 11814 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11815 return (ctf_process_rst(m, th, so, tp)); 11816 /* 11817 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11818 * synchronized state. 11819 */ 11820 if (thflags & TH_SYN) { 11821 ctf_challenge_ack(m, th, tp, &ret_val); 11822 return (ret_val); 11823 } 11824 /* 11825 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11826 * it's less than ts_recent, drop it. 11827 */ 11828 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11829 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11830 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11831 return (ret_val); 11832 } 11833 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11834 &rack->r_ctl.challenge_ack_ts, 11835 &rack->r_ctl.challenge_ack_cnt)) { 11836 return (ret_val); 11837 } 11838 /* 11839 * If last ACK falls within this segment's sequence numbers, record 11840 * its timestamp. NOTE: 1) That the test incorporates suggestions 11841 * from the latest proposal of the tcplw@cray.com list (Braden 11842 * 1993/04/26). 2) That updating only on newer timestamps interferes 11843 * with our earlier PAWS tests, so this check should be solely 11844 * predicated on the sequence space of this segment. 
3) That we 11845 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11846 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11847 * SEG.Len, This modified check allows us to overcome RFC1323's 11848 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11849 * p.869. In such cases, we can still calculate the RTT correctly 11850 * when RCV.NXT == Last.ACK.Sent. 11851 */ 11852 if ((to->to_flags & TOF_TS) != 0 && 11853 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11854 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11855 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11856 tp->ts_recent_age = tcp_ts_getticks(); 11857 tp->ts_recent = to->to_tsval; 11858 } 11859 /* 11860 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11861 * is on (half-synchronized state), then queue data for later 11862 * processing; else drop segment and return. 11863 */ 11864 if ((thflags & TH_ACK) == 0) { 11865 if (tp->t_flags & TF_NEEDSYN) { 11866 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11867 tiwin, thflags, nxt_pkt)); 11868 11869 } else if (tp->t_flags & TF_ACKNOW) { 11870 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11871 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11872 return (ret_val); 11873 } else { 11874 ctf_do_drop(m, NULL); 11875 return (0); 11876 } 11877 } 11878 /* 11879 * Ack processing. 11880 */ 11881 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11882 return (ret_val); 11883 } 11884 if (sbavail(&so->so_snd)) { 11885 if (ctf_progress_timeout_check(tp, true)) { 11886 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11887 tp, tick, PROGRESS_DROP, __LINE__); 11888 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11889 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11890 return (1); 11891 } 11892 } 11893 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11894 tiwin, thflags, nxt_pkt)); 11895 } 11896 11897 static int 11898 rack_check_data_after_close(struct mbuf *m, 11899 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11900 { 11901 struct tcp_rack *rack; 11902 11903 rack = (struct tcp_rack *)tp->t_fb_ptr; 11904 if (rack->rc_allow_data_af_clo == 0) { 11905 close_now: 11906 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11907 /* tcp_close will kill the inp pre-log the Reset */ 11908 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11909 tp = tcp_close(tp); 11910 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11911 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11912 return (1); 11913 } 11914 if (sbavail(&so->so_snd) == 0) 11915 goto close_now; 11916 /* Ok we allow data that is ignored and a followup reset */ 11917 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11918 tp->rcv_nxt = th->th_seq + *tlen; 11919 tp->t_flags2 |= TF2_DROP_AF_DATA; 11920 rack->r_wanted_output = 1; 11921 *tlen = 0; 11922 return (0); 11923 } 11924 11925 /* 11926 * Return value of 1, the TCB is unlocked and most 11927 * likely gone, return value of 0, the TCP is still 11928 * locked. 
11929 */ 11930 static int 11931 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11932 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11933 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11934 { 11935 int32_t ret_val = 0; 11936 int32_t ourfinisacked = 0; 11937 struct tcp_rack *rack; 11938 11939 rack = (struct tcp_rack *)tp->t_fb_ptr; 11940 ctf_calc_rwin(so, tp); 11941 11942 if ((thflags & TH_RST) || 11943 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11944 return (ctf_process_rst(m, th, so, tp)); 11945 /* 11946 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11947 * synchronized state. 11948 */ 11949 if (thflags & TH_SYN) { 11950 ctf_challenge_ack(m, th, tp, &ret_val); 11951 return (ret_val); 11952 } 11953 /* 11954 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11955 * it's less than ts_recent, drop it. 11956 */ 11957 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11958 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11959 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11960 return (ret_val); 11961 } 11962 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11963 &rack->r_ctl.challenge_ack_ts, 11964 &rack->r_ctl.challenge_ack_cnt)) { 11965 return (ret_val); 11966 } 11967 /* 11968 * If new data are received on a connection after the user processes 11969 * are gone, then RST the other end. 11970 */ 11971 if ((so->so_state & SS_NOFDREF) && tlen) { 11972 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11973 return (1); 11974 } 11975 /* 11976 * If last ACK falls within this segment's sequence numbers, record 11977 * its timestamp. NOTE: 1) That the test incorporates suggestions 11978 * from the latest proposal of the tcplw@cray.com list (Braden 11979 * 1993/04/26). 2) That updating only on newer timestamps interferes 11980 * with our earlier PAWS tests, so this check should be solely 11981 * predicated on the sequence space of this segment. 3) That we 11982 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11983 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11984 * SEG.Len, This modified check allows us to overcome RFC1323's 11985 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11986 * p.869. In such cases, we can still calculate the RTT correctly 11987 * when RCV.NXT == Last.ACK.Sent. 11988 */ 11989 if ((to->to_flags & TOF_TS) != 0 && 11990 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11991 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11992 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11993 tp->ts_recent_age = tcp_ts_getticks(); 11994 tp->ts_recent = to->to_tsval; 11995 } 11996 /* 11997 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11998 * is on (half-synchronized state), then queue data for later 11999 * processing; else drop segment and return. 12000 */ 12001 if ((thflags & TH_ACK) == 0) { 12002 if (tp->t_flags & TF_NEEDSYN) { 12003 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12004 tiwin, thflags, nxt_pkt)); 12005 } else if (tp->t_flags & TF_ACKNOW) { 12006 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12007 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12008 return (ret_val); 12009 } else { 12010 ctf_do_drop(m, NULL); 12011 return (0); 12012 } 12013 } 12014 /* 12015 * Ack processing. 
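	 * rack_process_ack() consumes the ACK: a non-zero return means the
	 * segment has already been disposed of and we are done here, while
	 * ourfinisacked is set when the peer's ACK covers our FIN and
	 * drives the FIN_WAIT_2 transition below.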
12016 */ 12017 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12018 return (ret_val); 12019 } 12020 if (ourfinisacked) { 12021 /* 12022 * If we can't receive any more data, then closing user can 12023 * proceed. Starting the timer is contrary to the 12024 * specification, but if we don't get a FIN we'll hang 12025 * forever. 12026 * 12027 * XXXjl: we should release the tp also, and use a 12028 * compressed state. 12029 */ 12030 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12031 soisdisconnected(so); 12032 tcp_timer_activate(tp, TT_2MSL, 12033 (tcp_fast_finwait2_recycle ? 12034 tcp_finwait2_timeout : 12035 TP_MAXIDLE(tp))); 12036 } 12037 tcp_state_change(tp, TCPS_FIN_WAIT_2); 12038 } 12039 if (sbavail(&so->so_snd)) { 12040 if (ctf_progress_timeout_check(tp, true)) { 12041 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12042 tp, tick, PROGRESS_DROP, __LINE__); 12043 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12044 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12045 return (1); 12046 } 12047 } 12048 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12049 tiwin, thflags, nxt_pkt)); 12050 } 12051 12052 /* 12053 * Return value of 1, the TCB is unlocked and most 12054 * likely gone, return value of 0, the TCP is still 12055 * locked. 12056 */ 12057 static int 12058 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12059 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12060 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12061 { 12062 int32_t ret_val = 0; 12063 int32_t ourfinisacked = 0; 12064 struct tcp_rack *rack; 12065 12066 rack = (struct tcp_rack *)tp->t_fb_ptr; 12067 ctf_calc_rwin(so, tp); 12068 12069 if ((thflags & TH_RST) || 12070 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12071 return (ctf_process_rst(m, th, so, tp)); 12072 /* 12073 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12074 * synchronized state. 12075 */ 12076 if (thflags & TH_SYN) { 12077 ctf_challenge_ack(m, th, tp, &ret_val); 12078 return (ret_val); 12079 } 12080 /* 12081 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12082 * it's less than ts_recent, drop it. 12083 */ 12084 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12085 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12086 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12087 return (ret_val); 12088 } 12089 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12090 &rack->r_ctl.challenge_ack_ts, 12091 &rack->r_ctl.challenge_ack_cnt)) { 12092 return (ret_val); 12093 } 12094 /* 12095 * If new data are received on a connection after the user processes 12096 * are gone, then RST the other end. 12097 */ 12098 if ((so->so_state & SS_NOFDREF) && tlen) { 12099 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12100 return (1); 12101 } 12102 /* 12103 * If last ACK falls within this segment's sequence numbers, record 12104 * its timestamp. NOTE: 1) That the test incorporates suggestions 12105 * from the latest proposal of the tcplw@cray.com list (Braden 12106 * 1993/04/26). 2) That updating only on newer timestamps interferes 12107 * with our earlier PAWS tests, so this check should be solely 12108 * predicated on the sequence space of this segment. 
3) That we 12109 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12110 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12111 * SEG.Len, This modified check allows us to overcome RFC1323's 12112 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12113 * p.869. In such cases, we can still calculate the RTT correctly 12114 * when RCV.NXT == Last.ACK.Sent. 12115 */ 12116 if ((to->to_flags & TOF_TS) != 0 && 12117 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12118 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12119 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12120 tp->ts_recent_age = tcp_ts_getticks(); 12121 tp->ts_recent = to->to_tsval; 12122 } 12123 /* 12124 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12125 * is on (half-synchronized state), then queue data for later 12126 * processing; else drop segment and return. 12127 */ 12128 if ((thflags & TH_ACK) == 0) { 12129 if (tp->t_flags & TF_NEEDSYN) { 12130 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12131 tiwin, thflags, nxt_pkt)); 12132 } else if (tp->t_flags & TF_ACKNOW) { 12133 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12134 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12135 return (ret_val); 12136 } else { 12137 ctf_do_drop(m, NULL); 12138 return (0); 12139 } 12140 } 12141 /* 12142 * Ack processing. 12143 */ 12144 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12145 return (ret_val); 12146 } 12147 if (ourfinisacked) { 12148 tcp_twstart(tp); 12149 m_freem(m); 12150 return (1); 12151 } 12152 if (sbavail(&so->so_snd)) { 12153 if (ctf_progress_timeout_check(tp, true)) { 12154 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12155 tp, tick, PROGRESS_DROP, __LINE__); 12156 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12157 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12158 return (1); 12159 } 12160 } 12161 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12162 tiwin, thflags, nxt_pkt)); 12163 } 12164 12165 /* 12166 * Return value of 1, the TCB is unlocked and most 12167 * likely gone, return value of 0, the TCP is still 12168 * locked. 12169 */ 12170 static int 12171 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12172 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12173 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12174 { 12175 int32_t ret_val = 0; 12176 int32_t ourfinisacked = 0; 12177 struct tcp_rack *rack; 12178 12179 rack = (struct tcp_rack *)tp->t_fb_ptr; 12180 ctf_calc_rwin(so, tp); 12181 12182 if ((thflags & TH_RST) || 12183 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12184 return (ctf_process_rst(m, th, so, tp)); 12185 /* 12186 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12187 * synchronized state. 12188 */ 12189 if (thflags & TH_SYN) { 12190 ctf_challenge_ack(m, th, tp, &ret_val); 12191 return (ret_val); 12192 } 12193 /* 12194 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12195 * it's less than ts_recent, drop it. 
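	 * Illustrative: if ts_recent is 1000 and an old duplicate arrives
	 * carrying a tsval of 900, TSTMP_LT(900, 1000) is true and
	 * ctf_ts_check() decides whether to drop the stale segment (after
	 * ACKing it) or, if ts_recent itself has aged out, to invalidate
	 * ts_recent and let the segment through.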
12196 */ 12197 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12198 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12199 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12200 return (ret_val); 12201 } 12202 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12203 &rack->r_ctl.challenge_ack_ts, 12204 &rack->r_ctl.challenge_ack_cnt)) { 12205 return (ret_val); 12206 } 12207 /* 12208 * If new data are received on a connection after the user processes 12209 * are gone, then RST the other end. 12210 */ 12211 if ((so->so_state & SS_NOFDREF) && tlen) { 12212 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12213 return (1); 12214 } 12215 /* 12216 * If last ACK falls within this segment's sequence numbers, record 12217 * its timestamp. NOTE: 1) That the test incorporates suggestions 12218 * from the latest proposal of the tcplw@cray.com list (Braden 12219 * 1993/04/26). 2) That updating only on newer timestamps interferes 12220 * with our earlier PAWS tests, so this check should be solely 12221 * predicated on the sequence space of this segment. 3) That we 12222 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12223 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12224 * SEG.Len, This modified check allows us to overcome RFC1323's 12225 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12226 * p.869. In such cases, we can still calculate the RTT correctly 12227 * when RCV.NXT == Last.ACK.Sent. 12228 */ 12229 if ((to->to_flags & TOF_TS) != 0 && 12230 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12231 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12232 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12233 tp->ts_recent_age = tcp_ts_getticks(); 12234 tp->ts_recent = to->to_tsval; 12235 } 12236 /* 12237 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12238 * is on (half-synchronized state), then queue data for later 12239 * processing; else drop segment and return. 12240 */ 12241 if ((thflags & TH_ACK) == 0) { 12242 if (tp->t_flags & TF_NEEDSYN) { 12243 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12244 tiwin, thflags, nxt_pkt)); 12245 } else if (tp->t_flags & TF_ACKNOW) { 12246 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12247 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12248 return (ret_val); 12249 } else { 12250 ctf_do_drop(m, NULL); 12251 return (0); 12252 } 12253 } 12254 /* 12255 * case TCPS_LAST_ACK: Ack processing. 12256 */ 12257 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12258 return (ret_val); 12259 } 12260 if (ourfinisacked) { 12261 tp = tcp_close(tp); 12262 ctf_do_drop(m, tp); 12263 return (1); 12264 } 12265 if (sbavail(&so->so_snd)) { 12266 if (ctf_progress_timeout_check(tp, true)) { 12267 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12268 tp, tick, PROGRESS_DROP, __LINE__); 12269 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12270 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12271 return (1); 12272 } 12273 } 12274 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12275 tiwin, thflags, nxt_pkt)); 12276 } 12277 12278 /* 12279 * Return value of 1, the TCB is unlocked and most 12280 * likely gone, return value of 0, the TCP is still 12281 * locked. 
12282 */ 12283 static int 12284 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12285 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12286 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12287 { 12288 int32_t ret_val = 0; 12289 int32_t ourfinisacked = 0; 12290 struct tcp_rack *rack; 12291 12292 rack = (struct tcp_rack *)tp->t_fb_ptr; 12293 ctf_calc_rwin(so, tp); 12294 12295 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12296 if ((thflags & TH_RST) || 12297 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12298 return (ctf_process_rst(m, th, so, tp)); 12299 /* 12300 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12301 * synchronized state. 12302 */ 12303 if (thflags & TH_SYN) { 12304 ctf_challenge_ack(m, th, tp, &ret_val); 12305 return (ret_val); 12306 } 12307 /* 12308 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12309 * it's less than ts_recent, drop it. 12310 */ 12311 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12312 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12313 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12314 return (ret_val); 12315 } 12316 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12317 &rack->r_ctl.challenge_ack_ts, 12318 &rack->r_ctl.challenge_ack_cnt)) { 12319 return (ret_val); 12320 } 12321 /* 12322 * If new data are received on a connection after the user processes 12323 * are gone, then RST the other end. 12324 */ 12325 if ((so->so_state & SS_NOFDREF) && 12326 tlen) { 12327 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12328 return (1); 12329 } 12330 /* 12331 * If last ACK falls within this segment's sequence numbers, record 12332 * its timestamp. NOTE: 1) That the test incorporates suggestions 12333 * from the latest proposal of the tcplw@cray.com list (Braden 12334 * 1993/04/26). 2) That updating only on newer timestamps interferes 12335 * with our earlier PAWS tests, so this check should be solely 12336 * predicated on the sequence space of this segment. 3) That we 12337 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12338 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12339 * SEG.Len, This modified check allows us to overcome RFC1323's 12340 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12341 * p.869. In such cases, we can still calculate the RTT correctly 12342 * when RCV.NXT == Last.ACK.Sent. 12343 */ 12344 if ((to->to_flags & TOF_TS) != 0 && 12345 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12346 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12347 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12348 tp->ts_recent_age = tcp_ts_getticks(); 12349 tp->ts_recent = to->to_tsval; 12350 } 12351 /* 12352 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12353 * is on (half-synchronized state), then queue data for later 12354 * processing; else drop segment and return. 12355 */ 12356 if ((thflags & TH_ACK) == 0) { 12357 if (tp->t_flags & TF_NEEDSYN) { 12358 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12359 tiwin, thflags, nxt_pkt)); 12360 } else if (tp->t_flags & TF_ACKNOW) { 12361 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12362 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12363 return (ret_val); 12364 } else { 12365 ctf_do_drop(m, NULL); 12366 return (0); 12367 } 12368 } 12369 /* 12370 * Ack processing. 
12371 */ 12372 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12373 return (ret_val); 12374 } 12375 if (sbavail(&so->so_snd)) { 12376 if (ctf_progress_timeout_check(tp, true)) { 12377 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12378 tp, tick, PROGRESS_DROP, __LINE__); 12379 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12380 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12381 return (1); 12382 } 12383 } 12384 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12385 tiwin, thflags, nxt_pkt)); 12386 } 12387 12388 static void inline 12389 rack_clear_rate_sample(struct tcp_rack *rack) 12390 { 12391 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12392 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12393 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12394 } 12395 12396 static void 12397 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12398 { 12399 uint64_t bw_est, rate_wanted; 12400 int chged = 0; 12401 uint32_t user_max, orig_min, orig_max; 12402 12403 orig_min = rack->r_ctl.rc_pace_min_segs; 12404 orig_max = rack->r_ctl.rc_pace_max_segs; 12405 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12406 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12407 chged = 1; 12408 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12409 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12410 if (user_max != rack->r_ctl.rc_pace_max_segs) 12411 chged = 1; 12412 } 12413 if (rack->rc_force_max_seg) { 12414 rack->r_ctl.rc_pace_max_segs = user_max; 12415 } else if (rack->use_fixed_rate) { 12416 bw_est = rack_get_bw(rack); 12417 if ((rack->r_ctl.crte == NULL) || 12418 (bw_est != rack->r_ctl.crte->rate)) { 12419 rack->r_ctl.rc_pace_max_segs = user_max; 12420 } else { 12421 /* We are pacing right at the hardware rate */ 12422 uint32_t segsiz; 12423 12424 segsiz = min(ctf_fixed_maxseg(tp), 12425 rack->r_ctl.rc_pace_min_segs); 12426 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12427 tp, bw_est, segsiz, 0, 12428 rack->r_ctl.crte, NULL); 12429 } 12430 } else if (rack->rc_always_pace) { 12431 if (rack->r_ctl.gp_bw || 12432 #ifdef NETFLIX_PEAKRATE 12433 rack->rc_tp->t_maxpeakrate || 12434 #endif 12435 rack->r_ctl.init_rate) { 12436 /* We have a rate of some sort set */ 12437 uint32_t orig; 12438 12439 bw_est = rack_get_bw(rack); 12440 orig = rack->r_ctl.rc_pace_max_segs; 12441 if (fill_override) 12442 rate_wanted = *fill_override; 12443 else 12444 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12445 if (rate_wanted) { 12446 /* We have something */ 12447 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12448 rate_wanted, 12449 ctf_fixed_maxseg(rack->rc_tp)); 12450 } else 12451 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12452 if (orig != rack->r_ctl.rc_pace_max_segs) 12453 chged = 1; 12454 } else if ((rack->r_ctl.gp_bw == 0) && 12455 (rack->r_ctl.rc_pace_max_segs == 0)) { 12456 /* 12457 * If we have nothing limit us to bursting 12458 * out IW sized pieces. 
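			 * For example, if rc_init_window() works out to
			 * 10 * 1448 bytes for this connection, pacing bursts
			 * are capped near 14480 bytes until a goodput (gp_bw)
			 * or configured init_rate estimate exists.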
12459 */ 12460 chged = 1; 12461 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12462 } 12463 } 12464 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12465 chged = 1; 12466 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12467 } 12468 if (chged) 12469 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12470 } 12471 12472 12473 static void 12474 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12475 { 12476 #ifdef INET6 12477 struct ip6_hdr *ip6 = NULL; 12478 #endif 12479 #ifdef INET 12480 struct ip *ip = NULL; 12481 #endif 12482 struct udphdr *udp = NULL; 12483 12484 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12485 #ifdef INET6 12486 if (rack->r_is_v6) { 12487 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12488 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12489 if (tp->t_port) { 12490 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12491 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12492 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12493 udp->uh_dport = tp->t_port; 12494 rack->r_ctl.fsb.udp = udp; 12495 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12496 } else 12497 { 12498 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12499 rack->r_ctl.fsb.udp = NULL; 12500 } 12501 tcpip_fillheaders(rack->rc_inp, 12502 tp->t_port, 12503 ip6, rack->r_ctl.fsb.th); 12504 } else 12505 #endif /* INET6 */ 12506 { 12507 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12508 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12509 if (tp->t_port) { 12510 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12511 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12512 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12513 udp->uh_dport = tp->t_port; 12514 rack->r_ctl.fsb.udp = udp; 12515 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12516 } else 12517 { 12518 rack->r_ctl.fsb.udp = NULL; 12519 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12520 } 12521 tcpip_fillheaders(rack->rc_inp, 12522 tp->t_port, 12523 ip, rack->r_ctl.fsb.th); 12524 } 12525 rack->r_fsb_inited = 1; 12526 } 12527 12528 static int 12529 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12530 { 12531 /* 12532 * Allocate the larger of spaces V6 if available else just 12533 * V4 and include udphdr (overbook) 12534 */ 12535 #ifdef INET6 12536 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12537 #else 12538 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12539 #endif 12540 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12541 M_TCPFSB, M_NOWAIT|M_ZERO); 12542 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12543 return (ENOMEM); 12544 } 12545 rack->r_fsb_inited = 0; 12546 return (0); 12547 } 12548 12549 static int 12550 rack_init(struct tcpcb *tp) 12551 { 12552 struct tcp_rack *rack = NULL; 12553 struct rack_sendmap *insret; 12554 uint32_t iwin, snt, us_cts; 12555 int err; 12556 12557 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12558 if (tp->t_fb_ptr == NULL) { 12559 /* 12560 * We need to allocate memory but cant. The INP and INP_INFO 12561 * locks and they are recusive (happens during setup. 
So a 12562 * scheme to drop the locks fails :( 12563 * 12564 */ 12565 return (ENOMEM); 12566 } 12567 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12568 12569 rack = (struct tcp_rack *)tp->t_fb_ptr; 12570 RB_INIT(&rack->r_ctl.rc_mtree); 12571 TAILQ_INIT(&rack->r_ctl.rc_free); 12572 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12573 rack->rc_tp = tp; 12574 rack->rc_inp = tp->t_inpcb; 12575 /* Set the flag */ 12576 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12577 /* Probably not needed but lets be sure */ 12578 rack_clear_rate_sample(rack); 12579 /* 12580 * Save off the default values, socket options will poke 12581 * at these if pacing is not on or we have not yet 12582 * reached where pacing is on (gp_ready/fixed enabled). 12583 * When they get set into the CC module (when gp_ready 12584 * is enabled or we enable fixed) then we will set these 12585 * values into the CC and place in here the old values 12586 * so we have a restoral. Then we will set the flag 12587 * rc_pacing_cc_set. That way whenever we turn off pacing 12588 * or switch off this stack, we will know to go restore 12589 * the saved values. 12590 */ 12591 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12592 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12593 /* We want abe like behavior as well */ 12594 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12595 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12596 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12597 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12598 rack->r_ctl.roundends = tp->snd_max; 12599 if (use_rack_rr) 12600 rack->use_rack_rr = 1; 12601 if (V_tcp_delack_enabled) 12602 tp->t_delayed_ack = 1; 12603 else 12604 tp->t_delayed_ack = 0; 12605 #ifdef TCP_ACCOUNTING 12606 if (rack_tcp_accounting) { 12607 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12608 } 12609 #endif 12610 if (rack_enable_shared_cwnd) 12611 rack->rack_enable_scwnd = 1; 12612 rack->rc_user_set_max_segs = rack_hptsi_segments; 12613 rack->rc_force_max_seg = 0; 12614 if (rack_use_imac_dack) 12615 rack->rc_dack_mode = 1; 12616 TAILQ_INIT(&rack->r_ctl.opt_list); 12617 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12618 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12619 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12620 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12621 rack->r_ctl.rc_highest_us_rtt = 0; 12622 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12623 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12624 if (rack_use_cmp_acks) 12625 rack->r_use_cmp_ack = 1; 12626 if (rack_disable_prr) 12627 rack->rack_no_prr = 1; 12628 if (rack_gp_no_rec_chg) 12629 rack->rc_gp_no_rec_chg = 1; 12630 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12631 rack->rc_always_pace = 1; 12632 if (rack->use_fixed_rate || rack->gp_ready) 12633 rack_set_cc_pacing(rack); 12634 } else 12635 rack->rc_always_pace = 0; 12636 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12637 rack->r_mbuf_queue = 1; 12638 else 12639 rack->r_mbuf_queue = 0; 12640 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12641 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12642 else 12643 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12644 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12645 if (rack_limits_scwnd) 12646 rack->r_limit_scw = 1; 12647 else 12648 rack->r_limit_scw = 0; 12649 rack->rc_labc = V_tcp_abc_l_var; 12650 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12651 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12652 
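	/*
	 * The block below snapshots the remaining rack sysctl defaults
	 * (rate-sample method, TLP threshold use, PRR send limit, the
	 * gp_* pacing multipliers, probe-rtt filter life, and so on)
	 * into per-connection state, so later sysctl changes only affect
	 * connections created afterwards.
	 */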
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12653 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12654 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12655 rack->r_ctl.rc_min_to = rack_min_to; 12656 microuptime(&rack->r_ctl.act_rcv_time); 12657 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12658 rack->r_running_late = 0; 12659 rack->r_running_early = 0; 12660 rack->rc_init_win = rack_default_init_window; 12661 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12662 if (rack_hw_up_only) 12663 rack->r_up_only = 1; 12664 if (rack_do_dyn_mul) { 12665 /* When dynamic adjustment is on CA needs to start at 100% */ 12666 rack->rc_gp_dyn_mul = 1; 12667 if (rack_do_dyn_mul >= 100) 12668 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12669 } else 12670 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12671 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12672 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12673 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12674 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12675 rack_probertt_filter_life); 12676 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12677 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12678 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12679 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12680 rack->r_ctl.rc_time_probertt_starts = 0; 12681 if (rack_dsack_std_based & 0x1) { 12682 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12683 rack->rc_rack_tmr_std_based = 1; 12684 } 12685 if (rack_dsack_std_based & 0x2) { 12686 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12687 rack->rc_rack_use_dsack = 1; 12688 } 12689 /* We require at least one measurement, even if the sysctl is 0 */ 12690 if (rack_req_measurements) 12691 rack->r_ctl.req_measurements = rack_req_measurements; 12692 else 12693 rack->r_ctl.req_measurements = 1; 12694 if (rack_enable_hw_pacing) 12695 rack->rack_hdw_pace_ena = 1; 12696 if (rack_hw_rate_caps) 12697 rack->r_rack_hw_rate_caps = 1; 12698 /* Do we force on detection? 
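	 * (Presumably the experimental SACK attack detection under
	 * NETFLIX_EXP_DETECTION: tcp_force_detection turns it on for
	 * every connection rather than leaving it to be enabled per
	 * connection.)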
*/ 12699 #ifdef NETFLIX_EXP_DETECTION 12700 if (tcp_force_detection) 12701 rack->do_detection = 1; 12702 else 12703 #endif 12704 rack->do_detection = 0; 12705 if (rack_non_rxt_use_cr) 12706 rack->rack_rec_nonrxt_use_cr = 1; 12707 err = rack_init_fsb(tp, rack); 12708 if (err) { 12709 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12710 tp->t_fb_ptr = NULL; 12711 return (err); 12712 } 12713 if (tp->snd_una != tp->snd_max) { 12714 /* Create a send map for the current outstanding data */ 12715 struct rack_sendmap *rsm; 12716 12717 rsm = rack_alloc(rack); 12718 if (rsm == NULL) { 12719 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12720 tp->t_fb_ptr = NULL; 12721 return (ENOMEM); 12722 } 12723 rsm->r_no_rtt_allowed = 1; 12724 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12725 rsm->r_rtr_cnt = 1; 12726 rsm->r_rtr_bytes = 0; 12727 if (tp->t_flags & TF_SENTFIN) { 12728 rsm->r_end = tp->snd_max - 1; 12729 rsm->r_flags |= RACK_HAS_FIN; 12730 } else { 12731 rsm->r_end = tp->snd_max; 12732 } 12733 if (tp->snd_una == tp->iss) { 12734 /* The data space is one beyond snd_una */ 12735 rsm->r_flags |= RACK_HAS_SYN; 12736 rsm->r_start = tp->iss; 12737 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12738 } else 12739 rsm->r_start = tp->snd_una; 12740 rsm->r_dupack = 0; 12741 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12742 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12743 if (rsm->m) 12744 rsm->orig_m_len = rsm->m->m_len; 12745 else 12746 rsm->orig_m_len = 0; 12747 } else { 12748 /* 12749 * This can happen if we have a stand-alone FIN or 12750 * SYN. 12751 */ 12752 rsm->m = NULL; 12753 rsm->orig_m_len = 0; 12754 rsm->soff = 0; 12755 } 12756 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12757 #ifdef INVARIANTS 12758 if (insret != NULL) { 12759 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12760 insret, rack, rsm); 12761 } 12762 #endif 12763 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12764 rsm->r_in_tmap = 1; 12765 } 12766 /* 12767 * Timers in Rack are kept in microseconds so lets 12768 * convert any initial incoming variables 12769 * from ticks into usecs. Note that we 12770 * also change the values of t_srtt and t_rttvar, if 12771 * they are non-zero. They are kept with a 5 12772 * bit decimal so we have to carefully convert 12773 * these to get the full precision. 12774 */ 12775 rack_convert_rtts(tp); 12776 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12777 if (rack_do_hystart) { 12778 struct sockopt sopt; 12779 struct cc_newreno_opts opt; 12780 12781 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 12782 sopt.sopt_dir = SOPT_SET; 12783 opt.name = CC_NEWRENO_ENABLE_HYSTART; 12784 opt.val = rack_do_hystart; 12785 if (CC_ALGO(tp)->ctl_output != NULL) 12786 (void)CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 12787 } 12788 if (rack_def_profile) 12789 rack_set_profile(rack, rack_def_profile); 12790 /* Cancel the GP measurement in progress */ 12791 tp->t_flags &= ~TF_GPUTINPROG; 12792 if (SEQ_GT(tp->snd_max, tp->iss)) 12793 snt = tp->snd_max - tp->iss; 12794 else 12795 snt = 0; 12796 iwin = rc_init_window(rack); 12797 if (snt < iwin) { 12798 /* We are not past the initial window 12799 * so we need to make sure cwnd is 12800 * correct. 12801 */ 12802 if (tp->snd_cwnd < iwin) 12803 tp->snd_cwnd = iwin; 12804 /* 12805 * If we are within the initial window 12806 * we want ssthresh to be unlimited. 
Setting 12807 * it to the rwnd (which the default stack does 12808 * and older racks) is not really a good idea 12809 * since we want to be in SS and grow both the 12810 * cwnd and the rwnd (via dynamic rwnd growth). If 12811 * we set it to the rwnd then as the peer grows its 12812 * rwnd we will be stuck in CA and never hit SS. 12813 * 12814 * Its far better to raise it up high (this takes the 12815 * risk that there as been a loss already, probably 12816 * we should have an indicator in all stacks of loss 12817 * but we don't), but considering the normal use this 12818 * is a risk worth taking. The consequences of not 12819 * hitting SS are far worse than going one more time 12820 * into it early on (before we have sent even a IW). 12821 * It is highly unlikely that we will have had a loss 12822 * before getting the IW out. 12823 */ 12824 tp->snd_ssthresh = 0xffffffff; 12825 } 12826 rack_stop_all_timers(tp); 12827 /* Lets setup the fsb block */ 12828 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12829 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12830 __LINE__, RACK_RTTS_INIT); 12831 return (0); 12832 } 12833 12834 static int 12835 rack_handoff_ok(struct tcpcb *tp) 12836 { 12837 if ((tp->t_state == TCPS_CLOSED) || 12838 (tp->t_state == TCPS_LISTEN)) { 12839 /* Sure no problem though it may not stick */ 12840 return (0); 12841 } 12842 if ((tp->t_state == TCPS_SYN_SENT) || 12843 (tp->t_state == TCPS_SYN_RECEIVED)) { 12844 /* 12845 * We really don't know if you support sack, 12846 * you have to get to ESTAB or beyond to tell. 12847 */ 12848 return (EAGAIN); 12849 } 12850 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12851 /* 12852 * Rack will only send a FIN after all data is acknowledged. 12853 * So in this case we have more data outstanding. We can't 12854 * switch stacks until either all data and only the FIN 12855 * is left (in which case rack_init() now knows how 12856 * to deal with that) <or> all is acknowledged and we 12857 * are only left with incoming data, though why you 12858 * would want to switch to rack after all data is acknowledged 12859 * I have no idea (rrs)! 12860 */ 12861 return (EAGAIN); 12862 } 12863 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12864 return (0); 12865 } 12866 /* 12867 * If we reach here we don't do SACK on this connection so we can 12868 * never do rack. 12869 */ 12870 return (EINVAL); 12871 } 12872 12873 12874 static void 12875 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12876 { 12877 int ack_cmp = 0; 12878 12879 if (tp->t_fb_ptr) { 12880 struct tcp_rack *rack; 12881 struct rack_sendmap *rsm, *nrsm, *rm; 12882 12883 rack = (struct tcp_rack *)tp->t_fb_ptr; 12884 if (tp->t_in_pkt) { 12885 /* 12886 * It is unsafe to process the packets since a 12887 * reset may be lurking in them (its rare but it 12888 * can occur). If we were to find a RST, then we 12889 * would end up dropping the connection and the 12890 * INP lock, so when we return the caller (tcp_usrreq) 12891 * will blow up when it trys to unlock the inp. 12892 */ 12893 struct mbuf *save, *m; 12894 12895 m = tp->t_in_pkt; 12896 tp->t_in_pkt = NULL; 12897 tp->t_tail_pkt = NULL; 12898 while (m) { 12899 save = m->m_nextpkt; 12900 m->m_nextpkt = NULL; 12901 m_freem(m); 12902 m = save; 12903 } 12904 if ((tp->t_inpcb) && 12905 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP)) 12906 ack_cmp = 1; 12907 if (ack_cmp) { 12908 /* Total if we used large or small (if ack-cmp was used). 
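				 * (INP_MBUF_L_ACKS marks connections that
				 * were fed the larger compressed-ack mbufs,
				 * so the two counters simply split that
				 * accounting.)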
*/ 12909 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS) 12910 counter_u64_add(rack_large_ackcmp, 1); 12911 else 12912 counter_u64_add(rack_small_ackcmp, 1); 12913 } 12914 } 12915 tp->t_flags &= ~TF_FORCEDATA; 12916 #ifdef NETFLIX_SHARED_CWND 12917 if (rack->r_ctl.rc_scw) { 12918 uint32_t limit; 12919 12920 if (rack->r_limit_scw) 12921 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12922 else 12923 limit = 0; 12924 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12925 rack->r_ctl.rc_scw_index, 12926 limit); 12927 rack->r_ctl.rc_scw = NULL; 12928 } 12929 #endif 12930 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12931 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12932 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12933 rack->r_ctl.fsb.th = NULL; 12934 } 12935 /* Convert back to ticks, with */ 12936 if (tp->t_srtt > 1) { 12937 uint32_t val, frac; 12938 12939 val = USEC_2_TICKS(tp->t_srtt); 12940 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12941 tp->t_srtt = val << TCP_RTT_SHIFT; 12942 /* 12943 * frac is the fractional part here is left 12944 * over from converting to hz and shifting. 12945 * We need to convert this to the 5 bit 12946 * remainder. 12947 */ 12948 if (frac) { 12949 if (hz == 1000) { 12950 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12951 } else { 12952 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12953 } 12954 tp->t_srtt += frac; 12955 } 12956 } 12957 if (tp->t_rttvar) { 12958 uint32_t val, frac; 12959 12960 val = USEC_2_TICKS(tp->t_rttvar); 12961 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12962 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12963 /* 12964 * frac is the fractional part here is left 12965 * over from converting to hz and shifting. 12966 * We need to convert this to the 5 bit 12967 * remainder. 
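			 * Illustrative arithmetic (hz = 1000): 40500 usec
			 * gives val = 40 ticks with frac = 500 usec left
			 * over; 500 * TCP_RTT_SCALE / 1000 = 16, i.e. half a
			 * tick expressed in the fractional bits, so the
			 * stored fixed-point value becomes (40 << shift) + 16.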
12968 */ 12969 if (frac) { 12970 if (hz == 1000) { 12971 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12972 } else { 12973 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12974 } 12975 tp->t_rttvar += frac; 12976 } 12977 } 12978 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12979 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12980 if (rack->rc_always_pace) { 12981 tcp_decrement_paced_conn(); 12982 rack_undo_cc_pacing(rack); 12983 rack->rc_always_pace = 0; 12984 } 12985 /* Clean up any options if they were not applied */ 12986 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12987 struct deferred_opt_list *dol; 12988 12989 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12990 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12991 free(dol, M_TCPDO); 12992 } 12993 /* rack does not use force data but other stacks may clear it */ 12994 if (rack->r_ctl.crte != NULL) { 12995 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12996 rack->rack_hdrw_pacing = 0; 12997 rack->r_ctl.crte = NULL; 12998 } 12999 #ifdef TCP_BLACKBOX 13000 tcp_log_flowend(tp); 13001 #endif 13002 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 13003 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 13004 #ifdef INVARIANTS 13005 if (rm != rsm) { 13006 panic("At fini, rack:%p rsm:%p rm:%p", 13007 rack, rsm, rm); 13008 } 13009 #endif 13010 uma_zfree(rack_zone, rsm); 13011 } 13012 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13013 while (rsm) { 13014 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 13015 uma_zfree(rack_zone, rsm); 13016 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13017 } 13018 rack->rc_free_cnt = 0; 13019 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 13020 tp->t_fb_ptr = NULL; 13021 } 13022 if (tp->t_inpcb) { 13023 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 13024 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 13025 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 13026 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 13027 /* Cancel the GP measurement in progress */ 13028 tp->t_flags &= ~TF_GPUTINPROG; 13029 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 13030 } 13031 /* Make sure snd_nxt is correctly set */ 13032 tp->snd_nxt = tp->snd_max; 13033 } 13034 13035 static void 13036 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 13037 { 13038 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 13039 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 13040 } 13041 switch (tp->t_state) { 13042 case TCPS_SYN_SENT: 13043 rack->r_state = TCPS_SYN_SENT; 13044 rack->r_substate = rack_do_syn_sent; 13045 break; 13046 case TCPS_SYN_RECEIVED: 13047 rack->r_state = TCPS_SYN_RECEIVED; 13048 rack->r_substate = rack_do_syn_recv; 13049 break; 13050 case TCPS_ESTABLISHED: 13051 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13052 rack->r_state = TCPS_ESTABLISHED; 13053 rack->r_substate = rack_do_established; 13054 break; 13055 case TCPS_CLOSE_WAIT: 13056 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13057 rack->r_state = TCPS_CLOSE_WAIT; 13058 rack->r_substate = rack_do_close_wait; 13059 break; 13060 case TCPS_FIN_WAIT_1: 13061 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13062 rack->r_state = TCPS_FIN_WAIT_1; 13063 rack->r_substate = rack_do_fin_wait_1; 13064 break; 13065 case TCPS_CLOSING: 13066 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13067 rack->r_state = TCPS_CLOSING; 13068 rack->r_substate = rack_do_closing; 13069 break; 13070 case TCPS_LAST_ACK: 13071 rack_set_pace_segments(tp, rack, 
__LINE__, NULL); 13072 rack->r_state = TCPS_LAST_ACK; 13073 rack->r_substate = rack_do_lastack; 13074 break; 13075 case TCPS_FIN_WAIT_2: 13076 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13077 rack->r_state = TCPS_FIN_WAIT_2; 13078 rack->r_substate = rack_do_fin_wait_2; 13079 break; 13080 case TCPS_LISTEN: 13081 case TCPS_CLOSED: 13082 case TCPS_TIME_WAIT: 13083 default: 13084 break; 13085 }; 13086 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13087 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13088 13089 } 13090 13091 static void 13092 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13093 { 13094 /* 13095 * We received an ack, and then did not 13096 * call send or were bounced out due to the 13097 * hpts was running. Now a timer is up as well, is 13098 * it the right timer? 13099 */ 13100 struct rack_sendmap *rsm; 13101 int tmr_up; 13102 13103 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13104 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13105 return; 13106 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13107 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13108 (tmr_up == PACE_TMR_RXT)) { 13109 /* Should be an RXT */ 13110 return; 13111 } 13112 if (rsm == NULL) { 13113 /* Nothing outstanding? */ 13114 if (tp->t_flags & TF_DELACK) { 13115 if (tmr_up == PACE_TMR_DELACK) 13116 /* We are supposed to have delayed ack up and we do */ 13117 return; 13118 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13119 /* 13120 * if we hit enobufs then we would expect the possiblity 13121 * of nothing outstanding and the RXT up (and the hptsi timer). 13122 */ 13123 return; 13124 } else if (((V_tcp_always_keepalive || 13125 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13126 (tp->t_state <= TCPS_CLOSING)) && 13127 (tmr_up == PACE_TMR_KEEP) && 13128 (tp->snd_max == tp->snd_una)) { 13129 /* We should have keep alive up and we do */ 13130 return; 13131 } 13132 } 13133 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13134 ((tmr_up == PACE_TMR_TLP) || 13135 (tmr_up == PACE_TMR_RACK) || 13136 (tmr_up == PACE_TMR_RXT))) { 13137 /* 13138 * Either a Rack, TLP or RXT is fine if we 13139 * have outstanding data. 13140 */ 13141 return; 13142 } else if (tmr_up == PACE_TMR_DELACK) { 13143 /* 13144 * If the delayed ack was going to go off 13145 * before the rtx/tlp/rack timer were going to 13146 * expire, then that would be the timer in control. 13147 * Note we don't check the time here trusting the 13148 * code is correct. 13149 */ 13150 return; 13151 } 13152 /* 13153 * Ok the timer originally started is not what we want now. 13154 * We will force the hpts to be stopped if any, and restart 13155 * with the slot set to what was in the saved slot. 
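	 * If a paced output event was still pending and had not yet come
	 * due, the code below also credits how early it is being pulled
	 * (r_early / rc_agg_early) before removing the connection from the
	 * hpts wheel and restarting the timer.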
13156 */ 13157 if (rack->rc_inp->inp_in_hpts) { 13158 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13159 uint32_t us_cts; 13160 13161 us_cts = tcp_get_usecs(NULL); 13162 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13163 rack->r_early = 1; 13164 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13165 } 13166 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13167 } 13168 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 13169 } 13170 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13171 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13172 } 13173 13174 13175 static void 13176 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13177 { 13178 if ((SEQ_LT(tp->snd_wl1, seq) || 13179 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13180 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13181 /* keep track of pure window updates */ 13182 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13183 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13184 tp->snd_wnd = tiwin; 13185 rack_validate_fo_sendwin_up(tp, rack); 13186 tp->snd_wl1 = seq; 13187 tp->snd_wl2 = ack; 13188 if (tp->snd_wnd > tp->max_sndwnd) 13189 tp->max_sndwnd = tp->snd_wnd; 13190 rack->r_wanted_output = 1; 13191 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13192 tp->snd_wnd = tiwin; 13193 rack_validate_fo_sendwin_up(tp, rack); 13194 tp->snd_wl1 = seq; 13195 tp->snd_wl2 = ack; 13196 } else { 13197 /* Not a valid win update */ 13198 return; 13199 } 13200 if (tp->snd_wnd > tp->max_sndwnd) 13201 tp->max_sndwnd = tp->snd_wnd; 13202 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 13203 /* The peer collapsed the window */ 13204 rack_collapsed_window(rack); 13205 } else if (rack->rc_has_collapsed) 13206 rack_un_collapse_window(rack); 13207 /* Do we exit persists? */ 13208 if ((rack->rc_in_persist != 0) && 13209 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13210 rack->r_ctl.rc_pace_min_segs))) { 13211 rack_exit_persist(tp, rack, cts); 13212 } 13213 /* Do we enter persists? */ 13214 if ((rack->rc_in_persist == 0) && 13215 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13216 TCPS_HAVEESTABLISHED(tp->t_state) && 13217 (tp->snd_max == tp->snd_una) && 13218 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13219 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13220 /* 13221 * Here the rwnd is less than 13222 * the pacing size, we are established, 13223 * nothing is outstanding, and there is 13224 * data to send. Enter persists. 
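		 * For example: with rc_pace_min_segs at 1448 bytes (and
		 * rc_high_rwnd/2 larger than that), a peer window shrunk to
		 * 1000 bytes, snd_max == snd_una and 4000 bytes waiting in
		 * the socket buffer, we enter persist so the persist timer
		 * probes the tiny window instead of pacing out sub-MSS
		 * pieces.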
13225 */ 13226 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13227 } 13228 } 13229 13230 static void 13231 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13232 { 13233 13234 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13235 union tcp_log_stackspecific log; 13236 struct timeval ltv; 13237 char tcp_hdr_buf[60]; 13238 struct tcphdr *th; 13239 struct timespec ts; 13240 uint32_t orig_snd_una; 13241 uint8_t xx = 0; 13242 13243 #ifdef NETFLIX_HTTP_LOGGING 13244 struct http_sendfile_track *http_req; 13245 13246 if (SEQ_GT(ae->ack, tp->snd_una)) { 13247 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13248 } else { 13249 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13250 } 13251 #endif 13252 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13253 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 13254 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 13255 if (rack->rack_no_prr == 0) 13256 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13257 else 13258 log.u_bbr.flex1 = 0; 13259 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13260 log.u_bbr.use_lt_bw <<= 1; 13261 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13262 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13263 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13264 log.u_bbr.pkts_out = tp->t_maxseg; 13265 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13266 log.u_bbr.flex7 = 1; 13267 log.u_bbr.lost = ae->flags; 13268 log.u_bbr.cwnd_gain = ackval; 13269 log.u_bbr.pacing_gain = 0x2; 13270 if (ae->flags & TSTMP_HDWR) { 13271 /* Record the hardware timestamp if present */ 13272 log.u_bbr.flex3 = M_TSTMP; 13273 ts.tv_sec = ae->timestamp / 1000000000; 13274 ts.tv_nsec = ae->timestamp % 1000000000; 13275 ltv.tv_sec = ts.tv_sec; 13276 ltv.tv_usec = ts.tv_nsec / 1000; 13277 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13278 } else if (ae->flags & TSTMP_LRO) { 13279 /* Record the LRO the arrival timestamp */ 13280 log.u_bbr.flex3 = M_TSTMP_LRO; 13281 ts.tv_sec = ae->timestamp / 1000000000; 13282 ts.tv_nsec = ae->timestamp % 1000000000; 13283 ltv.tv_sec = ts.tv_sec; 13284 ltv.tv_usec = ts.tv_nsec / 1000; 13285 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13286 } 13287 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13288 /* Log the rcv time */ 13289 log.u_bbr.delRate = ae->timestamp; 13290 #ifdef NETFLIX_HTTP_LOGGING 13291 log.u_bbr.applimited = tp->t_http_closed; 13292 log.u_bbr.applimited <<= 8; 13293 log.u_bbr.applimited |= tp->t_http_open; 13294 log.u_bbr.applimited <<= 8; 13295 log.u_bbr.applimited |= tp->t_http_req; 13296 if (http_req) { 13297 /* Copy out any client req info */ 13298 /* seconds */ 13299 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13300 /* useconds */ 13301 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13302 log.u_bbr.rttProp = http_req->timestamp; 13303 log.u_bbr.cur_del_rate = http_req->start; 13304 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13305 log.u_bbr.flex8 |= 1; 13306 } else { 13307 log.u_bbr.flex8 |= 2; 13308 log.u_bbr.bw_inuse = http_req->end; 13309 } 13310 log.u_bbr.flex6 = http_req->start_seq; 13311 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13312 log.u_bbr.flex8 |= 4; 13313 log.u_bbr.epoch = http_req->end_seq; 13314 } 13315 } 13316 #endif 13317 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13318 th = (struct tcphdr *)tcp_hdr_buf; 13319 th->th_seq = ae->seq; 13320 th->th_ack = ae->ack; 13321 th->th_win = ae->win; 13322 /* Now fill in the ports */ 13323 th->th_sport = tp->t_inpcb->inp_fport; 13324 
        th->th_dport = tp->t_inpcb->inp_lport;
        th->th_flags = ae->flags & 0xff;
        /* Now do we have a timestamp option? */
        if (ae->flags & HAS_TSTMP) {
            u_char *cp;
            uint32_t val;

            th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
            cp = (u_char *)(th + 1);
            *cp = TCPOPT_NOP;
            cp++;
            *cp = TCPOPT_NOP;
            cp++;
            *cp = TCPOPT_TIMESTAMP;
            cp++;
            *cp = TCPOLEN_TIMESTAMP;
            cp++;
            val = htonl(ae->ts_value);
            bcopy((char *)&val,
                (char *)cp, sizeof(uint32_t));
            val = htonl(ae->ts_echo);
            bcopy((char *)&val,
                (char *)(cp + 4), sizeof(uint32_t));
        } else
            th->th_off = (sizeof(struct tcphdr) >> 2);

        /*
         * For sane logging we need to play a little trick.
         * If the ack were fully processed we would have moved
         * snd_una to high_seq, but since compressed acks are
         * processed in two phases, at this point (logging) snd_una
         * won't be advanced. So we would see multiple acks showing
         * the advancement. We can prevent that by "pretending" that
         * snd_una was advanced and then un-advancing it so that the
         * logging code has the right value for tlb_snd_una.
         */
        if (tp->snd_una != high_seq) {
            orig_snd_una = tp->snd_una;
            tp->snd_una = high_seq;
            xx = 1;
        } else
            xx = 0;
        TCP_LOG_EVENTP(tp, th,
            &tp->t_inpcb->inp_socket->so_rcv,
            &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0,
            0, &log, true, &ltv);
        if (xx) {
            tp->snd_una = orig_snd_una;
        }
    }

}

static int
rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv)
{
    /*
     * Handle a "special" compressed ack mbuf. Each incoming
     * ack has only four possible dispositions:
     *
     * A) It moves the cum-ack forward
     * B) It is behind the cum-ack.
     * C) It is a window-update ack.
     * D) It is a dup-ack.
     *
     * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES
     * in the incoming mbuf. We also need to still pay attention
     * to nxt_pkt since there may be another packet after this
     * one.
     */
#ifdef TCP_ACCOUNTING
    uint64_t ts_val;
    uint64_t rdstc;
#endif
    int segsiz;
    struct timespec ts;
    struct tcp_rack *rack;
    struct tcp_ackent *ae;
    uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack;
    int cnt, i, did_out, ourfinisacked = 0;
    struct tcpopt to_holder, *to = NULL;
    int win_up_req = 0;
    int nsegs = 0;
    int under_pacing = 1;
    int recovery = 0;
    int idx;
#ifdef TCP_ACCOUNTING
    sched_pin();
#endif
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->gp_ready &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT))
        under_pacing = 0;
    else
        under_pacing = 1;

    if (rack->r_state != tp->t_state)
        rack_set_state(tp, rack);
    if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_GPUTINPROG)) {
        /*
         * We have a goodput in progress
         * and we have entered a late state.
         * Do we have enough data in the sb
         * to handle the GPUT request?
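         *
         * (Illustrative numbers, not from the original source: with
         * snd_una = 1000, gput_seq = 41000 and gput_ack = 91000 the
         * measurement spans 50000 bytes and starts 40000 bytes past
         * snd_una, so roughly 90000 bytes must still be available in
         * the send buffer; if sbavail() reports less than that, the
         * measurement below is cancelled.)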
13429 */ 13430 uint32_t bytes; 13431 13432 bytes = tp->gput_ack - tp->gput_seq; 13433 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13434 bytes += tp->gput_seq - tp->snd_una; 13435 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13436 /* 13437 * There are not enough bytes in the socket 13438 * buffer that have been sent to cover this 13439 * measurement. Cancel it. 13440 */ 13441 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13442 rack->r_ctl.rc_gp_srtt /*flex1*/, 13443 tp->gput_seq, 13444 0, 0, 18, __LINE__, NULL, 0); 13445 tp->t_flags &= ~TF_GPUTINPROG; 13446 } 13447 } 13448 to = &to_holder; 13449 to->to_flags = 0; 13450 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13451 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13452 cnt = m->m_len / sizeof(struct tcp_ackent); 13453 idx = cnt / 5; 13454 if (idx >= MAX_NUM_OF_CNTS) 13455 idx = MAX_NUM_OF_CNTS - 1; 13456 counter_u64_add(rack_proc_comp_ack[idx], 1); 13457 counter_u64_add(rack_multi_single_eq, cnt); 13458 high_seq = tp->snd_una; 13459 the_win = tp->snd_wnd; 13460 win_seq = tp->snd_wl1; 13461 win_upd_ack = tp->snd_wl2; 13462 cts = tcp_tv_to_usectick(tv); 13463 ms_cts = tcp_tv_to_mssectick(tv); 13464 segsiz = ctf_fixed_maxseg(tp); 13465 if ((rack->rc_gp_dyn_mul) && 13466 (rack->use_fixed_rate == 0) && 13467 (rack->rc_always_pace)) { 13468 /* Check in on probertt */ 13469 rack_check_probe_rtt(rack, cts); 13470 } 13471 for (i = 0; i < cnt; i++) { 13472 #ifdef TCP_ACCOUNTING 13473 ts_val = get_cyclecount(); 13474 #endif 13475 rack_clear_rate_sample(rack); 13476 ae = ((mtod(m, struct tcp_ackent *)) + i); 13477 /* Setup the window */ 13478 tiwin = ae->win << tp->snd_scale; 13479 /* figure out the type of ack */ 13480 if (SEQ_LT(ae->ack, high_seq)) { 13481 /* Case B*/ 13482 ae->ack_val_set = ACK_BEHIND; 13483 } else if (SEQ_GT(ae->ack, high_seq)) { 13484 /* Case A */ 13485 ae->ack_val_set = ACK_CUMACK; 13486 } else if (tiwin == the_win) { 13487 /* Case D */ 13488 ae->ack_val_set = ACK_DUPACK; 13489 } else { 13490 /* Case C */ 13491 ae->ack_val_set = ACK_RWND; 13492 } 13493 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13494 /* Validate timestamp */ 13495 if (ae->flags & HAS_TSTMP) { 13496 /* Setup for a timestamp */ 13497 to->to_flags = TOF_TS; 13498 ae->ts_echo -= tp->ts_offset; 13499 to->to_tsecr = ae->ts_echo; 13500 to->to_tsval = ae->ts_value; 13501 /* 13502 * If echoed timestamp is later than the current time, fall back to 13503 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13504 * were used when this connection was established. 
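     *
     * (Sketch of the check that follows, assuming ms_cts is the current
     * time in the same millisecond-tick units as the peer's echo: an
     * echo "from the future", e.g. ts_echo = 5000 while ms_cts = 4000,
     * simply zeroes to_tsecr so no RTT sample is taken from it.)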
13505 */ 13506 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13507 to->to_tsecr = 0; 13508 if (tp->ts_recent && 13509 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13510 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13511 #ifdef TCP_ACCOUNTING 13512 rdstc = get_cyclecount(); 13513 if (rdstc > ts_val) { 13514 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13515 (rdstc - ts_val)); 13516 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13517 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13518 } 13519 } 13520 #endif 13521 continue; 13522 } 13523 } 13524 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13525 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13526 tp->ts_recent_age = tcp_ts_getticks(); 13527 tp->ts_recent = ae->ts_value; 13528 } 13529 } else { 13530 /* Setup for a no options */ 13531 to->to_flags = 0; 13532 } 13533 /* Update the rcv time and perform idle reduction possibly */ 13534 if (tp->t_idle_reduce && 13535 (tp->snd_max == tp->snd_una) && 13536 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13537 counter_u64_add(rack_input_idle_reduces, 1); 13538 rack_cc_after_idle(rack, tp); 13539 } 13540 tp->t_rcvtime = ticks; 13541 /* Now what about ECN? */ 13542 if (tp->t_flags2 & TF2_ECN_PERMIT) { 13543 if (ae->flags & TH_CWR) { 13544 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 13545 tp->t_flags |= TF_ACKNOW; 13546 } 13547 switch (ae->codepoint & IPTOS_ECN_MASK) { 13548 case IPTOS_ECN_CE: 13549 tp->t_flags2 |= TF2_ECN_SND_ECE; 13550 KMOD_TCPSTAT_INC(tcps_ecn_ce); 13551 break; 13552 case IPTOS_ECN_ECT0: 13553 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 13554 break; 13555 case IPTOS_ECN_ECT1: 13556 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 13557 break; 13558 } 13559 13560 /* Process a packet differently from RFC3168. */ 13561 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint); 13562 /* Congestion experienced. */ 13563 if (ae->flags & TH_ECE) { 13564 rack_cong_signal(tp, CC_ECN, ae->ack); 13565 } 13566 } 13567 #ifdef TCP_ACCOUNTING 13568 /* Count for the specific type of ack in */ 13569 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13570 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13571 tp->tcp_cnt_counters[ae->ack_val_set]++; 13572 } 13573 #endif 13574 /* 13575 * Note how we could move up these in the determination 13576 * above, but we don't so that way the timestamp checks (and ECN) 13577 * is done first before we do any processing on the ACK. 13578 * The non-compressed path through the code has this 13579 * weakness (noted by @jtl) that it actually does some 13580 * processing before verifying the timestamp information. 13581 * We don't take that path here which is why we set 13582 * the ack_val_set first, do the timestamp and ecn 13583 * processing, and then look at what we have setup. 
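     *
     * (As a rough sketch of the per-entry order used here: classify the
     * ack, log it, run the timestamp/PAWS checks, handle ECN, and only
     * then branch on ACK_BEHIND, ACK_DUPACK, ACK_RWND or ACK_CUMACK.)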
13584 */ 13585 if (ae->ack_val_set == ACK_BEHIND) { 13586 /* 13587 * Case B flag reordering, if window is not closed 13588 * or it could be a keep-alive or persists 13589 */ 13590 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13591 counter_u64_add(rack_reorder_seen, 1); 13592 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13593 } 13594 } else if (ae->ack_val_set == ACK_DUPACK) { 13595 /* Case D */ 13596 rack_strike_dupack(rack); 13597 } else if (ae->ack_val_set == ACK_RWND) { 13598 /* Case C */ 13599 win_up_req = 1; 13600 win_upd_ack = ae->ack; 13601 win_seq = ae->seq; 13602 the_win = tiwin; 13603 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13604 } else { 13605 /* Case A */ 13606 if (SEQ_GT(ae->ack, tp->snd_max)) { 13607 /* 13608 * We just send an ack since the incoming 13609 * ack is beyond the largest seq we sent. 13610 */ 13611 if ((tp->t_flags & TF_ACKNOW) == 0) { 13612 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13613 if (tp->t_flags && TF_ACKNOW) 13614 rack->r_wanted_output = 1; 13615 } 13616 } else { 13617 nsegs++; 13618 /* If the window changed setup to update */ 13619 if (tiwin != tp->snd_wnd) { 13620 win_upd_ack = ae->ack; 13621 win_seq = ae->seq; 13622 the_win = tiwin; 13623 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13624 } 13625 #ifdef TCP_ACCOUNTING 13626 /* Account for the acks */ 13627 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13628 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13629 } 13630 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13631 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13632 #endif 13633 high_seq = ae->ack; 13634 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends)) { 13635 rack->r_ctl.current_round++; 13636 rack->r_ctl.roundends = tp->snd_max; 13637 if (CC_ALGO(tp)->newround != NULL) { 13638 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13639 } 13640 } 13641 /* Setup our act_rcv_time */ 13642 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13643 ts.tv_sec = ae->timestamp / 1000000000; 13644 ts.tv_nsec = ae->timestamp % 1000000000; 13645 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13646 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13647 } else { 13648 rack->r_ctl.act_rcv_time = *tv; 13649 } 13650 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13651 if (rack->rc_dsack_round_seen) { 13652 /* Is the dsack round over? 
*/ 13653 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13654 /* Yes it is */ 13655 rack->rc_dsack_round_seen = 0; 13656 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13657 } 13658 } 13659 } 13660 } 13661 /* And lets be sure to commit the rtt measurements for this ack */ 13662 tcp_rack_xmit_timer_commit(rack, tp); 13663 #ifdef TCP_ACCOUNTING 13664 rdstc = get_cyclecount(); 13665 if (rdstc > ts_val) { 13666 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13667 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13668 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13669 if (ae->ack_val_set == ACK_CUMACK) 13670 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13671 } 13672 } 13673 #endif 13674 } 13675 #ifdef TCP_ACCOUNTING 13676 ts_val = get_cyclecount(); 13677 #endif 13678 acked_amount = acked = (high_seq - tp->snd_una); 13679 if (acked) { 13680 if (rack->sack_attack_disable == 0) 13681 rack_do_decay(rack); 13682 if (acked >= segsiz) { 13683 /* 13684 * You only get credit for 13685 * MSS and greater (and you get extra 13686 * credit for larger cum-ack moves). 13687 */ 13688 int ac; 13689 13690 ac = acked / segsiz; 13691 rack->r_ctl.ack_count += ac; 13692 counter_u64_add(rack_ack_total, ac); 13693 } 13694 if (rack->r_ctl.ack_count > 0xfff00000) { 13695 /* 13696 * reduce the number to keep us under 13697 * a uint32_t. 13698 */ 13699 rack->r_ctl.ack_count /= 2; 13700 rack->r_ctl.sack_count /= 2; 13701 } 13702 if (tp->t_flags & TF_NEEDSYN) { 13703 /* 13704 * T/TCP: Connection was half-synchronized, and our SYN has 13705 * been ACK'd (so connection is now fully synchronized). Go 13706 * to non-starred state, increment snd_una for ACK of SYN, 13707 * and check if we can do window scaling. 13708 */ 13709 tp->t_flags &= ~TF_NEEDSYN; 13710 tp->snd_una++; 13711 acked_amount = acked = (high_seq - tp->snd_una); 13712 } 13713 if (acked > sbavail(&so->so_snd)) 13714 acked_amount = sbavail(&so->so_snd); 13715 #ifdef NETFLIX_EXP_DETECTION 13716 /* 13717 * We only care on a cum-ack move if we are in a sack-disabled 13718 * state. We have already added in to the ack_count, and we never 13719 * would disable on a cum-ack move, so we only care to do the 13720 * detection if it may "undo" it, i.e. we were in disabled already. 13721 */ 13722 if (rack->sack_attack_disable) 13723 rack_do_detection(tp, rack, acked_amount, segsiz); 13724 #endif 13725 if (IN_FASTRECOVERY(tp->t_flags) && 13726 (rack->rack_no_prr == 0)) 13727 rack_update_prr(tp, rack, acked_amount, high_seq); 13728 if (IN_RECOVERY(tp->t_flags)) { 13729 if (SEQ_LT(high_seq, tp->snd_recover) && 13730 (SEQ_LT(high_seq, tp->snd_max))) { 13731 tcp_rack_partialack(tp); 13732 } else { 13733 rack_post_recovery(tp, high_seq); 13734 recovery = 1; 13735 } 13736 } 13737 /* Handle the rack-log-ack part (sendmap) */ 13738 if ((sbused(&so->so_snd) == 0) && 13739 (acked > acked_amount) && 13740 (tp->t_state >= TCPS_FIN_WAIT_1) && 13741 (tp->t_flags & TF_SENTFIN)) { 13742 /* 13743 * We must be sure our fin 13744 * was sent and acked (we can be 13745 * in FIN_WAIT_1 without having 13746 * sent the fin). 13747 */ 13748 ourfinisacked = 1; 13749 /* 13750 * Lets make sure snd_una is updated 13751 * since most likely acked_amount = 0 (it 13752 * should be). 13753 */ 13754 tp->snd_una = high_seq; 13755 } 13756 /* Did we make a RTO error? 
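     *
     * (That is: if the very first RTO retransmission is acked while
     * ticks is still inside t_badrxtwin, the timeout is judged spurious
     * and CC_RTO_ERR below lets congestion control undo the cwnd and
     * ssthresh reduction.)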
*/ 13757 if ((tp->t_flags & TF_PREVVALID) && 13758 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13759 tp->t_flags &= ~TF_PREVVALID; 13760 if (tp->t_rxtshift == 1 && 13761 (int)(ticks - tp->t_badrxtwin) < 0) 13762 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13763 } 13764 /* Handle the data in the socket buffer */ 13765 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13766 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13767 if (acked_amount > 0) { 13768 struct mbuf *mfree; 13769 13770 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13771 SOCKBUF_LOCK(&so->so_snd); 13772 mfree = sbcut_locked(&so->so_snd, acked_amount); 13773 tp->snd_una = high_seq; 13774 /* Note we want to hold the sb lock through the sendmap adjust */ 13775 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13776 /* Wake up the socket if we have room to write more */ 13777 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13778 sowwakeup_locked(so); 13779 m_freem(mfree); 13780 } 13781 /* update progress */ 13782 tp->t_acktime = ticks; 13783 rack_log_progress_event(rack, tp, tp->t_acktime, 13784 PROGRESS_UPDATE, __LINE__); 13785 /* Clear out shifts and such */ 13786 tp->t_rxtshift = 0; 13787 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13788 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13789 rack->rc_tlp_in_progress = 0; 13790 rack->r_ctl.rc_tlp_cnt_out = 0; 13791 /* Send recover and snd_nxt must be dragged along */ 13792 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13793 tp->snd_recover = tp->snd_una; 13794 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13795 tp->snd_nxt = tp->snd_una; 13796 /* 13797 * If the RXT timer is running we want to 13798 * stop it, so we can restart a TLP (or new RXT). 13799 */ 13800 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13801 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13802 #ifdef NETFLIX_HTTP_LOGGING 13803 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13804 #endif 13805 tp->snd_wl2 = high_seq; 13806 tp->t_dupacks = 0; 13807 if (under_pacing && 13808 (rack->use_fixed_rate == 0) && 13809 (rack->in_probe_rtt == 0) && 13810 rack->rc_gp_dyn_mul && 13811 rack->rc_always_pace) { 13812 /* Check if we are dragging bottom */ 13813 rack_check_bottom_drag(tp, rack, so, acked); 13814 } 13815 if (tp->snd_una == tp->snd_max) { 13816 tp->t_flags &= ~TF_PREVVALID; 13817 rack->r_ctl.retran_during_recovery = 0; 13818 rack->r_ctl.dsack_byte_cnt = 0; 13819 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13820 if (rack->r_ctl.rc_went_idle_time == 0) 13821 rack->r_ctl.rc_went_idle_time = 1; 13822 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13823 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13824 tp->t_acktime = 0; 13825 /* Set so we might enter persists... */ 13826 rack->r_wanted_output = 1; 13827 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13828 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13829 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13830 (sbavail(&so->so_snd) == 0) && 13831 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13832 /* 13833 * The socket was gone and the 13834 * peer sent data (not now in the past), time to 13835 * reset him. 
13836 */ 13837 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13838 /* tcp_close will kill the inp pre-log the Reset */ 13839 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13840 #ifdef TCP_ACCOUNTING 13841 rdstc = get_cyclecount(); 13842 if (rdstc > ts_val) { 13843 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13844 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13845 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13846 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13847 } 13848 } 13849 #endif 13850 m_freem(m); 13851 tp = tcp_close(tp); 13852 if (tp == NULL) { 13853 #ifdef TCP_ACCOUNTING 13854 sched_unpin(); 13855 #endif 13856 return (1); 13857 } 13858 /* 13859 * We would normally do drop-with-reset which would 13860 * send back a reset. We can't since we don't have 13861 * all the needed bits. Instead lets arrange for 13862 * a call to tcp_output(). That way since we 13863 * are in the closed state we will generate a reset. 13864 * 13865 * Note if tcp_accounting is on we don't unpin since 13866 * we do that after the goto label. 13867 */ 13868 goto send_out_a_rst; 13869 } 13870 if ((sbused(&so->so_snd) == 0) && 13871 (tp->t_state >= TCPS_FIN_WAIT_1) && 13872 (tp->t_flags & TF_SENTFIN)) { 13873 /* 13874 * If we can't receive any more data, then closing user can 13875 * proceed. Starting the timer is contrary to the 13876 * specification, but if we don't get a FIN we'll hang 13877 * forever. 13878 * 13879 */ 13880 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13881 soisdisconnected(so); 13882 tcp_timer_activate(tp, TT_2MSL, 13883 (tcp_fast_finwait2_recycle ? 13884 tcp_finwait2_timeout : 13885 TP_MAXIDLE(tp))); 13886 } 13887 if (ourfinisacked == 0) { 13888 /* 13889 * We don't change to fin-wait-2 if we have our fin acked 13890 * which means we are probably in TCPS_CLOSING. 13891 */ 13892 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13893 } 13894 } 13895 } 13896 /* Wake up the socket if we have room to write more */ 13897 if (sbavail(&so->so_snd)) { 13898 rack->r_wanted_output = 1; 13899 if (ctf_progress_timeout_check(tp, true)) { 13900 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13901 tp, tick, PROGRESS_DROP, __LINE__); 13902 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 13903 /* 13904 * We cheat here and don't send a RST, we should send one 13905 * when the pacer drops the connection. 
13906 */ 13907 #ifdef TCP_ACCOUNTING 13908 rdstc = get_cyclecount(); 13909 if (rdstc > ts_val) { 13910 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13911 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13912 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13913 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13914 } 13915 } 13916 sched_unpin(); 13917 #endif 13918 INP_WUNLOCK(rack->rc_inp); 13919 m_freem(m); 13920 return (1); 13921 } 13922 } 13923 if (ourfinisacked) { 13924 switch(tp->t_state) { 13925 case TCPS_CLOSING: 13926 #ifdef TCP_ACCOUNTING 13927 rdstc = get_cyclecount(); 13928 if (rdstc > ts_val) { 13929 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13930 (rdstc - ts_val)); 13931 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13932 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13933 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13934 } 13935 } 13936 sched_unpin(); 13937 #endif 13938 tcp_twstart(tp); 13939 m_freem(m); 13940 return (1); 13941 break; 13942 case TCPS_LAST_ACK: 13943 #ifdef TCP_ACCOUNTING 13944 rdstc = get_cyclecount(); 13945 if (rdstc > ts_val) { 13946 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13947 (rdstc - ts_val)); 13948 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13949 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13950 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13951 } 13952 } 13953 sched_unpin(); 13954 #endif 13955 tp = tcp_close(tp); 13956 ctf_do_drop(m, tp); 13957 return (1); 13958 break; 13959 case TCPS_FIN_WAIT_1: 13960 #ifdef TCP_ACCOUNTING 13961 rdstc = get_cyclecount(); 13962 if (rdstc > ts_val) { 13963 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13964 (rdstc - ts_val)); 13965 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13966 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13967 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13968 } 13969 } 13970 #endif 13971 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13972 soisdisconnected(so); 13973 tcp_timer_activate(tp, TT_2MSL, 13974 (tcp_fast_finwait2_recycle ? 13975 tcp_finwait2_timeout : 13976 TP_MAXIDLE(tp))); 13977 } 13978 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13979 break; 13980 default: 13981 break; 13982 } 13983 } 13984 if (rack->r_fast_output) { 13985 /* 13986 * We re doing fast output.. can we expand that? 
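     *
     * (Roughly: rack_gain_for_fastoutput() below credits the newly
     * acked bytes back to the cached fast-send state so the next fast
     * output can cover more data without rebuilding it; see that
     * function for the exact bookkeeping, this is only a gloss.)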
13987 */ 13988 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13989 } 13990 #ifdef TCP_ACCOUNTING 13991 rdstc = get_cyclecount(); 13992 if (rdstc > ts_val) { 13993 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13994 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13995 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13996 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13997 } 13998 } 13999 14000 } else if (win_up_req) { 14001 rdstc = get_cyclecount(); 14002 if (rdstc > ts_val) { 14003 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 14004 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14005 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 14006 } 14007 } 14008 #endif 14009 } 14010 /* Now is there a next packet, if so we are done */ 14011 m_freem(m); 14012 did_out = 0; 14013 if (nxt_pkt) { 14014 #ifdef TCP_ACCOUNTING 14015 sched_unpin(); 14016 #endif 14017 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 14018 return (0); 14019 } 14020 rack_handle_might_revert(tp, rack); 14021 ctf_calc_rwin(so, tp); 14022 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14023 send_out_a_rst: 14024 (void)tp->t_fb->tfb_tcp_output(tp); 14025 did_out = 1; 14026 } 14027 rack_free_trim(rack); 14028 #ifdef TCP_ACCOUNTING 14029 sched_unpin(); 14030 #endif 14031 rack_timer_audit(tp, rack, &so->so_snd); 14032 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 14033 return (0); 14034 } 14035 14036 14037 static int 14038 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 14039 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 14040 int32_t nxt_pkt, struct timeval *tv) 14041 { 14042 #ifdef TCP_ACCOUNTING 14043 uint64_t ts_val; 14044 #endif 14045 int32_t thflags, retval, did_out = 0; 14046 int32_t way_out = 0; 14047 /* 14048 * cts - is the current time from tv (caller gets ts) in microseconds. 14049 * ms_cts - is the current time from tv in milliseconds. 14050 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 14051 */ 14052 uint32_t cts, us_cts, ms_cts; 14053 uint32_t tiwin; 14054 struct timespec ts; 14055 struct tcpopt to; 14056 struct tcp_rack *rack; 14057 struct rack_sendmap *rsm; 14058 int32_t prev_state = 0; 14059 #ifdef TCP_ACCOUNTING 14060 int ack_val_set = 0xf; 14061 #endif 14062 int nsegs; 14063 /* 14064 * tv passed from common code is from either M_TSTMP_LRO or 14065 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14066 */ 14067 rack = (struct tcp_rack *)tp->t_fb_ptr; 14068 if (m->m_flags & M_ACKCMP) { 14069 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14070 } 14071 if (m->m_flags & M_ACKCMP) { 14072 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 14073 } 14074 cts = tcp_tv_to_usectick(tv); 14075 ms_cts = tcp_tv_to_mssectick(tv); 14076 nsegs = m->m_pkthdr.lro_nsegs; 14077 counter_u64_add(rack_proc_non_comp_ack, 1); 14078 thflags = th->th_flags; 14079 #ifdef TCP_ACCOUNTING 14080 sched_pin(); 14081 if (thflags & TH_ACK) 14082 ts_val = get_cyclecount(); 14083 #endif 14084 if ((m->m_flags & M_TSTMP) || 14085 (m->m_flags & M_TSTMP_LRO)) { 14086 mbuf_tstmp2timespec(m, &ts); 14087 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14088 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14089 } else 14090 rack->r_ctl.act_rcv_time = *tv; 14091 kern_prefetch(rack, &prev_state); 14092 prev_state = 0; 14093 /* 14094 * Unscale the window into a 32-bit value. For the SYN_SENT state 14095 * the scale is zero. 
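     *
     * (Worked example with assumed numbers: a raw th_win of 65535
     * combined with snd_scale = 7 unscales to 65535 << 7 = 8388480
     * bytes of advertised receive window.)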
14096 */ 14097 tiwin = th->th_win << tp->snd_scale; 14098 #ifdef TCP_ACCOUNTING 14099 if (thflags & TH_ACK) { 14100 /* 14101 * We have a tradeoff here. We can either do what we are 14102 * doing i.e. pinning to this CPU and then doing the accounting 14103 * <or> we could do a critical enter, setup the rdtsc and cpu 14104 * as in below, and then validate we are on the same CPU on 14105 * exit. I have choosen to not do the critical enter since 14106 * that often will gain you a context switch, and instead lock 14107 * us (line above this if) to the same CPU with sched_pin(). This 14108 * means we may be context switched out for a higher priority 14109 * interupt but we won't be moved to another CPU. 14110 * 14111 * If this occurs (which it won't very often since we most likely 14112 * are running this code in interupt context and only a higher 14113 * priority will bump us ... clock?) we will falsely add in 14114 * to the time the interupt processing time plus the ack processing 14115 * time. This is ok since its a rare event. 14116 */ 14117 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14118 ctf_fixed_maxseg(tp)); 14119 } 14120 #endif 14121 /* 14122 * Parse options on any incoming segment. 14123 */ 14124 memset(&to, 0, sizeof(to)); 14125 tcp_dooptions(&to, (u_char *)(th + 1), 14126 (th->th_off << 2) - sizeof(struct tcphdr), 14127 (thflags & TH_SYN) ? TO_SYN : 0); 14128 NET_EPOCH_ASSERT(); 14129 INP_WLOCK_ASSERT(tp->t_inpcb); 14130 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14131 __func__)); 14132 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14133 __func__)); 14134 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14135 (tp->t_flags & TF_GPUTINPROG)) { 14136 /* 14137 * We have a goodput in progress 14138 * and we have entered a late state. 14139 * Do we have enough data in the sb 14140 * to handle the GPUT request? 14141 */ 14142 uint32_t bytes; 14143 14144 bytes = tp->gput_ack - tp->gput_seq; 14145 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14146 bytes += tp->gput_seq - tp->snd_una; 14147 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14148 /* 14149 * There are not enough bytes in the socket 14150 * buffer that have been sent to cover this 14151 * measurement. Cancel it. 
             */
            rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
                rack->r_ctl.rc_gp_srtt /*flex1*/,
                tp->gput_seq,
                0, 0, 18, __LINE__, NULL, 0);
            tp->t_flags &= ~TF_GPUTINPROG;
        }
    }
    if (tp->t_logstate != TCP_LOG_STATE_OFF) {
        union tcp_log_stackspecific log;
        struct timeval ltv;
#ifdef NETFLIX_HTTP_LOGGING
        struct http_sendfile_track *http_req;

        if (SEQ_GT(th->th_ack, tp->snd_una)) {
            http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
        } else {
            http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
        }
#endif
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
        log.u_bbr.ininput = rack->rc_inp->inp_in_input;
        if (rack->rack_no_prr == 0)
            log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
        else
            log.u_bbr.flex1 = 0;
        log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
        log.u_bbr.use_lt_bw <<= 1;
        log.u_bbr.use_lt_bw |= rack->r_might_revert;
        log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
        log.u_bbr.flex3 = m->m_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
        log.u_bbr.lost = thflags;
        log.u_bbr.pacing_gain = 0x1;
#ifdef TCP_ACCOUNTING
        log.u_bbr.cwnd_gain = ack_val_set;
#endif
        log.u_bbr.flex7 = 2;
        if (m->m_flags & M_TSTMP) {
            /* Record the hardware timestamp if present */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
        } else if (m->m_flags & M_TSTMP_LRO) {
            /* Record the LRO arrival timestamp */
            mbuf_tstmp2timespec(m, &ts);
            ltv.tv_sec = ts.tv_sec;
            ltv.tv_usec = ts.tv_nsec / 1000;
            log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
        }
        log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
        /* Log the rcv time */
        log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef NETFLIX_HTTP_LOGGING
        log.u_bbr.applimited = tp->t_http_closed;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_http_open;
        log.u_bbr.applimited <<= 8;
        log.u_bbr.applimited |= tp->t_http_req;
        if (http_req) {
            /* Copy out any client req info */
            /* seconds */
            log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
            /* useconds */
            log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
            log.u_bbr.rttProp = http_req->timestamp;
            log.u_bbr.cur_del_rate = http_req->start;
            if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
                log.u_bbr.flex8 |= 1;
            } else {
                log.u_bbr.flex8 |= 2;
                log.u_bbr.bw_inuse = http_req->end;
            }
            log.u_bbr.flex6 = http_req->start_seq;
            if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
                log.u_bbr.flex8 |= 4;
                log.u_bbr.epoch = http_req->end_seq;
            }
        }
#endif
        TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
            tlen, &log, true, &ltv);
    }
    if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
        way_out = 4;
        retval = 0;
        m_freem(m);
        goto done_with_input;
    }
    /*
     * If a segment with the ACK-bit set arrives in the SYN-SENT state
     * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
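     *
     * (In other words the only acceptable ACK here falls in the range
     * (iss, snd_max]; anything at or below the ISS, or beyond snd_max,
     * is answered with a reset below.)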
14248 */ 14249 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14250 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14251 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14252 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14253 #ifdef TCP_ACCOUNTING 14254 sched_unpin(); 14255 #endif 14256 return (1); 14257 } 14258 /* 14259 * If timestamps were negotiated during SYN/ACK and a 14260 * segment without a timestamp is received, silently drop 14261 * the segment, unless it is a RST segment or missing timestamps are 14262 * tolerated. 14263 * See section 3.2 of RFC 7323. 14264 */ 14265 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14266 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14267 way_out = 5; 14268 retval = 0; 14269 m_freem(m); 14270 goto done_with_input; 14271 } 14272 14273 /* 14274 * Segment received on connection. Reset idle time and keep-alive 14275 * timer. XXX: This should be done after segment validation to 14276 * ignore broken/spoofed segs. 14277 */ 14278 if (tp->t_idle_reduce && 14279 (tp->snd_max == tp->snd_una) && 14280 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14281 counter_u64_add(rack_input_idle_reduces, 1); 14282 rack_cc_after_idle(rack, tp); 14283 } 14284 tp->t_rcvtime = ticks; 14285 #ifdef STATS 14286 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14287 #endif 14288 if (tiwin > rack->r_ctl.rc_high_rwnd) 14289 rack->r_ctl.rc_high_rwnd = tiwin; 14290 /* 14291 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14292 * this to occur after we've validated the segment. 14293 */ 14294 if (tp->t_flags2 & TF2_ECN_PERMIT) { 14295 if (thflags & TH_CWR) { 14296 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 14297 tp->t_flags |= TF_ACKNOW; 14298 } 14299 switch (iptos & IPTOS_ECN_MASK) { 14300 case IPTOS_ECN_CE: 14301 tp->t_flags2 |= TF2_ECN_SND_ECE; 14302 KMOD_TCPSTAT_INC(tcps_ecn_ce); 14303 break; 14304 case IPTOS_ECN_ECT0: 14305 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 14306 break; 14307 case IPTOS_ECN_ECT1: 14308 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 14309 break; 14310 } 14311 14312 /* Process a packet differently from RFC3168. */ 14313 cc_ecnpkt_handler(tp, th, iptos); 14314 14315 /* Congestion experienced. */ 14316 if (thflags & TH_ECE) { 14317 rack_cong_signal(tp, CC_ECN, th->th_ack); 14318 } 14319 } 14320 14321 /* 14322 * If echoed timestamp is later than the current time, fall back to 14323 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14324 * were used when this connection was established. 14325 */ 14326 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14327 to.to_tsecr -= tp->ts_offset; 14328 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14329 to.to_tsecr = 0; 14330 } 14331 14332 /* 14333 * If its the first time in we need to take care of options and 14334 * verify we can do SACK for rack! 14335 */ 14336 if (rack->r_state == 0) { 14337 /* Should be init'd by rack_init() */ 14338 KASSERT(rack->rc_inp != NULL, 14339 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14340 if (rack->rc_inp == NULL) { 14341 rack->rc_inp = tp->t_inpcb; 14342 } 14343 14344 /* 14345 * Process options only when we get SYN/ACK back. The SYN 14346 * case for incoming connections is handled in tcp_syncache. 14347 * According to RFC1323 the window field in a SYN (i.e., a 14348 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14349 * this is traditional behavior, may need to be cleaned up. 
14350 */ 14351 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14352 /* Handle parallel SYN for ECN */ 14353 if (!(thflags & TH_ACK) && 14354 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) && 14355 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) { 14356 tp->t_flags2 |= TF2_ECN_PERMIT; 14357 tp->t_flags2 |= TF2_ECN_SND_ECE; 14358 TCPSTAT_INC(tcps_ecn_shs); 14359 } 14360 if ((to.to_flags & TOF_SCALE) && 14361 (tp->t_flags & TF_REQ_SCALE)) { 14362 tp->t_flags |= TF_RCVD_SCALE; 14363 tp->snd_scale = to.to_wscale; 14364 } else 14365 tp->t_flags &= ~TF_REQ_SCALE; 14366 /* 14367 * Initial send window. It will be updated with the 14368 * next incoming segment to the scaled value. 14369 */ 14370 tp->snd_wnd = th->th_win; 14371 rack_validate_fo_sendwin_up(tp, rack); 14372 if ((to.to_flags & TOF_TS) && 14373 (tp->t_flags & TF_REQ_TSTMP)) { 14374 tp->t_flags |= TF_RCVD_TSTMP; 14375 tp->ts_recent = to.to_tsval; 14376 tp->ts_recent_age = cts; 14377 } else 14378 tp->t_flags &= ~TF_REQ_TSTMP; 14379 if (to.to_flags & TOF_MSS) { 14380 tcp_mss(tp, to.to_mss); 14381 } 14382 if ((tp->t_flags & TF_SACK_PERMIT) && 14383 (to.to_flags & TOF_SACKPERM) == 0) 14384 tp->t_flags &= ~TF_SACK_PERMIT; 14385 if (IS_FASTOPEN(tp->t_flags)) { 14386 if (to.to_flags & TOF_FASTOPEN) { 14387 uint16_t mss; 14388 14389 if (to.to_flags & TOF_MSS) 14390 mss = to.to_mss; 14391 else 14392 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14393 mss = TCP6_MSS; 14394 else 14395 mss = TCP_MSS; 14396 tcp_fastopen_update_cache(tp, mss, 14397 to.to_tfo_len, to.to_tfo_cookie); 14398 } else 14399 tcp_fastopen_disable_path(tp); 14400 } 14401 } 14402 /* 14403 * At this point we are at the initial call. Here we decide 14404 * if we are doing RACK or not. We do this by seeing if 14405 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14406 * The code now does do dup-ack counting so if you don't 14407 * switch back you won't get rack & TLP, but you will still 14408 * get this stack. 14409 */ 14410 14411 if ((rack_sack_not_required == 0) && 14412 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14413 tcp_switch_back_to_default(tp); 14414 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14415 tlen, iptos); 14416 #ifdef TCP_ACCOUNTING 14417 sched_unpin(); 14418 #endif 14419 return (1); 14420 } 14421 tcp_set_hpts(tp->t_inpcb); 14422 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14423 } 14424 if (thflags & TH_FIN) 14425 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14426 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14427 if ((rack->rc_gp_dyn_mul) && 14428 (rack->use_fixed_rate == 0) && 14429 (rack->rc_always_pace)) { 14430 /* Check in on probertt */ 14431 rack_check_probe_rtt(rack, us_cts); 14432 } 14433 rack_clear_rate_sample(rack); 14434 if (rack->forced_ack) { 14435 uint32_t us_rtt; 14436 14437 /* 14438 * A persist or keep-alive was forced out, update our 14439 * min rtt time. Note we do not worry about lost 14440 * retransmissions since KEEP-ALIVES and persists 14441 * are usually way long on times of sending (though 14442 * if we were really paranoid or worried we could 14443 * at least use timestamps if available to validate). 14444 */ 14445 rack->forced_ack = 0; 14446 if (tiwin == tp->snd_wnd) { 14447 /* 14448 * Only apply the RTT update if this is 14449 * a response to our window probe. And that 14450 * means the rwnd sent must match the current 14451 * snd_wnd. If it does not, then we got a 14452 * window update ack instead. 
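     *
     * (The sample taken below is simply us_cts - forced_ack_ts,
     * clamped to a minimum of 1 usec, and is fed both to the us-rtt
     * filter and to the normal RTT measurement machinery.)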
14453 */ 14454 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 14455 if (us_rtt == 0) 14456 us_rtt = 1; 14457 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 14458 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 14459 } 14460 } 14461 /* 14462 * This is the one exception case where we set the rack state 14463 * always. All other times (timers etc) we must have a rack-state 14464 * set (so we assure we have done the checks above for SACK). 14465 */ 14466 rack->r_ctl.rc_rcvtime = cts; 14467 if (rack->r_state != tp->t_state) 14468 rack_set_state(tp, rack); 14469 if (SEQ_GT(th->th_ack, tp->snd_una) && 14470 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14471 kern_prefetch(rsm, &prev_state); 14472 prev_state = rack->r_state; 14473 retval = (*rack->r_substate) (m, th, so, 14474 tp, &to, drop_hdrlen, 14475 tlen, tiwin, thflags, nxt_pkt, iptos); 14476 #ifdef INVARIANTS 14477 if ((retval == 0) && 14478 (tp->t_inpcb == NULL)) { 14479 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14480 retval, tp, prev_state); 14481 } 14482 #endif 14483 if (retval == 0) { 14484 /* 14485 * If retval is 1 the tcb is unlocked and most likely the tp 14486 * is gone. 14487 */ 14488 INP_WLOCK_ASSERT(tp->t_inpcb); 14489 if ((rack->rc_gp_dyn_mul) && 14490 (rack->rc_always_pace) && 14491 (rack->use_fixed_rate == 0) && 14492 rack->in_probe_rtt && 14493 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14494 /* 14495 * If we are going for target, lets recheck before 14496 * we output. 14497 */ 14498 rack_check_probe_rtt(rack, us_cts); 14499 } 14500 if (rack->set_pacing_done_a_iw == 0) { 14501 /* How much has been acked? */ 14502 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14503 /* We have enough to set in the pacing segment size */ 14504 rack->set_pacing_done_a_iw = 1; 14505 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14506 } 14507 } 14508 tcp_rack_xmit_timer_commit(rack, tp); 14509 #ifdef TCP_ACCOUNTING 14510 /* 14511 * If we set the ack_val_se to what ack processing we are doing 14512 * we also want to track how many cycles we burned. Note 14513 * the bits after tcp_output we let be "free". This is because 14514 * we are also tracking the tcp_output times as well. Note the 14515 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14516 * 0xf cannot be returned and is what we initialize it too to 14517 * indicate we are not doing the tabulations. 
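     *
     * (ack_val_set is later used as an index into the accounting
     * arrays, and the valid ack classes are small values, so 0xf is a
     * safe "no accounting for this segment" sentinel.)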
14518 */ 14519 if (ack_val_set != 0xf) { 14520 uint64_t crtsc; 14521 14522 crtsc = get_cyclecount(); 14523 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14524 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14525 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14526 } 14527 } 14528 #endif 14529 if (nxt_pkt == 0) { 14530 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14531 do_output_now: 14532 did_out = 1; 14533 (void)tp->t_fb->tfb_tcp_output(tp); 14534 } 14535 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14536 rack_free_trim(rack); 14537 } 14538 /* Update any rounds needed */ 14539 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends)) { 14540 rack->r_ctl.current_round++; 14541 rack->r_ctl.roundends = tp->snd_max; 14542 if (CC_ALGO(tp)->newround != NULL) { 14543 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14544 } 14545 } 14546 if ((nxt_pkt == 0) && 14547 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14548 (SEQ_GT(tp->snd_max, tp->snd_una) || 14549 (tp->t_flags & TF_DELACK) || 14550 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14551 (tp->t_state <= TCPS_CLOSING)))) { 14552 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 14553 if ((tp->snd_max == tp->snd_una) && 14554 ((tp->t_flags & TF_DELACK) == 0) && 14555 (rack->rc_inp->inp_in_hpts) && 14556 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14557 /* keep alive not needed if we are hptsi output yet */ 14558 ; 14559 } else { 14560 int late = 0; 14561 if (rack->rc_inp->inp_in_hpts) { 14562 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14563 us_cts = tcp_get_usecs(NULL); 14564 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14565 rack->r_early = 1; 14566 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14567 } else 14568 late = 1; 14569 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14570 } 14571 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 14572 } 14573 if (late && (did_out == 0)) { 14574 /* 14575 * We are late in the sending 14576 * and we did not call the output 14577 * (this probably should not happen). 14578 */ 14579 goto do_output_now; 14580 } 14581 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14582 } 14583 way_out = 1; 14584 } else if (nxt_pkt == 0) { 14585 /* Do we have the correct timer running? */ 14586 rack_timer_audit(tp, rack, &so->so_snd); 14587 way_out = 2; 14588 } 14589 done_with_input: 14590 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14591 if (did_out) 14592 rack->r_wanted_output = 0; 14593 #ifdef INVARIANTS 14594 if (tp->t_inpcb == NULL) { 14595 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14596 did_out, 14597 retval, tp, prev_state); 14598 } 14599 #endif 14600 #ifdef TCP_ACCOUNTING 14601 } else { 14602 /* 14603 * Track the time (see above). 14604 */ 14605 if (ack_val_set != 0xf) { 14606 uint64_t crtsc; 14607 14608 crtsc = get_cyclecount(); 14609 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14610 /* 14611 * Note we *DO NOT* increment the per-tcb counters since 14612 * in the else the TP may be gone!! 
14613 */ 14614 } 14615 #endif 14616 } 14617 #ifdef TCP_ACCOUNTING 14618 sched_unpin(); 14619 #endif 14620 return (retval); 14621 } 14622 14623 void 14624 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14625 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14626 { 14627 struct timeval tv; 14628 14629 /* First lets see if we have old packets */ 14630 if (tp->t_in_pkt) { 14631 if (ctf_do_queued_segments(so, tp, 1)) { 14632 m_freem(m); 14633 return; 14634 } 14635 } 14636 if (m->m_flags & M_TSTMP_LRO) { 14637 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14638 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14639 } else { 14640 /* Should not be should we kassert instead? */ 14641 tcp_get_usecs(&tv); 14642 } 14643 if (rack_do_segment_nounlock(m, th, so, tp, 14644 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14645 INP_WUNLOCK(tp->t_inpcb); 14646 } 14647 } 14648 14649 struct rack_sendmap * 14650 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14651 { 14652 struct rack_sendmap *rsm = NULL; 14653 int32_t idx; 14654 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14655 14656 /* Return the next guy to be re-transmitted */ 14657 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14658 return (NULL); 14659 } 14660 if (tp->t_flags & TF_SENTFIN) { 14661 /* retran the end FIN? */ 14662 return (NULL); 14663 } 14664 /* ok lets look at this one */ 14665 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14666 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14667 goto check_it; 14668 } 14669 rsm = rack_find_lowest_rsm(rack); 14670 if (rsm == NULL) { 14671 return (NULL); 14672 } 14673 check_it: 14674 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14675 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14676 /* 14677 * No sack so we automatically do the 3 strikes and 14678 * retransmit (no rack timer would be started). 14679 */ 14680 14681 return (rsm); 14682 } 14683 if (rsm->r_flags & RACK_ACKED) { 14684 return (NULL); 14685 } 14686 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14687 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14688 /* Its not yet ready */ 14689 return (NULL); 14690 } 14691 srtt = rack_grab_rtt(tp, rack); 14692 idx = rsm->r_rtr_cnt - 1; 14693 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14694 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14695 if ((tsused == ts_low) || 14696 (TSTMP_LT(tsused, ts_low))) { 14697 /* No time since sending */ 14698 return (NULL); 14699 } 14700 if ((tsused - ts_low) < thresh) { 14701 /* It has not been long enough yet */ 14702 return (NULL); 14703 } 14704 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14705 ((rsm->r_flags & RACK_SACK_PASSED) && 14706 (rack->sack_attack_disable == 0))) { 14707 /* 14708 * We have passed the dup-ack threshold <or> 14709 * a SACK has indicated this is missing. 14710 * Note that if you are a declared attacker 14711 * it is only the dup-ack threshold that 14712 * will cause retransmits. 
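     *
     * (Summary of the eligibility test: the entry must have aged at
     * least "thresh" usecs since its last (re)transmission, where
     * thresh comes from rack_calc_thresh_rack() and is roughly an SRTT
     * plus the reordering window, and it must either have reached
     * DUP_ACK_THRESHOLD dup-acks or have been passed over by a SACK.)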
14713 */ 14714 /* log retransmit reason */ 14715 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14716 rack->r_fast_output = 0; 14717 return (rsm); 14718 } 14719 return (NULL); 14720 } 14721 14722 static void 14723 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14724 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14725 int line, struct rack_sendmap *rsm, uint8_t quality) 14726 { 14727 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14728 union tcp_log_stackspecific log; 14729 struct timeval tv; 14730 14731 memset(&log, 0, sizeof(log)); 14732 log.u_bbr.flex1 = slot; 14733 log.u_bbr.flex2 = len; 14734 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14735 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14736 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14737 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14738 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14739 log.u_bbr.use_lt_bw <<= 1; 14740 log.u_bbr.use_lt_bw |= rack->r_late; 14741 log.u_bbr.use_lt_bw <<= 1; 14742 log.u_bbr.use_lt_bw |= rack->r_early; 14743 log.u_bbr.use_lt_bw <<= 1; 14744 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14745 log.u_bbr.use_lt_bw <<= 1; 14746 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14747 log.u_bbr.use_lt_bw <<= 1; 14748 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14749 log.u_bbr.use_lt_bw <<= 1; 14750 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14751 log.u_bbr.use_lt_bw <<= 1; 14752 log.u_bbr.use_lt_bw |= rack->gp_ready; 14753 log.u_bbr.pkt_epoch = line; 14754 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14755 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14756 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14757 log.u_bbr.bw_inuse = bw_est; 14758 log.u_bbr.delRate = bw; 14759 if (rack->r_ctl.gp_bw == 0) 14760 log.u_bbr.cur_del_rate = 0; 14761 else 14762 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14763 log.u_bbr.rttProp = len_time; 14764 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14765 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14766 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14767 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14768 /* We are in slow start */ 14769 log.u_bbr.flex7 = 1; 14770 } else { 14771 /* we are on congestion avoidance */ 14772 log.u_bbr.flex7 = 0; 14773 } 14774 log.u_bbr.flex8 = method; 14775 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14776 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14777 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14778 log.u_bbr.cwnd_gain <<= 1; 14779 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14780 log.u_bbr.cwnd_gain <<= 1; 14781 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14782 log.u_bbr.bbr_substate = quality; 14783 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14784 &rack->rc_inp->inp_socket->so_rcv, 14785 &rack->rc_inp->inp_socket->so_snd, 14786 BBR_LOG_HPTSI_CALC, 0, 14787 0, &log, false, &tv); 14788 } 14789 } 14790 14791 static uint32_t 14792 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14793 { 14794 uint32_t new_tso, user_max; 14795 14796 user_max = rack->rc_user_set_max_segs * mss; 14797 if (rack->rc_force_max_seg) { 14798 return (user_max); 14799 } 14800 if (rack->use_fixed_rate && 14801 ((rack->r_ctl.crte == NULL) || 14802 (bw != rack->r_ctl.crte->rate))) { 14803 /* Use the user mss since we are not exactly matched */ 14804 return (user_max); 14805 } 14806 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14807 if 
(new_tso > user_max) 14808 new_tso = user_max; 14809 return (new_tso); 14810 } 14811 14812 static int32_t 14813 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14814 { 14815 uint64_t lentim, fill_bw; 14816 14817 /* Lets first see if we are full, if so continue with normal rate */ 14818 rack->r_via_fill_cw = 0; 14819 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14820 return (slot); 14821 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14822 return (slot); 14823 if (rack->r_ctl.rc_last_us_rtt == 0) 14824 return (slot); 14825 if (rack->rc_pace_fill_if_rttin_range && 14826 (rack->r_ctl.rc_last_us_rtt >= 14827 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14828 /* The rtt is huge, N * smallest, lets not fill */ 14829 return (slot); 14830 } 14831 /* 14832 * first lets calculate the b/w based on the last us-rtt 14833 * and the sndwnd. 14834 */ 14835 fill_bw = rack->r_ctl.cwnd_to_use; 14836 /* Take the rwnd if its smaller */ 14837 if (fill_bw > rack->rc_tp->snd_wnd) 14838 fill_bw = rack->rc_tp->snd_wnd; 14839 if (rack->r_fill_less_agg) { 14840 /* 14841 * Now take away the inflight (this will reduce our 14842 * aggressiveness and yeah, if we get that much out in 1RTT 14843 * we will have had acks come back and still be behind). 14844 */ 14845 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14846 } 14847 /* Now lets make it into a b/w */ 14848 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14849 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14850 /* We are below the min b/w */ 14851 if (non_paced) 14852 *rate_wanted = fill_bw; 14853 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14854 return (slot); 14855 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14856 fill_bw = rack->r_ctl.bw_rate_cap; 14857 rack->r_via_fill_cw = 1; 14858 if (rack->r_rack_hw_rate_caps && 14859 (rack->r_ctl.crte != NULL)) { 14860 uint64_t high_rate; 14861 14862 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14863 if (fill_bw > high_rate) { 14864 /* We are capping bw at the highest rate table entry */ 14865 if (*rate_wanted > high_rate) { 14866 /* The original rate was also capped */ 14867 rack->r_via_fill_cw = 0; 14868 } 14869 rack_log_hdwr_pacing(rack, 14870 fill_bw, high_rate, __LINE__, 14871 0, 3); 14872 fill_bw = high_rate; 14873 if (capped) 14874 *capped = 1; 14875 } 14876 } else if ((rack->r_ctl.crte == NULL) && 14877 (rack->rack_hdrw_pacing == 0) && 14878 (rack->rack_hdw_pace_ena) && 14879 rack->r_rack_hw_rate_caps && 14880 (rack->rack_attempt_hdwr_pace == 0) && 14881 (rack->rc_inp->inp_route.ro_nh != NULL) && 14882 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14883 /* 14884 * Ok we may have a first attempt that is greater than our top rate 14885 * lets check. 14886 */ 14887 uint64_t high_rate; 14888 14889 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14890 if (high_rate) { 14891 if (fill_bw > high_rate) { 14892 fill_bw = high_rate; 14893 if (capped) 14894 *capped = 1; 14895 } 14896 } 14897 } 14898 /* 14899 * Ok fill_bw holds our mythical b/w to fill the cwnd 14900 * in a rtt, what does that time wise equate too? 
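     *
     * (Worked example with assumed numbers: lentim is
     * len * HPTS_USEC_IN_SEC / fill_bw, so a 65536 byte send at a
     * fill_bw of 12,500,000 bytes/sec (100 Mbit/s) works out to
     * 65536 * 1000000 / 12500000 ~= 5243 usec, and that value is only
     * used when it is shorter than the regular pacing slot.)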
14901 */ 14902 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14903 lentim /= fill_bw; 14904 *rate_wanted = fill_bw; 14905 if (non_paced || (lentim < slot)) { 14906 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14907 0, lentim, 12, __LINE__, NULL, 0); 14908 return ((int32_t)lentim); 14909 } else 14910 return (slot); 14911 } 14912 14913 static int32_t 14914 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14915 { 14916 uint64_t srtt; 14917 int32_t slot = 0; 14918 int can_start_hw_pacing = 1; 14919 int err; 14920 14921 if (rack->rc_always_pace == 0) { 14922 /* 14923 * We use the most optimistic possible cwnd/srtt for 14924 * sending calculations. This will make our 14925 * calculation anticipate getting more through 14926 * quicker then possible. But thats ok we don't want 14927 * the peer to have a gap in data sending. 14928 */ 14929 uint64_t cwnd, tr_perms = 0; 14930 int32_t reduce = 0; 14931 14932 old_method: 14933 /* 14934 * We keep no precise pacing with the old method 14935 * instead we use the pacer to mitigate bursts. 14936 */ 14937 if (rack->r_ctl.rc_rack_min_rtt) 14938 srtt = rack->r_ctl.rc_rack_min_rtt; 14939 else 14940 srtt = max(tp->t_srtt, 1); 14941 if (rack->r_ctl.rc_rack_largest_cwnd) 14942 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14943 else 14944 cwnd = rack->r_ctl.cwnd_to_use; 14945 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14946 tr_perms = (cwnd * 1000) / srtt; 14947 if (tr_perms == 0) { 14948 tr_perms = ctf_fixed_maxseg(tp); 14949 } 14950 /* 14951 * Calculate how long this will take to drain, if 14952 * the calculation comes out to zero, thats ok we 14953 * will use send_a_lot to possibly spin around for 14954 * more increasing tot_len_this_send to the point 14955 * that its going to require a pace, or we hit the 14956 * cwnd. Which in that case we are just waiting for 14957 * a ACK. 14958 */ 14959 slot = len / tr_perms; 14960 /* Now do we reduce the time so we don't run dry? */ 14961 if (slot && rack_slot_reduction) { 14962 reduce = (slot / rack_slot_reduction); 14963 if (reduce < slot) { 14964 slot -= reduce; 14965 } else 14966 slot = 0; 14967 } 14968 slot *= HPTS_USEC_IN_MSEC; 14969 if (rack->rc_pace_to_cwnd) { 14970 uint64_t rate_wanted = 0; 14971 14972 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14973 rack->rc_ack_can_sendout_data = 1; 14974 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14975 } else 14976 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14977 } else { 14978 uint64_t bw_est, res, lentim, rate_wanted; 14979 uint32_t orig_val, segs, oh; 14980 int capped = 0; 14981 int prev_fill; 14982 14983 if ((rack->r_rr_config == 1) && rsm) { 14984 return (rack->r_ctl.rc_min_to); 14985 } 14986 if (rack->use_fixed_rate) { 14987 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14988 } else if ((rack->r_ctl.init_rate == 0) && 14989 #ifdef NETFLIX_PEAKRATE 14990 (rack->rc_tp->t_maxpeakrate == 0) && 14991 #endif 14992 (rack->r_ctl.gp_bw == 0)) { 14993 /* no way to yet do an estimate */ 14994 bw_est = rate_wanted = 0; 14995 } else { 14996 bw_est = rack_get_bw(rack); 14997 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14998 } 14999 if ((bw_est == 0) || (rate_wanted == 0) || 15000 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 15001 /* 15002 * No way yet to make a b/w estimate or 15003 * our raise is set incorrectly. 
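     *
     * (For the old_method fallback above, with assumed numbers: a cwnd
     * of 100000 bytes and an srtt of 20000 usec give tr_perms =
     * 100000 * 1000 / 20000 = 5000 bytes per msec, so a 15000 byte
     * send is spread over about 3 msec (3000 usec) before the
     * rack_slot_reduction knob is applied.)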
15004 */ 15005 goto old_method; 15006 } 15007 /* We need to account for all the overheads */ 15008 segs = (len + segsiz - 1) / segsiz; 15009 /* 15010 * We need the diff between 1514 bytes (e-mtu with e-hdr) 15011 * and how much data we put in each packet. Yes this 15012 * means we may be off if we are larger than 1500 bytes 15013 * or smaller. But this just makes us more conservative. 15014 */ 15015 if (rack_hw_rate_min && 15016 (bw_est < rack_hw_rate_min)) 15017 can_start_hw_pacing = 0; 15018 if (ETHERNET_SEGMENT_SIZE > segsiz) 15019 oh = ETHERNET_SEGMENT_SIZE - segsiz; 15020 else 15021 oh = 0; 15022 segs *= oh; 15023 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 15024 res = lentim / rate_wanted; 15025 slot = (uint32_t)res; 15026 orig_val = rack->r_ctl.rc_pace_max_segs; 15027 if (rack->r_ctl.crte == NULL) { 15028 /* 15029 * Only do this if we are not hardware pacing 15030 * since if we are doing hw-pacing below we will 15031 * set make a call after setting up or changing 15032 * the rate. 15033 */ 15034 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 15035 } else if (rack->rc_inp->inp_snd_tag == NULL) { 15036 /* 15037 * We lost our rate somehow, this can happen 15038 * if the interface changed underneath us. 15039 */ 15040 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15041 rack->r_ctl.crte = NULL; 15042 /* Lets re-allow attempting to setup pacing */ 15043 rack->rack_hdrw_pacing = 0; 15044 rack->rack_attempt_hdwr_pace = 0; 15045 rack_log_hdwr_pacing(rack, 15046 rate_wanted, bw_est, __LINE__, 15047 0, 6); 15048 } 15049 /* Did we change the TSO size, if so log it */ 15050 if (rack->r_ctl.rc_pace_max_segs != orig_val) 15051 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 15052 prev_fill = rack->r_via_fill_cw; 15053 if ((rack->rc_pace_to_cwnd) && 15054 (capped == 0) && 15055 (rack->use_fixed_rate == 0) && 15056 (rack->in_probe_rtt == 0) && 15057 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 15058 /* 15059 * We want to pace at our rate *or* faster to 15060 * fill the cwnd to the max if its not full. 15061 */ 15062 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 15063 } 15064 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 15065 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15066 if ((rack->rack_hdw_pace_ena) && 15067 (can_start_hw_pacing > 0) && 15068 (rack->rack_hdrw_pacing == 0) && 15069 (rack->rack_attempt_hdwr_pace == 0)) { 15070 /* 15071 * Lets attempt to turn on hardware pacing 15072 * if we can. 15073 */ 15074 rack->rack_attempt_hdwr_pace = 1; 15075 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 15076 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15077 rate_wanted, 15078 RS_PACING_GEQ, 15079 &err, &rack->r_ctl.crte_prev_rate); 15080 if (rack->r_ctl.crte) { 15081 rack->rack_hdrw_pacing = 1; 15082 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15083 0, rack->r_ctl.crte, 15084 NULL); 15085 rack_log_hdwr_pacing(rack, 15086 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15087 err, 0); 15088 rack->r_ctl.last_hw_bw_req = rate_wanted; 15089 } else { 15090 counter_u64_add(rack_hw_pace_init_fail, 1); 15091 } 15092 } else if (rack->rack_hdrw_pacing && 15093 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15094 /* Do we need to adjust our rate? 
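 * We only get here when hardware pacing is already active and
 * rate_wanted differs from the last rate we requested from the
 * NIC (last_hw_bw_req), so below we either move to a different
 * rate-table entry or, if only the burst size matters, just
 * recompute the pacing segment size.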
*/ 15095 const struct tcp_hwrate_limit_table *nrte; 15096 15097 if (rack->r_up_only && 15098 (rate_wanted < rack->r_ctl.crte->rate)) { 15099 /** 15100 * We have four possible states here 15101 * having to do with the previous time 15102 * and this time. 15103 * previous | this-time 15104 * A) 0 | 0 -- fill_cw not in the picture 15105 * B) 1 | 0 -- we were doing a fill-cw but now are not 15106 * C) 1 | 1 -- all rates from fill_cw 15107 * D) 0 | 1 -- we were doing non-fill and now we are filling 15108 * 15109 * For case A, C and D we don't allow a drop. But for 15110 * case B where we now our on our steady rate we do 15111 * allow a drop. 15112 * 15113 */ 15114 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15115 goto done_w_hdwr; 15116 } 15117 if ((rate_wanted > rack->r_ctl.crte->rate) || 15118 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15119 if (rack_hw_rate_to_low && 15120 (bw_est < rack_hw_rate_to_low)) { 15121 /* 15122 * The pacing rate is too low for hardware, but 15123 * do allow hardware pacing to be restarted. 15124 */ 15125 rack_log_hdwr_pacing(rack, 15126 bw_est, rack->r_ctl.crte->rate, __LINE__, 15127 0, 5); 15128 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15129 rack->r_ctl.crte = NULL; 15130 rack->rack_attempt_hdwr_pace = 0; 15131 rack->rack_hdrw_pacing = 0; 15132 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15133 goto done_w_hdwr; 15134 } 15135 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15136 rack->rc_tp, 15137 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15138 rate_wanted, 15139 RS_PACING_GEQ, 15140 &err, &rack->r_ctl.crte_prev_rate); 15141 if (nrte == NULL) { 15142 /* Lost the rate */ 15143 rack->rack_hdrw_pacing = 0; 15144 rack->r_ctl.crte = NULL; 15145 rack_log_hdwr_pacing(rack, 15146 rate_wanted, 0, __LINE__, 15147 err, 1); 15148 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15149 counter_u64_add(rack_hw_pace_lost, 1); 15150 } else if (nrte != rack->r_ctl.crte) { 15151 rack->r_ctl.crte = nrte; 15152 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15153 segsiz, 0, 15154 rack->r_ctl.crte, 15155 NULL); 15156 rack_log_hdwr_pacing(rack, 15157 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15158 err, 2); 15159 rack->r_ctl.last_hw_bw_req = rate_wanted; 15160 } 15161 } else { 15162 /* We just need to adjust the segment size */ 15163 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15164 rack_log_hdwr_pacing(rack, 15165 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15166 0, 4); 15167 rack->r_ctl.last_hw_bw_req = rate_wanted; 15168 } 15169 } 15170 } 15171 if ((rack->r_ctl.crte != NULL) && 15172 (rack->r_ctl.crte->rate == rate_wanted)) { 15173 /* 15174 * We need to add a extra if the rates 15175 * are exactly matched. The idea is 15176 * we want the software to make sure the 15177 * queue is empty before adding more, this 15178 * gives us N MSS extra pace times where 15179 * N is our sysctl 15180 */ 15181 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15182 } 15183 done_w_hdwr: 15184 if (rack_limit_time_with_srtt && 15185 (rack->use_fixed_rate == 0) && 15186 #ifdef NETFLIX_PEAKRATE 15187 (rack->rc_tp->t_maxpeakrate == 0) && 15188 #endif 15189 (rack->rack_hdrw_pacing == 0)) { 15190 /* 15191 * Sanity check, we do not allow the pacing delay 15192 * to be longer than the SRTT of the path. If it is 15193 * a slow path, then adding a packet should increase 15194 * the RTT and compensate for this i.e. 
the srtt will 15195 * be greater so the allowed pacing time will be greater. 15196 * 15197 * Note this restriction is not for where a peak rate 15198 * is set, we are doing fixed pacing or hardware pacing. 15199 */ 15200 if (rack->rc_tp->t_srtt) 15201 srtt = rack->rc_tp->t_srtt; 15202 else 15203 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15204 if (srtt < (uint64_t)slot) { 15205 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15206 slot = srtt; 15207 } 15208 } 15209 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15210 } 15211 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15212 /* 15213 * If this rate is seeing enobufs when it 15214 * goes to send then either the nic is out 15215 * of gas or we are mis-estimating the time 15216 * somehow and not letting the queue empty 15217 * completely. Lets add to the pacing time. 15218 */ 15219 int hw_boost_delay; 15220 15221 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15222 if (hw_boost_delay > rack_enobuf_hw_max) 15223 hw_boost_delay = rack_enobuf_hw_max; 15224 else if (hw_boost_delay < rack_enobuf_hw_min) 15225 hw_boost_delay = rack_enobuf_hw_min; 15226 slot += hw_boost_delay; 15227 } 15228 if (slot) 15229 counter_u64_add(rack_calc_nonzero, 1); 15230 else 15231 counter_u64_add(rack_calc_zero, 1); 15232 return (slot); 15233 } 15234 15235 static void 15236 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15237 tcp_seq startseq, uint32_t sb_offset) 15238 { 15239 struct rack_sendmap *my_rsm = NULL; 15240 struct rack_sendmap fe; 15241 15242 if (tp->t_state < TCPS_ESTABLISHED) { 15243 /* 15244 * We don't start any measurements if we are 15245 * not at least established. 15246 */ 15247 return; 15248 } 15249 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15250 /* 15251 * We will get no more data into the SB 15252 * this means we need to have the data available 15253 * before we start a measurement. 15254 */ 15255 15256 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15257 max(rc_init_window(rack), 15258 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15259 /* Nope not enough data */ 15260 return; 15261 } 15262 } 15263 tp->t_flags |= TF_GPUTINPROG; 15264 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15265 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15266 tp->gput_seq = startseq; 15267 rack->app_limited_needs_set = 0; 15268 if (rack->in_probe_rtt) 15269 rack->measure_saw_probe_rtt = 1; 15270 else if ((rack->measure_saw_probe_rtt) && 15271 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15272 rack->measure_saw_probe_rtt = 0; 15273 if (rack->rc_gp_filled) 15274 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15275 else { 15276 /* Special case initial measurement */ 15277 struct timeval tv; 15278 15279 tp->gput_ts = tcp_get_usecs(&tv); 15280 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15281 } 15282 /* 15283 * We take a guess out into the future, 15284 * if we have no measurement and no 15285 * initial rate, we measure the first 15286 * initial-windows worth of data to 15287 * speed up getting some GP measurement and 15288 * thus start pacing. 
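 * As a rough example (hypothetical numbers): with a 10 segment
 * initial window of 1,448 byte segments, and assuming MIN_GP_WIN
 * segments amount to less than that, gput_ack lands at
 * startseq + 14,480, i.e. the very first measurement covers
 * roughly one initial window worth of data.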
15289 */ 15290 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15291 rack->app_limited_needs_set = 1; 15292 tp->gput_ack = startseq + max(rc_init_window(rack), 15293 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15294 rack_log_pacing_delay_calc(rack, 15295 tp->gput_seq, 15296 tp->gput_ack, 15297 0, 15298 tp->gput_ts, 15299 rack->r_ctl.rc_app_limited_cnt, 15300 9, 15301 __LINE__, NULL, 0); 15302 return; 15303 } 15304 if (sb_offset) { 15305 /* 15306 * We are out somewhere in the sb 15307 * can we use the already outstanding data? 15308 */ 15309 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15310 /* 15311 * Yes first one is good and in this case 15312 * the tp->gput_ts is correctly set based on 15313 * the last ack that arrived (no need to 15314 * set things up when an ack comes in). 15315 */ 15316 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15317 if ((my_rsm == NULL) || 15318 (my_rsm->r_rtr_cnt != 1)) { 15319 /* retransmission? */ 15320 goto use_latest; 15321 } 15322 } else { 15323 if (rack->r_ctl.rc_first_appl == NULL) { 15324 /* 15325 * If rc_first_appl is NULL 15326 * then the cnt should be 0. 15327 * This is probably an error, maybe 15328 * a KASSERT would be approprate. 15329 */ 15330 goto use_latest; 15331 } 15332 /* 15333 * If we have a marker pointer to the last one that is 15334 * app limited we can use that, but we need to set 15335 * things up so that when it gets ack'ed we record 15336 * the ack time (if its not already acked). 15337 */ 15338 rack->app_limited_needs_set = 1; 15339 /* 15340 * We want to get to the rsm that is either 15341 * next with space i.e. over 1 MSS or the one 15342 * after that (after the app-limited). 15343 */ 15344 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15345 rack->r_ctl.rc_first_appl); 15346 if (my_rsm) { 15347 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15348 /* Have to use the next one */ 15349 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15350 my_rsm); 15351 else { 15352 /* Use after the first MSS of it is acked */ 15353 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15354 goto start_set; 15355 } 15356 } 15357 if ((my_rsm == NULL) || 15358 (my_rsm->r_rtr_cnt != 1)) { 15359 /* 15360 * Either its a retransmit or 15361 * the last is the app-limited one. 15362 */ 15363 goto use_latest; 15364 } 15365 } 15366 tp->gput_seq = my_rsm->r_start; 15367 start_set: 15368 if (my_rsm->r_flags & RACK_ACKED) { 15369 /* 15370 * This one has been acked use the arrival ack time 15371 */ 15372 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15373 rack->app_limited_needs_set = 0; 15374 } 15375 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15376 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15377 rack_log_pacing_delay_calc(rack, 15378 tp->gput_seq, 15379 tp->gput_ack, 15380 (uint64_t)my_rsm, 15381 tp->gput_ts, 15382 rack->r_ctl.rc_app_limited_cnt, 15383 9, 15384 __LINE__, NULL, 0); 15385 return; 15386 } 15387 15388 use_latest: 15389 /* 15390 * We don't know how long we may have been 15391 * idle or if this is the first-send. Lets 15392 * setup the flag so we will trim off 15393 * the first ack'd data so we get a true 15394 * measurement. 
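 * (app_limited_needs_set is that flag; when the first ack for this
 * data arrives the measurement start is re-anchored to that ack, so
 * any idle time that preceded this send is not counted against the
 * goodput estimate.)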
15395 */ 15396 rack->app_limited_needs_set = 1; 15397 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15398 /* Find this guy so we can pull the send time */ 15399 fe.r_start = startseq; 15400 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15401 if (my_rsm) { 15402 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15403 if (my_rsm->r_flags & RACK_ACKED) { 15404 /* 15405 * Unlikely since its probably what was 15406 * just transmitted (but I am paranoid). 15407 */ 15408 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15409 rack->app_limited_needs_set = 0; 15410 } 15411 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15412 /* This also is unlikely */ 15413 tp->gput_seq = my_rsm->r_start; 15414 } 15415 } else { 15416 /* 15417 * TSNH unless we have some send-map limit, 15418 * and even at that it should not be hitting 15419 * that limit (we should have stopped sending). 15420 */ 15421 struct timeval tv; 15422 15423 microuptime(&tv); 15424 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15425 } 15426 rack_log_pacing_delay_calc(rack, 15427 tp->gput_seq, 15428 tp->gput_ack, 15429 (uint64_t)my_rsm, 15430 tp->gput_ts, 15431 rack->r_ctl.rc_app_limited_cnt, 15432 9, __LINE__, NULL, 0); 15433 } 15434 15435 static inline uint32_t 15436 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15437 uint32_t avail, int32_t sb_offset) 15438 { 15439 uint32_t len; 15440 uint32_t sendwin; 15441 15442 if (tp->snd_wnd > cwnd_to_use) 15443 sendwin = cwnd_to_use; 15444 else 15445 sendwin = tp->snd_wnd; 15446 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15447 /* We never want to go over our peers rcv-window */ 15448 len = 0; 15449 } else { 15450 uint32_t flight; 15451 15452 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15453 if (flight >= sendwin) { 15454 /* 15455 * We have in flight what we are allowed by cwnd (if 15456 * it was rwnd blocking it would have hit above out 15457 * >= tp->snd_wnd). 15458 */ 15459 return (0); 15460 } 15461 len = sendwin - flight; 15462 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15463 /* We would send too much (beyond the rwnd) */ 15464 len = tp->snd_wnd - ctf_outstanding(tp); 15465 } 15466 if ((len + sb_offset) > avail) { 15467 /* 15468 * We don't have that much in the SB, how much is 15469 * there? 
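 * As a rough example (hypothetical numbers): cwnd_to_use = 64,000
 * and snd_wnd = 48,000 give sendwin = 48,000; with 20,000 bytes in
 * flight (and nothing SACKed, so outstanding equals flight) len
 * starts at 28,000; if only 24,000 bytes remain in the socket
 * buffer past sb_offset we are trimmed down to 24,000 here.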
15470 */ 15471 len = avail - sb_offset; 15472 } 15473 } 15474 return (len); 15475 } 15476 15477 static void 15478 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15479 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15480 int rsm_is_null, int optlen, int line, uint16_t mode) 15481 { 15482 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15483 union tcp_log_stackspecific log; 15484 struct timeval tv; 15485 15486 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15487 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15488 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15489 log.u_bbr.flex1 = error; 15490 log.u_bbr.flex2 = flags; 15491 log.u_bbr.flex3 = rsm_is_null; 15492 log.u_bbr.flex4 = ipoptlen; 15493 log.u_bbr.flex5 = tp->rcv_numsacks; 15494 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15495 log.u_bbr.flex7 = optlen; 15496 log.u_bbr.flex8 = rack->r_fsb_inited; 15497 log.u_bbr.applimited = rack->r_fast_output; 15498 log.u_bbr.bw_inuse = rack_get_bw(rack); 15499 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15500 log.u_bbr.cwnd_gain = mode; 15501 log.u_bbr.pkts_out = orig_len; 15502 log.u_bbr.lt_epoch = len; 15503 log.u_bbr.delivered = line; 15504 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15505 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15506 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15507 len, &log, false, NULL, NULL, 0, &tv); 15508 } 15509 } 15510 15511 15512 static struct mbuf * 15513 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15514 struct rack_fast_send_blk *fsb, 15515 int32_t seglimit, int32_t segsize, int hw_tls) 15516 { 15517 #ifdef KERN_TLS 15518 struct ktls_session *tls, *ntls; 15519 struct mbuf *start; 15520 #endif 15521 struct mbuf *m, *n, **np, *smb; 15522 struct mbuf *top; 15523 int32_t off, soff; 15524 int32_t len = *plen; 15525 int32_t fragsize; 15526 int32_t len_cp = 0; 15527 uint32_t mlen, frags; 15528 15529 soff = off = the_off; 15530 smb = m = the_m; 15531 np = ⊤ 15532 top = NULL; 15533 #ifdef KERN_TLS 15534 if (hw_tls && (m->m_flags & M_EXTPG)) 15535 tls = m->m_epg_tls; 15536 else 15537 tls = NULL; 15538 start = m; 15539 #endif 15540 while (len > 0) { 15541 if (m == NULL) { 15542 *plen = len_cp; 15543 break; 15544 } 15545 #ifdef KERN_TLS 15546 if (hw_tls) { 15547 if (m->m_flags & M_EXTPG) 15548 ntls = m->m_epg_tls; 15549 else 15550 ntls = NULL; 15551 15552 /* 15553 * Avoid mixing TLS records with handshake 15554 * data or TLS records from different 15555 * sessions. 15556 */ 15557 if (tls != ntls) { 15558 MPASS(m != start); 15559 *plen = len_cp; 15560 break; 15561 } 15562 } 15563 #endif 15564 mlen = min(len, m->m_len - off); 15565 if (seglimit) { 15566 /* 15567 * For M_EXTPG mbufs, add 3 segments 15568 * + 1 in case we are crossing page boundaries 15569 * + 2 in case the TLS hdr/trailer are used 15570 * It is cheaper to just add the segments 15571 * than it is to take the cache miss to look 15572 * at the mbuf ext_pgs state in detail. 15573 */ 15574 if (m->m_flags & M_EXTPG) { 15575 fragsize = min(segsize, PAGE_SIZE); 15576 frags = 3; 15577 } else { 15578 fragsize = segsize; 15579 frags = 0; 15580 } 15581 15582 /* Break if we really can't fit anymore. */ 15583 if ((frags + 1) >= seglimit) { 15584 *plen = len_cp; 15585 break; 15586 } 15587 15588 /* 15589 * Reduce size if you can't copy the whole 15590 * mbuf. If we can't copy the whole mbuf, also 15591 * adjust len so the loop will end after this 15592 * mbuf. 
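 * As a rough example (hypothetical numbers): for an M_EXTPG mbuf
 * with fragsize = 4,096, frags = 3, seglimit = 10 and mlen =
 * 32,768, howmany(32,768, 4,096) = 8 and 3 + 8 >= 10, so mlen is
 * cut back to (10 - 3 - 1) * 4,096 = 24,576 and len is clipped so
 * this is the last mbuf we copy.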
15593 */ 15594 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15595 mlen = (seglimit - frags - 1) * fragsize; 15596 len = mlen; 15597 *plen = len_cp + len; 15598 } 15599 frags += howmany(mlen, fragsize); 15600 if (frags == 0) 15601 frags++; 15602 seglimit -= frags; 15603 KASSERT(seglimit > 0, 15604 ("%s: seglimit went too low", __func__)); 15605 } 15606 n = m_get(M_NOWAIT, m->m_type); 15607 *np = n; 15608 if (n == NULL) 15609 goto nospace; 15610 n->m_len = mlen; 15611 soff += mlen; 15612 len_cp += n->m_len; 15613 if (m->m_flags & (M_EXT|M_EXTPG)) { 15614 n->m_data = m->m_data + off; 15615 mb_dupcl(n, m); 15616 } else { 15617 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15618 (u_int)n->m_len); 15619 } 15620 len -= n->m_len; 15621 off = 0; 15622 m = m->m_next; 15623 np = &n->m_next; 15624 if (len || (soff == smb->m_len)) { 15625 /* 15626 * We have more so we move forward or 15627 * we have consumed the entire mbuf and 15628 * len has fell to 0. 15629 */ 15630 soff = 0; 15631 smb = m; 15632 } 15633 15634 } 15635 if (fsb != NULL) { 15636 fsb->m = smb; 15637 fsb->off = soff; 15638 if (smb) { 15639 /* 15640 * Save off the size of the mbuf. We do 15641 * this so that we can recognize when it 15642 * has been trimmed by sbcut() as acks 15643 * come in. 15644 */ 15645 fsb->o_m_len = smb->m_len; 15646 } else { 15647 /* 15648 * This is the case where the next mbuf went to NULL. This 15649 * means with this copy we have sent everything in the sb. 15650 * In theory we could clear the fast_output flag, but lets 15651 * not since its possible that we could get more added 15652 * and acks that call the extend function which would let 15653 * us send more. 15654 */ 15655 fsb->o_m_len = 0; 15656 } 15657 } 15658 return (top); 15659 nospace: 15660 if (top) 15661 m_freem(top); 15662 return (NULL); 15663 15664 } 15665 15666 /* 15667 * This is a copy of m_copym(), taking the TSO segment size/limit 15668 * constraints into account, and advancing the sndptr as it goes. 15669 */ 15670 static struct mbuf * 15671 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15672 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15673 { 15674 struct mbuf *m, *n; 15675 int32_t soff; 15676 15677 soff = rack->r_ctl.fsb.off; 15678 m = rack->r_ctl.fsb.m; 15679 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15680 /* 15681 * The mbuf had the front of it chopped off by an ack 15682 * we need to adjust the soff/off by that difference. 15683 */ 15684 uint32_t delta; 15685 15686 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15687 soff -= delta; 15688 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15689 /* 15690 * The mbuf was expanded probably by 15691 * a m_compress. Just update o_m_len. 
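 * For the sbcut case above, a rough example (hypothetical numbers):
 * if o_m_len was 1,448 and acked data trimmed the mbuf to
 * m_len = 1,000, then delta = 448 and soff is pulled back by 448 so
 * it still points at the same not-yet-sent byte.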
15692 */ 15693 rack->r_ctl.fsb.o_m_len = m->m_len; 15694 } 15695 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15696 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15697 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15698 __FUNCTION__, 15699 rack, *plen, m, m->m_len)); 15700 /* Save off the right location before we copy and advance */ 15701 *s_soff = soff; 15702 *s_mb = rack->r_ctl.fsb.m; 15703 n = rack_fo_base_copym(m, soff, plen, 15704 &rack->r_ctl.fsb, 15705 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15706 return (n); 15707 } 15708 15709 static int 15710 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15711 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15712 { 15713 /* 15714 * Enter the fast retransmit path. We are given that a sched_pin is 15715 * in place (if accounting is compliled in) and the cycle count taken 15716 * at the entry is in the ts_val. The concept her is that the rsm 15717 * now holds the mbuf offsets and such so we can directly transmit 15718 * without a lot of overhead, the len field is already set for 15719 * us to prohibit us from sending too much (usually its 1MSS). 15720 */ 15721 struct ip *ip = NULL; 15722 struct udphdr *udp = NULL; 15723 struct tcphdr *th = NULL; 15724 struct mbuf *m = NULL; 15725 struct inpcb *inp; 15726 uint8_t *cpto; 15727 struct tcp_log_buffer *lgb; 15728 #ifdef TCP_ACCOUNTING 15729 uint64_t crtsc; 15730 int cnt_thru = 1; 15731 #endif 15732 struct tcpopt to; 15733 u_char opt[TCP_MAXOLEN]; 15734 uint32_t hdrlen, optlen; 15735 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0; 15736 uint32_t us_cts; 15737 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15738 uint32_t if_hw_tsomaxsegsize; 15739 15740 #ifdef INET6 15741 struct ip6_hdr *ip6 = NULL; 15742 15743 if (rack->r_is_v6) { 15744 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15745 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15746 } else 15747 #endif /* INET6 */ 15748 { 15749 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15750 hdrlen = sizeof(struct tcpiphdr); 15751 } 15752 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15753 goto failed; 15754 } 15755 if (doing_tlp) { 15756 /* Its a TLP add the flag, it may already be there but be sure */ 15757 rsm->r_flags |= RACK_TLP; 15758 } else { 15759 /* If it was a TLP it is not not on this retransmit */ 15760 rsm->r_flags &= ~RACK_TLP; 15761 } 15762 startseq = rsm->r_start; 15763 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15764 inp = rack->rc_inp; 15765 to.to_flags = 0; 15766 flags = tcp_outflags[tp->t_state]; 15767 if (flags & (TH_SYN|TH_RST)) { 15768 goto failed; 15769 } 15770 if (rsm->r_flags & RACK_HAS_FIN) { 15771 /* We can't send a FIN here */ 15772 goto failed; 15773 } 15774 if (flags & TH_FIN) { 15775 /* We never send a FIN */ 15776 flags &= ~TH_FIN; 15777 } 15778 if (tp->t_flags & TF_RCVD_TSTMP) { 15779 to.to_tsval = ms_cts + tp->ts_offset; 15780 to.to_tsecr = tp->ts_recent; 15781 to.to_flags = TOF_TS; 15782 } 15783 optlen = tcp_addoptions(&to, opt); 15784 hdrlen += optlen; 15785 udp = rack->r_ctl.fsb.udp; 15786 if (udp) 15787 hdrlen += sizeof(struct udphdr); 15788 if (rack->r_ctl.rc_pace_max_segs) 15789 max_val = rack->r_ctl.rc_pace_max_segs; 15790 else if (rack->rc_user_set_max_segs) 15791 max_val = rack->rc_user_set_max_segs * segsiz; 15792 else 15793 max_val = len; 15794 if ((tp->t_flags & TF_TSO) && 15795 V_tcp_do_tso && 
15796 (len > segsiz) && 15797 (tp->t_port == 0)) 15798 tso = 1; 15799 #ifdef INET6 15800 if (MHLEN < hdrlen + max_linkhdr) 15801 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15802 else 15803 #endif 15804 m = m_gethdr(M_NOWAIT, MT_DATA); 15805 if (m == NULL) 15806 goto failed; 15807 m->m_data += max_linkhdr; 15808 m->m_len = hdrlen; 15809 th = rack->r_ctl.fsb.th; 15810 /* Establish the len to send */ 15811 if (len > max_val) 15812 len = max_val; 15813 if ((tso) && (len + optlen > tp->t_maxseg)) { 15814 uint32_t if_hw_tsomax; 15815 int32_t max_len; 15816 15817 /* extract TSO information */ 15818 if_hw_tsomax = tp->t_tsomax; 15819 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15820 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15821 /* 15822 * Check if we should limit by maximum payload 15823 * length: 15824 */ 15825 if (if_hw_tsomax != 0) { 15826 /* compute maximum TSO length */ 15827 max_len = (if_hw_tsomax - hdrlen - 15828 max_linkhdr); 15829 if (max_len <= 0) { 15830 goto failed; 15831 } else if (len > max_len) { 15832 len = max_len; 15833 } 15834 } 15835 if (len <= segsiz) { 15836 /* 15837 * In case there are too many small fragments don't 15838 * use TSO: 15839 */ 15840 tso = 0; 15841 } 15842 } else { 15843 tso = 0; 15844 } 15845 if ((tso == 0) && (len > segsiz)) 15846 len = segsiz; 15847 us_cts = tcp_get_usecs(tv); 15848 if ((len == 0) || 15849 (len <= MHLEN - hdrlen - max_linkhdr)) { 15850 goto failed; 15851 } 15852 th->th_seq = htonl(rsm->r_start); 15853 th->th_ack = htonl(tp->rcv_nxt); 15854 /* 15855 * The PUSH bit should only be applied 15856 * if the full retransmission is made. If 15857 * we are sending less than this is the 15858 * left hand edge and should not have 15859 * the PUSH bit. 15860 */ 15861 if ((rsm->r_flags & RACK_HAD_PUSH) && 15862 (len == (rsm->r_end - rsm->r_start))) 15863 flags |= TH_PUSH; 15864 th->th_flags = flags; 15865 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15866 if (th->th_win == 0) { 15867 tp->t_sndzerowin++; 15868 tp->t_flags |= TF_RXWIN0SENT; 15869 } else 15870 tp->t_flags &= ~TF_RXWIN0SENT; 15871 if (rsm->r_flags & RACK_TLP) { 15872 /* 15873 * TLP should not count in retran count, but 15874 * in its own bin 15875 */ 15876 counter_u64_add(rack_tlp_retran, 1); 15877 counter_u64_add(rack_tlp_retran_bytes, len); 15878 } else { 15879 tp->t_sndrexmitpack++; 15880 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15881 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15882 } 15883 #ifdef STATS 15884 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15885 len); 15886 #endif 15887 if (rsm->m == NULL) 15888 goto failed; 15889 if (rsm->orig_m_len != rsm->m->m_len) { 15890 /* Fix up the orig_m_len and possibly the mbuf offset */ 15891 rack_adjust_orig_mlen(rsm); 15892 } 15893 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15894 if (len <= segsiz) { 15895 /* 15896 * Must have ran out of mbufs for the copy 15897 * shorten it to no longer need tso. Lets 15898 * not put on sendalot since we are low on 15899 * mbufs. 
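 * (i.e. the copy above came back with at most one segment worth of
 * data, so TSO no longer buys us anything.)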
15900 */ 15901 tso = 0; 15902 } 15903 if ((m->m_next == NULL) || (len <= 0)){ 15904 goto failed; 15905 } 15906 if (udp) { 15907 if (rack->r_is_v6) 15908 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15909 else 15910 ulen = hdrlen + len - sizeof(struct ip); 15911 udp->uh_ulen = htons(ulen); 15912 } 15913 m->m_pkthdr.rcvif = (struct ifnet *)0; 15914 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15915 #ifdef INET6 15916 if (rack->r_is_v6) { 15917 if (tp->t_port) { 15918 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15919 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15920 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15921 th->th_sum = htons(0); 15922 UDPSTAT_INC(udps_opackets); 15923 } else { 15924 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15925 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15926 th->th_sum = in6_cksum_pseudo(ip6, 15927 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15928 0); 15929 } 15930 } 15931 #endif 15932 #if defined(INET6) && defined(INET) 15933 else 15934 #endif 15935 #ifdef INET 15936 { 15937 if (tp->t_port) { 15938 m->m_pkthdr.csum_flags = CSUM_UDP; 15939 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15940 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 15941 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15942 th->th_sum = htons(0); 15943 UDPSTAT_INC(udps_opackets); 15944 } else { 15945 m->m_pkthdr.csum_flags = CSUM_TCP; 15946 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15947 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15948 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15949 IPPROTO_TCP + len + optlen)); 15950 } 15951 /* IP version must be set here for ipv4/ipv6 checking later */ 15952 KASSERT(ip->ip_v == IPVERSION, 15953 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15954 } 15955 #endif 15956 if (tso) { 15957 KASSERT(len > tp->t_maxseg - optlen, 15958 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15959 m->m_pkthdr.csum_flags |= CSUM_TSO; 15960 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15961 } 15962 #ifdef INET6 15963 if (rack->r_is_v6) { 15964 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15965 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15966 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15967 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15968 else 15969 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15970 } 15971 #endif 15972 #if defined(INET) && defined(INET6) 15973 else 15974 #endif 15975 #ifdef INET 15976 { 15977 ip->ip_len = htons(m->m_pkthdr.len); 15978 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15979 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15980 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15981 if (tp->t_port == 0 || len < V_tcp_minmss) { 15982 ip->ip_off |= htons(IP_DF); 15983 } 15984 } else { 15985 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15986 } 15987 } 15988 #endif 15989 /* Time to copy in our header */ 15990 cpto = mtod(m, uint8_t *); 15991 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15992 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15993 if (optlen) { 15994 bcopy(opt, th + 1, optlen); 15995 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15996 } else { 15997 th->th_off = sizeof(struct tcphdr) >> 2; 15998 } 15999 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16000 union tcp_log_stackspecific log; 16001 16002 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16003 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 16004 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 16005 if (rack->rack_no_prr) 16006 
log.u_bbr.flex1 = 0; 16007 else 16008 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16009 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16010 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16011 log.u_bbr.flex4 = max_val; 16012 log.u_bbr.flex5 = 0; 16013 /* Save off the early/late values */ 16014 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16015 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16016 log.u_bbr.bw_inuse = rack_get_bw(rack); 16017 if (doing_tlp == 0) 16018 log.u_bbr.flex8 = 1; 16019 else 16020 log.u_bbr.flex8 = 2; 16021 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16022 log.u_bbr.flex7 = 55; 16023 log.u_bbr.pkts_out = tp->t_maxseg; 16024 log.u_bbr.timeStamp = cts; 16025 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16026 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16027 log.u_bbr.delivered = 0; 16028 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16029 len, &log, false, NULL, NULL, 0, tv); 16030 } else 16031 lgb = NULL; 16032 #ifdef INET6 16033 if (rack->r_is_v6) { 16034 error = ip6_output(m, NULL, 16035 &inp->inp_route6, 16036 0, NULL, NULL, inp); 16037 } 16038 #endif 16039 #if defined(INET) && defined(INET6) 16040 else 16041 #endif 16042 #ifdef INET 16043 { 16044 error = ip_output(m, NULL, 16045 &inp->inp_route, 16046 0, 0, inp); 16047 } 16048 #endif 16049 m = NULL; 16050 if (lgb) { 16051 lgb->tlb_errno = error; 16052 lgb = NULL; 16053 } 16054 if (error) { 16055 goto failed; 16056 } 16057 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 16058 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 16059 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 16060 rack->rc_tlp_in_progress = 1; 16061 rack->r_ctl.rc_tlp_cnt_out++; 16062 } 16063 if (error == 0) { 16064 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 16065 if (doing_tlp) { 16066 rack->rc_last_sent_tlp_past_cumack = 0; 16067 rack->rc_last_sent_tlp_seq_valid = 1; 16068 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 16069 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 16070 } 16071 } 16072 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16073 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16074 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 16075 rack->r_ctl.retran_during_recovery += len; 16076 { 16077 int idx; 16078 16079 idx = (len / segsiz) + 3; 16080 if (idx >= TCP_MSS_ACCT_ATIMER) 16081 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16082 else 16083 counter_u64_add(rack_out_size[idx], 1); 16084 } 16085 if (tp->t_rtttime == 0) { 16086 tp->t_rtttime = ticks; 16087 tp->t_rtseq = startseq; 16088 KMOD_TCPSTAT_INC(tcps_segstimed); 16089 } 16090 counter_u64_add(rack_fto_rsm_send, 1); 16091 if (error && (error == ENOBUFS)) { 16092 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16093 if (rack->rc_enobuf < 0x7f) 16094 rack->rc_enobuf++; 16095 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16096 slot = 10 * HPTS_USEC_IN_MSEC; 16097 } else 16098 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16099 if ((slot == 0) || 16100 (rack->rc_always_pace == 0) || 16101 (rack->r_rr_config == 1)) { 16102 /* 16103 * We have no pacing set or we 16104 * are using old-style rack or 16105 * we are overriden to use the old 1ms pacing. 
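 * In other words we simply space transmissions by the minimum
 * timer (rc_min_to) rather than by a bandwidth-derived delay.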
16106 */ 16107 slot = rack->r_ctl.rc_min_to; 16108 } 16109 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16110 if (rack->r_must_retran) { 16111 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 16112 if ((SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) || 16113 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 16114 /* 16115 * We have retransmitted all we need. If 16116 * RACK_MUST_RXT is not set then we need to 16117 * not retransmit this guy. 16118 */ 16119 rack->r_must_retran = 0; 16120 rack->r_ctl.rc_out_at_rto = 0; 16121 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 16122 /* Not one we should rxt */ 16123 goto failed; 16124 } else { 16125 /* Clear the flag */ 16126 rsm->r_flags &= ~RACK_MUST_RXT; 16127 } 16128 } else { 16129 /* Remove the flag */ 16130 rsm->r_flags &= ~RACK_MUST_RXT; 16131 } 16132 } 16133 #ifdef TCP_ACCOUNTING 16134 crtsc = get_cyclecount(); 16135 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16136 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16137 } 16138 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16139 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16140 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16141 } 16142 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16143 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16144 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16145 } 16146 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16147 sched_unpin(); 16148 #endif 16149 return (0); 16150 failed: 16151 if (m) 16152 m_free(m); 16153 return (-1); 16154 } 16155 16156 static void 16157 rack_sndbuf_autoscale(struct tcp_rack *rack) 16158 { 16159 /* 16160 * Automatic sizing of send socket buffer. Often the send buffer 16161 * size is not optimally adjusted to the actual network conditions 16162 * at hand (delay bandwidth product). Setting the buffer size too 16163 * small limits throughput on links with high bandwidth and high 16164 * delay (eg. trans-continental/oceanic links). Setting the 16165 * buffer size too big consumes too much real kernel memory, 16166 * especially with many connections on busy servers. 16167 * 16168 * The criteria to step up the send buffer one notch are: 16169 * 1. receive window of remote host is larger than send buffer 16170 * (with a fudge factor of 5/4th); 16171 * 2. send buffer is filled to 7/8th with data (so we actually 16172 * have data to make use of it); 16173 * 3. send buffer fill has not hit maximal automatic size; 16174 * 4. our send window (slow start and cogestion controlled) is 16175 * larger than sent but unacknowledged data in send buffer. 16176 * 16177 * Note that the rack version moves things much faster since 16178 * we want to avoid hitting cache lines in the rack_fast_output() 16179 * path so this is called much less often and thus moves 16180 * the SB forward by a percentage. 
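 * As a rough example (hypothetical numbers): with sb_hiwat = 65,536
 * and rack_autosndbuf_inc = 20 (percent), scaleup = 13,107 and the
 * buffer grows to about 78,643 bytes in one step (subject to the
 * V_tcp_autosndbuf_max cap), rather than by the fixed
 * V_tcp_autosndbuf_inc increments the base stack would use.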
16181 */ 16182 struct socket *so; 16183 struct tcpcb *tp; 16184 uint32_t sendwin, scaleup; 16185 16186 tp = rack->rc_tp; 16187 so = rack->rc_inp->inp_socket; 16188 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16189 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16190 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16191 sbused(&so->so_snd) >= 16192 (so->so_snd.sb_hiwat / 8 * 7) && 16193 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16194 sendwin >= (sbused(&so->so_snd) - 16195 (tp->snd_nxt - tp->snd_una))) { 16196 if (rack_autosndbuf_inc) 16197 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16198 else 16199 scaleup = V_tcp_autosndbuf_inc; 16200 if (scaleup < V_tcp_autosndbuf_inc) 16201 scaleup = V_tcp_autosndbuf_inc; 16202 scaleup += so->so_snd.sb_hiwat; 16203 if (scaleup > V_tcp_autosndbuf_max) 16204 scaleup = V_tcp_autosndbuf_max; 16205 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 16206 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16207 } 16208 } 16209 } 16210 16211 static int 16212 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16213 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16214 { 16215 /* 16216 * Enter to do fast output. We are given that the sched_pin is 16217 * in place (if accounting is compiled in) and the cycle count taken 16218 * at entry is in place in ts_val. The idea here is that 16219 * we know how many more bytes needs to be sent (presumably either 16220 * during pacing or to fill the cwnd and that was greater than 16221 * the max-burst). We have how much to send and all the info we 16222 * need to just send. 16223 */ 16224 struct ip *ip = NULL; 16225 struct udphdr *udp = NULL; 16226 struct tcphdr *th = NULL; 16227 struct mbuf *m, *s_mb; 16228 struct inpcb *inp; 16229 uint8_t *cpto; 16230 struct tcp_log_buffer *lgb; 16231 #ifdef TCP_ACCOUNTING 16232 uint64_t crtsc; 16233 #endif 16234 struct tcpopt to; 16235 u_char opt[TCP_MAXOLEN]; 16236 uint32_t hdrlen, optlen; 16237 int cnt_thru = 1; 16238 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0; 16239 uint32_t us_cts, s_soff; 16240 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16241 uint32_t if_hw_tsomaxsegsize; 16242 uint16_t add_flag = RACK_SENT_FP; 16243 #ifdef INET6 16244 struct ip6_hdr *ip6 = NULL; 16245 16246 if (rack->r_is_v6) { 16247 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16248 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16249 } else 16250 #endif /* INET6 */ 16251 { 16252 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16253 hdrlen = sizeof(struct tcpiphdr); 16254 } 16255 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16256 m = NULL; 16257 goto failed; 16258 } 16259 startseq = tp->snd_max; 16260 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16261 inp = rack->rc_inp; 16262 len = rack->r_ctl.fsb.left_to_send; 16263 to.to_flags = 0; 16264 flags = rack->r_ctl.fsb.tcp_flags; 16265 if (tp->t_flags & TF_RCVD_TSTMP) { 16266 to.to_tsval = ms_cts + tp->ts_offset; 16267 to.to_tsecr = tp->ts_recent; 16268 to.to_flags = TOF_TS; 16269 } 16270 optlen = tcp_addoptions(&to, opt); 16271 hdrlen += optlen; 16272 udp = rack->r_ctl.fsb.udp; 16273 if (udp) 16274 hdrlen += sizeof(struct udphdr); 16275 if (rack->r_ctl.rc_pace_max_segs) 16276 max_val = rack->r_ctl.rc_pace_max_segs; 16277 else if (rack->rc_user_set_max_segs) 16278 max_val = rack->rc_user_set_max_segs * segsiz; 16279 else 16280 max_val = len; 16281 if ((tp->t_flags & TF_TSO) && 16282 
V_tcp_do_tso && 16283 (len > segsiz) && 16284 (tp->t_port == 0)) 16285 tso = 1; 16286 again: 16287 #ifdef INET6 16288 if (MHLEN < hdrlen + max_linkhdr) 16289 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16290 else 16291 #endif 16292 m = m_gethdr(M_NOWAIT, MT_DATA); 16293 if (m == NULL) 16294 goto failed; 16295 m->m_data += max_linkhdr; 16296 m->m_len = hdrlen; 16297 th = rack->r_ctl.fsb.th; 16298 /* Establish the len to send */ 16299 if (len > max_val) 16300 len = max_val; 16301 if ((tso) && (len + optlen > tp->t_maxseg)) { 16302 uint32_t if_hw_tsomax; 16303 int32_t max_len; 16304 16305 /* extract TSO information */ 16306 if_hw_tsomax = tp->t_tsomax; 16307 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16308 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16309 /* 16310 * Check if we should limit by maximum payload 16311 * length: 16312 */ 16313 if (if_hw_tsomax != 0) { 16314 /* compute maximum TSO length */ 16315 max_len = (if_hw_tsomax - hdrlen - 16316 max_linkhdr); 16317 if (max_len <= 0) { 16318 goto failed; 16319 } else if (len > max_len) { 16320 len = max_len; 16321 } 16322 } 16323 if (len <= segsiz) { 16324 /* 16325 * In case there are too many small fragments don't 16326 * use TSO: 16327 */ 16328 tso = 0; 16329 } 16330 } else { 16331 tso = 0; 16332 } 16333 if ((tso == 0) && (len > segsiz)) 16334 len = segsiz; 16335 us_cts = tcp_get_usecs(tv); 16336 if ((len == 0) || 16337 (len <= MHLEN - hdrlen - max_linkhdr)) { 16338 goto failed; 16339 } 16340 sb_offset = tp->snd_max - tp->snd_una; 16341 th->th_seq = htonl(tp->snd_max); 16342 th->th_ack = htonl(tp->rcv_nxt); 16343 th->th_flags = flags; 16344 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16345 if (th->th_win == 0) { 16346 tp->t_sndzerowin++; 16347 tp->t_flags |= TF_RXWIN0SENT; 16348 } else 16349 tp->t_flags &= ~TF_RXWIN0SENT; 16350 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16351 KMOD_TCPSTAT_INC(tcps_sndpack); 16352 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16353 #ifdef STATS 16354 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16355 len); 16356 #endif 16357 if (rack->r_ctl.fsb.m == NULL) 16358 goto failed; 16359 16360 /* s_mb and s_soff are saved for rack_log_output */ 16361 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16362 &s_mb, &s_soff); 16363 if (len <= segsiz) { 16364 /* 16365 * Must have ran out of mbufs for the copy 16366 * shorten it to no longer need tso. Lets 16367 * not put on sendalot since we are low on 16368 * mbufs. 16369 */ 16370 tso = 0; 16371 } 16372 if (rack->r_ctl.fsb.rfo_apply_push && 16373 (len == rack->r_ctl.fsb.left_to_send)) { 16374 th->th_flags |= TH_PUSH; 16375 add_flag |= RACK_HAD_PUSH; 16376 } 16377 if ((m->m_next == NULL) || (len <= 0)){ 16378 goto failed; 16379 } 16380 if (udp) { 16381 if (rack->r_is_v6) 16382 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16383 else 16384 ulen = hdrlen + len - sizeof(struct ip); 16385 udp->uh_ulen = htons(ulen); 16386 } 16387 m->m_pkthdr.rcvif = (struct ifnet *)0; 16388 if (tp->t_state == TCPS_ESTABLISHED && 16389 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16390 /* 16391 * If the peer has ECN, mark data packets with ECN capable 16392 * transmission (ECT). Ignore pure ack packets, 16393 * retransmissions. 
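 * In this fast-output path len > 0 and snd_nxt is at (or past)
 * snd_max, so the SEQ_GEQ() check below always sees new data; pure
 * acks and retransmissions never come through here.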
16394 */ 16395 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) { 16396 #ifdef INET6 16397 if (rack->r_is_v6) 16398 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 16399 else 16400 #endif 16401 ip->ip_tos |= IPTOS_ECN_ECT0; 16402 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 16403 /* 16404 * Reply with proper ECN notifications. 16405 * Only set CWR on new data segments. 16406 */ 16407 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 16408 flags |= TH_CWR; 16409 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 16410 } 16411 } 16412 if (tp->t_flags2 & TF2_ECN_SND_ECE) 16413 flags |= TH_ECE; 16414 } 16415 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16416 #ifdef INET6 16417 if (rack->r_is_v6) { 16418 if (tp->t_port) { 16419 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16420 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16421 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16422 th->th_sum = htons(0); 16423 UDPSTAT_INC(udps_opackets); 16424 } else { 16425 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16426 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16427 th->th_sum = in6_cksum_pseudo(ip6, 16428 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16429 0); 16430 } 16431 } 16432 #endif 16433 #if defined(INET6) && defined(INET) 16434 else 16435 #endif 16436 #ifdef INET 16437 { 16438 if (tp->t_port) { 16439 m->m_pkthdr.csum_flags = CSUM_UDP; 16440 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16441 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16442 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16443 th->th_sum = htons(0); 16444 UDPSTAT_INC(udps_opackets); 16445 } else { 16446 m->m_pkthdr.csum_flags = CSUM_TCP; 16447 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16448 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16449 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16450 IPPROTO_TCP + len + optlen)); 16451 } 16452 /* IP version must be set here for ipv4/ipv6 checking later */ 16453 KASSERT(ip->ip_v == IPVERSION, 16454 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16455 } 16456 #endif 16457 if (tso) { 16458 KASSERT(len > tp->t_maxseg - optlen, 16459 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16460 m->m_pkthdr.csum_flags |= CSUM_TSO; 16461 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16462 } 16463 #ifdef INET6 16464 if (rack->r_is_v6) { 16465 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16466 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16467 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16468 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16469 else 16470 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16471 } 16472 #endif 16473 #if defined(INET) && defined(INET6) 16474 else 16475 #endif 16476 #ifdef INET 16477 { 16478 ip->ip_len = htons(m->m_pkthdr.len); 16479 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16480 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16481 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16482 if (tp->t_port == 0 || len < V_tcp_minmss) { 16483 ip->ip_off |= htons(IP_DF); 16484 } 16485 } else { 16486 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16487 } 16488 } 16489 #endif 16490 /* Time to copy in our header */ 16491 cpto = mtod(m, uint8_t *); 16492 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16493 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16494 if (optlen) { 16495 bcopy(opt, th + 1, optlen); 16496 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16497 } else { 16498 th->th_off = sizeof(struct tcphdr) >> 2; 16499 } 16500 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16501 union 
tcp_log_stackspecific log; 16502 16503 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16504 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 16505 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 16506 if (rack->rack_no_prr) 16507 log.u_bbr.flex1 = 0; 16508 else 16509 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16510 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16511 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16512 log.u_bbr.flex4 = max_val; 16513 log.u_bbr.flex5 = 0; 16514 /* Save off the early/late values */ 16515 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16516 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16517 log.u_bbr.bw_inuse = rack_get_bw(rack); 16518 log.u_bbr.flex8 = 0; 16519 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16520 log.u_bbr.flex7 = 44; 16521 log.u_bbr.pkts_out = tp->t_maxseg; 16522 log.u_bbr.timeStamp = cts; 16523 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16524 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16525 log.u_bbr.delivered = 0; 16526 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16527 len, &log, false, NULL, NULL, 0, tv); 16528 } else 16529 lgb = NULL; 16530 #ifdef INET6 16531 if (rack->r_is_v6) { 16532 error = ip6_output(m, NULL, 16533 &inp->inp_route6, 16534 0, NULL, NULL, inp); 16535 } 16536 #endif 16537 #if defined(INET) && defined(INET6) 16538 else 16539 #endif 16540 #ifdef INET 16541 { 16542 error = ip_output(m, NULL, 16543 &inp->inp_route, 16544 0, 0, inp); 16545 } 16546 #endif 16547 if (lgb) { 16548 lgb->tlb_errno = error; 16549 lgb = NULL; 16550 } 16551 if (error) { 16552 *send_err = error; 16553 m = NULL; 16554 goto failed; 16555 } 16556 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16557 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16558 m = NULL; 16559 if (tp->snd_una == tp->snd_max) { 16560 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16561 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16562 tp->t_acktime = ticks; 16563 } 16564 if (error == 0) 16565 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16566 16567 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16568 tot_len += len; 16569 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16570 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16571 tp->snd_max += len; 16572 tp->snd_nxt = tp->snd_max; 16573 { 16574 int idx; 16575 16576 idx = (len / segsiz) + 3; 16577 if (idx >= TCP_MSS_ACCT_ATIMER) 16578 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16579 else 16580 counter_u64_add(rack_out_size[idx], 1); 16581 } 16582 if (len <= rack->r_ctl.fsb.left_to_send) 16583 rack->r_ctl.fsb.left_to_send -= len; 16584 else 16585 rack->r_ctl.fsb.left_to_send = 0; 16586 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16587 rack->r_fast_output = 0; 16588 rack->r_ctl.fsb.left_to_send = 0; 16589 /* At the end of fast_output scale up the sb */ 16590 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16591 rack_sndbuf_autoscale(rack); 16592 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16593 } 16594 if (tp->t_rtttime == 0) { 16595 tp->t_rtttime = ticks; 16596 tp->t_rtseq = startseq; 16597 KMOD_TCPSTAT_INC(tcps_segstimed); 16598 } 16599 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16600 (max_val > len) && 16601 (tso == 0)) { 16602 max_val -= len; 16603 len = segsiz; 16604 th = rack->r_ctl.fsb.th; 16605 cnt_thru++; 16606 goto again; 16607 } 16608 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16609 counter_u64_add(rack_fto_send, 1); 16610 slot = 
rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16611 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16612 #ifdef TCP_ACCOUNTING 16613 crtsc = get_cyclecount(); 16614 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16615 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16616 } 16617 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16618 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16619 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16620 } 16621 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16622 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16623 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16624 } 16625 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16626 sched_unpin(); 16627 #endif 16628 return (0); 16629 failed: 16630 if (m) 16631 m_free(m); 16632 rack->r_fast_output = 0; 16633 return (-1); 16634 } 16635 16636 static int 16637 rack_output(struct tcpcb *tp) 16638 { 16639 struct socket *so; 16640 uint32_t recwin; 16641 uint32_t sb_offset, s_moff = 0; 16642 int32_t len, flags, error = 0; 16643 struct mbuf *m, *s_mb = NULL; 16644 struct mbuf *mb; 16645 uint32_t if_hw_tsomaxsegcount = 0; 16646 uint32_t if_hw_tsomaxsegsize; 16647 int32_t segsiz, minseg; 16648 long tot_len_this_send = 0; 16649 #ifdef INET 16650 struct ip *ip = NULL; 16651 #endif 16652 #ifdef TCPDEBUG 16653 struct ipovly *ipov = NULL; 16654 #endif 16655 struct udphdr *udp = NULL; 16656 struct tcp_rack *rack; 16657 struct tcphdr *th; 16658 uint8_t pass = 0; 16659 uint8_t mark = 0; 16660 uint8_t wanted_cookie = 0; 16661 u_char opt[TCP_MAXOLEN]; 16662 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16663 uint32_t rack_seq; 16664 16665 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16666 unsigned ipsec_optlen = 0; 16667 16668 #endif 16669 int32_t idle, sendalot; 16670 int32_t sub_from_prr = 0; 16671 volatile int32_t sack_rxmit; 16672 struct rack_sendmap *rsm = NULL; 16673 int32_t tso, mtu; 16674 struct tcpopt to; 16675 int32_t slot = 0; 16676 int32_t sup_rack = 0; 16677 uint32_t cts, ms_cts, delayed, early; 16678 uint16_t add_flag = RACK_SENT_SP; 16679 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16680 uint8_t hpts_calling, doing_tlp = 0; 16681 uint32_t cwnd_to_use, pace_max_seg; 16682 int32_t do_a_prefetch = 0; 16683 int32_t prefetch_rsm = 0; 16684 int32_t orig_len = 0; 16685 struct timeval tv; 16686 int32_t prefetch_so_done = 0; 16687 struct tcp_log_buffer *lgb; 16688 struct inpcb *inp; 16689 struct sockbuf *sb; 16690 uint64_t ts_val = 0; 16691 #ifdef TCP_ACCOUNTING 16692 uint64_t crtsc; 16693 #endif 16694 #ifdef INET6 16695 struct ip6_hdr *ip6 = NULL; 16696 int32_t isipv6; 16697 #endif 16698 uint8_t filled_all = 0; 16699 bool hw_tls = false; 16700 16701 /* setup and take the cache hits here */ 16702 rack = (struct tcp_rack *)tp->t_fb_ptr; 16703 #ifdef TCP_ACCOUNTING 16704 sched_pin(); 16705 ts_val = get_cyclecount(); 16706 #endif 16707 hpts_calling = rack->rc_inp->inp_hpts_calls; 16708 NET_EPOCH_ASSERT(); 16709 INP_WLOCK_ASSERT(rack->rc_inp); 16710 #ifdef TCP_OFFLOAD 16711 if (tp->t_flags & TF_TOE) { 16712 #ifdef TCP_ACCOUNTING 16713 sched_unpin(); 16714 #endif 16715 return (tcp_offload_output(tp)); 16716 } 16717 #endif 16718 /* 16719 * For TFO connections in SYN_RECEIVED, only allow the initial 16720 * SYN|ACK and those sent by the retransmit timer. 
16721 */ 16722 if (IS_FASTOPEN(tp->t_flags) && 16723 (tp->t_state == TCPS_SYN_RECEIVED) && 16724 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16725 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16726 #ifdef TCP_ACCOUNTING 16727 sched_unpin(); 16728 #endif 16729 return (0); 16730 } 16731 #ifdef INET6 16732 if (rack->r_state) { 16733 /* Use the cache line loaded if possible */ 16734 isipv6 = rack->r_is_v6; 16735 } else { 16736 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16737 } 16738 #endif 16739 early = 0; 16740 cts = tcp_get_usecs(&tv); 16741 ms_cts = tcp_tv_to_mssectick(&tv); 16742 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16743 rack->rc_inp->inp_in_hpts) { 16744 /* 16745 * We are on the hpts for some timer but not hptsi output. 16746 * Remove from the hpts unconditionally. 16747 */ 16748 rack_timer_cancel(tp, rack, cts, __LINE__); 16749 } 16750 /* Are we pacing and late? */ 16751 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16752 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16753 /* We are delayed */ 16754 delayed = cts - rack->r_ctl.rc_last_output_to; 16755 } else { 16756 delayed = 0; 16757 } 16758 /* Do the timers, which may override the pacer */ 16759 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16760 if (rack_process_timers(tp, rack, cts, hpts_calling, &doing_tlp)) { 16761 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16762 #ifdef TCP_ACCOUNTING 16763 sched_unpin(); 16764 #endif 16765 return (0); 16766 } 16767 } 16768 if (rack->rc_in_persist) { 16769 if (rack->rc_inp->inp_in_hpts == 0) { 16770 /* Timer is not running */ 16771 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16772 } 16773 #ifdef TCP_ACCOUNTING 16774 sched_unpin(); 16775 #endif 16776 return (0); 16777 } 16778 if ((rack->r_timer_override) || 16779 (rack->rc_ack_can_sendout_data) || 16780 (delayed) || 16781 (tp->t_state < TCPS_ESTABLISHED)) { 16782 rack->rc_ack_can_sendout_data = 0; 16783 if (rack->rc_inp->inp_in_hpts) 16784 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 16785 } else if (rack->rc_inp->inp_in_hpts) { 16786 /* 16787 * On the hpts you can't pass even if ACKNOW is on, we will 16788 * when the hpts fires. 
16789 */ 16790 #ifdef TCP_ACCOUNTING 16791 crtsc = get_cyclecount(); 16792 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16793 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16794 } 16795 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16796 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16797 tp->tcp_cnt_counters[SND_BLOCKED]++; 16798 } 16799 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16800 sched_unpin(); 16801 #endif 16802 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16803 return (0); 16804 } 16805 rack->rc_inp->inp_hpts_calls = 0; 16806 /* Finish out both pacing early and late accounting */ 16807 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16808 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16809 early = rack->r_ctl.rc_last_output_to - cts; 16810 } else 16811 early = 0; 16812 if (delayed) { 16813 rack->r_ctl.rc_agg_delayed += delayed; 16814 rack->r_late = 1; 16815 } else if (early) { 16816 rack->r_ctl.rc_agg_early += early; 16817 rack->r_early = 1; 16818 } 16819 /* Now that early/late accounting is done turn off the flag */ 16820 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16821 rack->r_wanted_output = 0; 16822 rack->r_timer_override = 0; 16823 if ((tp->t_state != rack->r_state) && 16824 TCPS_HAVEESTABLISHED(tp->t_state)) { 16825 rack_set_state(tp, rack); 16826 } 16827 if ((rack->r_fast_output) && 16828 (doing_tlp == 0) && 16829 (tp->rcv_numsacks == 0)) { 16830 int ret; 16831 16832 error = 0; 16833 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16834 if (ret >= 0) 16835 return(ret); 16836 else if (error) { 16837 inp = rack->rc_inp; 16838 so = inp->inp_socket; 16839 sb = &so->so_snd; 16840 goto nomore; 16841 } 16842 } 16843 inp = rack->rc_inp; 16844 /* 16845 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16846 * only allow the initial SYN or SYN|ACK and those sent 16847 * by the retransmit timer. 16848 */ 16849 if (IS_FASTOPEN(tp->t_flags) && 16850 ((tp->t_state == TCPS_SYN_RECEIVED) || 16851 (tp->t_state == TCPS_SYN_SENT)) && 16852 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16853 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16854 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16855 so = inp->inp_socket; 16856 sb = &so->so_snd; 16857 goto just_return_nolock; 16858 } 16859 /* 16860 * Determine length of data that should be transmitted, and flags 16861 * that will be used. If there is some data or critical controls 16862 * (SYN, RST) to send, then transmit; otherwise, investigate 16863 * further. 
16864 */ 16865 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16866 if (tp->t_idle_reduce) { 16867 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16868 rack_cc_after_idle(rack, tp); 16869 } 16870 tp->t_flags &= ~TF_LASTIDLE; 16871 if (idle) { 16872 if (tp->t_flags & TF_MORETOCOME) { 16873 tp->t_flags |= TF_LASTIDLE; 16874 idle = 0; 16875 } 16876 } 16877 if ((tp->snd_una == tp->snd_max) && 16878 rack->r_ctl.rc_went_idle_time && 16879 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16880 idle = cts - rack->r_ctl.rc_went_idle_time; 16881 if (idle > rack_min_probertt_hold) { 16882 /* Count as a probe rtt */ 16883 if (rack->in_probe_rtt == 0) { 16884 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16885 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16886 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16887 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16888 } else { 16889 rack_exit_probertt(rack, cts); 16890 } 16891 } 16892 idle = 0; 16893 } 16894 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16895 rack_init_fsb_block(tp, rack); 16896 again: 16897 /* 16898 * If we've recently taken a timeout, snd_max will be greater than 16899 * snd_nxt. There may be SACK information that allows us to avoid 16900 * resending already delivered data. Adjust snd_nxt accordingly. 16901 */ 16902 sendalot = 0; 16903 cts = tcp_get_usecs(&tv); 16904 ms_cts = tcp_tv_to_mssectick(&tv); 16905 tso = 0; 16906 mtu = 0; 16907 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16908 minseg = segsiz; 16909 if (rack->r_ctl.rc_pace_max_segs == 0) 16910 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16911 else 16912 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16913 sb_offset = tp->snd_max - tp->snd_una; 16914 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16915 flags = tcp_outflags[tp->t_state]; 16916 while (rack->rc_free_cnt < rack_free_cache) { 16917 rsm = rack_alloc(rack); 16918 if (rsm == NULL) { 16919 if (inp->inp_hpts_calls) 16920 /* Retry in a ms */ 16921 slot = (1 * HPTS_USEC_IN_MSEC); 16922 so = inp->inp_socket; 16923 sb = &so->so_snd; 16924 goto just_return_nolock; 16925 } 16926 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16927 rack->rc_free_cnt++; 16928 rsm = NULL; 16929 } 16930 if (inp->inp_hpts_calls) 16931 inp->inp_hpts_calls = 0; 16932 sack_rxmit = 0; 16933 len = 0; 16934 rsm = NULL; 16935 if (flags & TH_RST) { 16936 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16937 so = inp->inp_socket; 16938 sb = &so->so_snd; 16939 goto send; 16940 } 16941 if (rack->r_ctl.rc_resend) { 16942 /* Retransmit timer */ 16943 rsm = rack->r_ctl.rc_resend; 16944 rack->r_ctl.rc_resend = NULL; 16945 len = rsm->r_end - rsm->r_start; 16946 sack_rxmit = 1; 16947 sendalot = 0; 16948 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16949 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16950 __func__, __LINE__, 16951 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16952 sb_offset = rsm->r_start - tp->snd_una; 16953 if (len >= segsiz) 16954 len = segsiz; 16955 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16956 /* We have a retransmit that takes precedence */ 16957 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16958 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16959 /* Enter recovery if not induced by a time-out */ 16960 rack->r_ctl.rc_rsm_start = rsm->r_start; 16961 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 16962 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 16963 
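			/*
			 * With the pre-recovery cwnd and ssthresh snapshotted
			 * above, signal congestion control that we are entering
			 * recovery; CC_NDUPACK is the dup-ack style trigger
			 * (as opposed to an RTO induced one).
			 */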
			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
		}
#ifdef INVARIANTS
		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
			      tp, rack, rsm, rsm->r_start, tp->snd_una);
		}
#endif
		len = rsm->r_end - rsm->r_start;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		sendalot = 0;
		if (len >= segsiz)
			len = segsiz;
		if (len > 0) {
			sack_rxmit = 1;
			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, segsiz));
			counter_u64_add(rack_rtm_prr_retran, 1);
		}
	} else if (rack->r_ctl.rc_tlpsend) {
		/* Tail loss probe */
		long cwin;
		long tlen;

		/*
		 * Check if we can do a TLP with a RACK'd packet;
		 * this can happen if we are not doing the rack
		 * cheat and we skipped to a TLP and it
		 * went off.
		 */
		rsm = rack->r_ctl.rc_tlpsend;
		/* We are doing a TLP, make sure the flag is present */
		rsm->r_flags |= RACK_TLP;
		rack->r_ctl.rc_tlpsend = NULL;
		sack_rxmit = 1;
		tlen = rsm->r_end - rsm->r_start;
		if (tlen > segsiz)
			tlen = segsiz;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		cwin = min(tp->snd_wnd, tlen);
		len = cwin;
	}
	if (rack->r_must_retran &&
	    (rsm == NULL)) {
		/*
		 * Non-Sack and we had a RTO or Sack/non-Sack and an
		 * MTU change, we need to retransmit until we reach
		 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto).
		 */
		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
			int sendwin, flight;

			sendwin = min(tp->snd_wnd, tp->snd_cwnd);
			flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
			if (flight >= sendwin) {
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
			if (rsm == NULL) {
				/* TSNH */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
				rack->r_must_retran = 0;
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
				/* It does not have the flag, we are done */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
			} else {
				sack_rxmit = 1;
				len = rsm->r_end - rsm->r_start;
				sendalot = 0;
				sb_offset = rsm->r_start - tp->snd_una;
				if (len >= segsiz)
					len = segsiz;
				/*
				 * Delay removing the flag RACK_MUST_RXT so
				 * that the fastpath for retransmit will
				 * work with this rsm.
				 */

			}
		} else {
			/* We must be done if there is nothing outstanding */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		}
	}
	/*
	 * Enforce a connection sendmap count limit if set
	 * as long as we are not retransmitting.
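	 * Once rc_num_maps_alloced reaches V_tcp_map_entries_limit we hold
	 * off sending new data (the just_return path below) rather than let
	 * the scoreboard grow without bound; retransmits (rsm != NULL) are
	 * not subject to the cap.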
17068 */ 17069 if ((rsm == NULL) && 17070 (rack->do_detection == 0) && 17071 (V_tcp_map_entries_limit > 0) && 17072 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17073 counter_u64_add(rack_to_alloc_limited, 1); 17074 if (!rack->alloc_limit_reported) { 17075 rack->alloc_limit_reported = 1; 17076 counter_u64_add(rack_alloc_limited_conns, 1); 17077 } 17078 so = inp->inp_socket; 17079 sb = &so->so_snd; 17080 goto just_return_nolock; 17081 } 17082 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17083 /* we are retransmitting the fin */ 17084 len--; 17085 if (len) { 17086 /* 17087 * When retransmitting data do *not* include the 17088 * FIN. This could happen from a TLP probe. 17089 */ 17090 flags &= ~TH_FIN; 17091 } 17092 } 17093 #ifdef INVARIANTS 17094 /* For debugging */ 17095 rack->r_ctl.rc_rsm_at_retran = rsm; 17096 #endif 17097 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17098 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17099 int ret; 17100 17101 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17102 if (ret == 0) 17103 return (0); 17104 } 17105 if (rsm && (rsm->r_flags & RACK_MUST_RXT)) { 17106 /* 17107 * Clear the flag in prep for the send 17108 * note that if we can't get an mbuf 17109 * and fail, we won't retransmit this 17110 * rsm but that should be ok (its rare). 17111 */ 17112 rsm->r_flags &= ~RACK_MUST_RXT; 17113 } 17114 so = inp->inp_socket; 17115 sb = &so->so_snd; 17116 if (do_a_prefetch == 0) { 17117 kern_prefetch(sb, &do_a_prefetch); 17118 do_a_prefetch = 1; 17119 } 17120 #ifdef NETFLIX_SHARED_CWND 17121 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17122 rack->rack_enable_scwnd) { 17123 /* We are doing cwnd sharing */ 17124 if (rack->gp_ready && 17125 (rack->rack_attempted_scwnd == 0) && 17126 (rack->r_ctl.rc_scw == NULL) && 17127 tp->t_lib) { 17128 /* The pcbid is in, lets make an attempt */ 17129 counter_u64_add(rack_try_scwnd, 1); 17130 rack->rack_attempted_scwnd = 1; 17131 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17132 &rack->r_ctl.rc_scw_index, 17133 segsiz); 17134 } 17135 if (rack->r_ctl.rc_scw && 17136 (rack->rack_scwnd_is_idle == 1) && 17137 sbavail(&so->so_snd)) { 17138 /* we are no longer out of data */ 17139 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17140 rack->rack_scwnd_is_idle = 0; 17141 } 17142 if (rack->r_ctl.rc_scw) { 17143 /* First lets update and get the cwnd */ 17144 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17145 rack->r_ctl.rc_scw_index, 17146 tp->snd_cwnd, tp->snd_wnd, segsiz); 17147 } 17148 } 17149 #endif 17150 /* 17151 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17152 * state flags. 17153 */ 17154 if (tp->t_flags & TF_NEEDFIN) 17155 flags |= TH_FIN; 17156 if (tp->t_flags & TF_NEEDSYN) 17157 flags |= TH_SYN; 17158 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17159 void *end_rsm; 17160 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17161 if (end_rsm) 17162 kern_prefetch(end_rsm, &prefetch_rsm); 17163 prefetch_rsm = 1; 17164 } 17165 SOCKBUF_LOCK(sb); 17166 /* 17167 * If snd_nxt == snd_max and we have transmitted a FIN, the 17168 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17169 * negative length. This can also occur when TCP opens up its 17170 * congestion window while receiving additional duplicate acks after 17171 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17172 * the fast-retransmit. 
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will be
	 * set to snd_una, the sb_offset will be 0, and the length may wind
	 * up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if ((sack_rxmit == 0) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) {
		uint32_t avail;

		avail = sbavail(sb);
		if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail)
			sb_offset = tp->snd_nxt - tp->snd_una;
		else
			sb_offset = 0;
		if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
			if (rack->r_ctl.rc_tlp_new_data) {
				/* TLP is forcing out new data */
				if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) {
					rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset);
				}
				if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
					if (tp->snd_wnd > sb_offset)
						len = tp->snd_wnd - sb_offset;
					else
						len = 0;
				} else {
					len = rack->r_ctl.rc_tlp_new_data;
				}
			} else {
				len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset);
			}
			if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) {
				/*
				 * For prr=off, we need to send only 1 MSS
				 * at a time. We do this because another sack could
				 * be arriving that causes us to send retransmits and
				 * we don't want to be on a long pace due to a larger send
				 * that keeps us from sending out the retransmit.
				 */
				len = segsiz;
			}
		} else {
			uint32_t outstanding;
			/*
			 * We are inside of a Fast recovery episode, this
			 * is caused by a SACK or 3 dup acks. At this point
			 * we have sent all the retransmissions and we rely
			 * on PRR to dictate what we will send in the form of
			 * new data.
			 */

			outstanding = tp->snd_max - tp->snd_una;
			if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
				if (tp->snd_wnd > outstanding) {
					len = tp->snd_wnd - outstanding;
					/* Check to see if we have the data */
					if ((sb_offset + len) > avail) {
						/* It does not all fit */
						if (avail > sb_offset)
							len = avail - sb_offset;
						else
							len = 0;
					}
				} else {
					len = 0;
				}
			} else if (avail > sb_offset) {
				len = avail - sb_offset;
			} else {
				len = 0;
			}
			if (len > 0) {
				if (len > rack->r_ctl.rc_prr_sndcnt) {
					len = rack->r_ctl.rc_prr_sndcnt;
				}
				if (len > 0) {
					sub_from_prr = 1;
					counter_u64_add(rack_rtm_prr_newdata, 1);
				}
			}
			if (len > segsiz) {
				/*
				 * We should never send more than an MSS when
				 * retransmitting or sending new data in prr
				 * mode unless the override flag is on. Most
				 * likely the PRR algorithm is not going to
				 * let us send a lot as well :-)
				 */
				if (rack->r_ctl.rc_prr_sendalot == 0) {
					len = segsiz;
				}
			} else if (len < segsiz) {
				/*
				 * Do we send any? The idea here is if the
				 * send empties the socket buffer we want to
				 * do it. However if not then let's just wait
				 * for our prr_sndcnt to get bigger.
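				 * (Concretely: the sub-MSS send below only goes
				 * out if it would drain everything left in the
				 * socket buffer; otherwise len is zeroed and we
				 * wait.)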
17273 */ 17274 long leftinsb; 17275 17276 leftinsb = sbavail(sb) - sb_offset; 17277 if (leftinsb > len) { 17278 /* This send does not empty the sb */ 17279 len = 0; 17280 } 17281 } 17282 } 17283 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17284 /* 17285 * If you have not established 17286 * and are not doing FAST OPEN 17287 * no data please. 17288 */ 17289 if ((sack_rxmit == 0) && 17290 (!IS_FASTOPEN(tp->t_flags))){ 17291 len = 0; 17292 sb_offset = 0; 17293 } 17294 } 17295 if (prefetch_so_done == 0) { 17296 kern_prefetch(so, &prefetch_so_done); 17297 prefetch_so_done = 1; 17298 } 17299 /* 17300 * Lop off SYN bit if it has already been sent. However, if this is 17301 * SYN-SENT state and if segment contains data and if we don't know 17302 * that foreign host supports TAO, suppress sending segment. 17303 */ 17304 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17305 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17306 /* 17307 * When sending additional segments following a TFO SYN|ACK, 17308 * do not include the SYN bit. 17309 */ 17310 if (IS_FASTOPEN(tp->t_flags) && 17311 (tp->t_state == TCPS_SYN_RECEIVED)) 17312 flags &= ~TH_SYN; 17313 } 17314 /* 17315 * Be careful not to send data and/or FIN on SYN segments. This 17316 * measure is needed to prevent interoperability problems with not 17317 * fully conformant TCP implementations. 17318 */ 17319 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17320 len = 0; 17321 flags &= ~TH_FIN; 17322 } 17323 /* 17324 * On TFO sockets, ensure no data is sent in the following cases: 17325 * 17326 * - When retransmitting SYN|ACK on a passively-created socket 17327 * 17328 * - When retransmitting SYN on an actively created socket 17329 * 17330 * - When sending a zero-length cookie (cookie request) on an 17331 * actively created socket 17332 * 17333 * - When the socket is in the CLOSED state (RST is being sent) 17334 */ 17335 if (IS_FASTOPEN(tp->t_flags) && 17336 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17337 ((tp->t_state == TCPS_SYN_SENT) && 17338 (tp->t_tfo_client_cookie_len == 0)) || 17339 (flags & TH_RST))) { 17340 sack_rxmit = 0; 17341 len = 0; 17342 } 17343 /* Without fast-open there should never be data sent on a SYN */ 17344 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17345 tp->snd_nxt = tp->iss; 17346 len = 0; 17347 } 17348 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17349 /* We only send 1 MSS if we have a DSACK block */ 17350 add_flag |= RACK_SENT_W_DSACK; 17351 len = segsiz; 17352 } 17353 orig_len = len; 17354 if (len <= 0) { 17355 /* 17356 * If FIN has been sent but not acked, but we haven't been 17357 * called to retransmit, len will be < 0. Otherwise, window 17358 * shrank after we sent into it. If window shrank to 0, 17359 * cancel pending retransmit, pull snd_nxt back to (closed) 17360 * window, and set the persist timer if it isn't already 17361 * going. If the window didn't close completely, just wait 17362 * for an ACK. 17363 * 17364 * We also do a general check here to ensure that we will 17365 * set the persist timer when we have data to send, but a 17366 * 0-byte window. This makes sure the persist timer is set 17367 * even if the packet hits one of the "goto send" lines 17368 * below. 
17369 */ 17370 len = 0; 17371 if ((tp->snd_wnd == 0) && 17372 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17373 (tp->snd_una == tp->snd_max) && 17374 (sb_offset < (int)sbavail(sb))) { 17375 rack_enter_persist(tp, rack, cts); 17376 } 17377 } else if ((rsm == NULL) && 17378 (doing_tlp == 0) && 17379 (len < pace_max_seg)) { 17380 /* 17381 * We are not sending a maximum sized segment for 17382 * some reason. Should we not send anything (think 17383 * sws or persists)? 17384 */ 17385 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17386 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17387 (len < minseg) && 17388 (len < (int)(sbavail(sb) - sb_offset))) { 17389 /* 17390 * Here the rwnd is less than 17391 * the minimum pacing size, this is not a retransmit, 17392 * we are established and 17393 * the send is not the last in the socket buffer 17394 * we send nothing, and we may enter persists 17395 * if nothing is outstanding. 17396 */ 17397 len = 0; 17398 if (tp->snd_max == tp->snd_una) { 17399 /* 17400 * Nothing out we can 17401 * go into persists. 17402 */ 17403 rack_enter_persist(tp, rack, cts); 17404 } 17405 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17406 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17407 (len < (int)(sbavail(sb) - sb_offset)) && 17408 (len < minseg)) { 17409 /* 17410 * Here we are not retransmitting, and 17411 * the cwnd is not so small that we could 17412 * not send at least a min size (rxt timer 17413 * not having gone off), We have 2 segments or 17414 * more already in flight, its not the tail end 17415 * of the socket buffer and the cwnd is blocking 17416 * us from sending out a minimum pacing segment size. 17417 * Lets not send anything. 17418 */ 17419 len = 0; 17420 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17421 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17422 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17423 (len < (int)(sbavail(sb) - sb_offset)) && 17424 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17425 /* 17426 * Here we have a send window but we have 17427 * filled it up and we can't send another pacing segment. 17428 * We also have in flight more than 2 segments 17429 * and we are not completing the sb i.e. we allow 17430 * the last bytes of the sb to go out even if 17431 * its not a full pacing segment. 17432 */ 17433 len = 0; 17434 } else if ((rack->r_ctl.crte != NULL) && 17435 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17436 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17437 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17438 (len < (int)(sbavail(sb) - sb_offset))) { 17439 /* 17440 * Here we are doing hardware pacing, this is not a TLP, 17441 * we are not sending a pace max segment size, there is rwnd 17442 * room to send at least N pace_max_seg, the cwnd is greater 17443 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17444 * more segments in flight and its not the tail of the socket buffer. 17445 * 17446 * We don't want to send instead we need to get more ack's in to 17447 * allow us to send a full pacing segment. Normally, if we are pacing 17448 * about the right speed, we should have finished our pacing 17449 * send as most of the acks have come back if we are at the 17450 * right rate. This is a bit fuzzy since return path delay 17451 * can delay the acks, which is why we want to make sure we 17452 * have cwnd space to have a bit more than a max pace segments in flight. 
17453 * 17454 * If we have not gotten our acks back we are pacing at too high a 17455 * rate delaying will not hurt and will bring our GP estimate down by 17456 * injecting the delay. If we don't do this we will send 17457 * 2 MSS out in response to the acks being clocked in which 17458 * defeats the point of hw-pacing (i.e. to help us get 17459 * larger TSO's out). 17460 */ 17461 len = 0; 17462 17463 } 17464 17465 } 17466 /* len will be >= 0 after this point. */ 17467 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17468 rack_sndbuf_autoscale(rack); 17469 /* 17470 * Decide if we can use TCP Segmentation Offloading (if supported by 17471 * hardware). 17472 * 17473 * TSO may only be used if we are in a pure bulk sending state. The 17474 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17475 * options prevent using TSO. With TSO the TCP header is the same 17476 * (except for the sequence number) for all generated packets. This 17477 * makes it impossible to transmit any options which vary per 17478 * generated segment or packet. 17479 * 17480 * IPv4 handling has a clear separation of ip options and ip header 17481 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17482 * the right thing below to provide length of just ip options and thus 17483 * checking for ipoptlen is enough to decide if ip options are present. 17484 */ 17485 ipoptlen = 0; 17486 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17487 /* 17488 * Pre-calculate here as we save another lookup into the darknesses 17489 * of IPsec that way and can actually decide if TSO is ok. 17490 */ 17491 #ifdef INET6 17492 if (isipv6 && IPSEC_ENABLED(ipv6)) 17493 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17494 #ifdef INET 17495 else 17496 #endif 17497 #endif /* INET6 */ 17498 #ifdef INET 17499 if (IPSEC_ENABLED(ipv4)) 17500 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17501 #endif /* INET */ 17502 #endif 17503 17504 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17505 ipoptlen += ipsec_optlen; 17506 #endif 17507 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17508 (tp->t_port == 0) && 17509 ((tp->t_flags & TF_SIGNATURE) == 0) && 17510 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17511 ipoptlen == 0) 17512 tso = 1; 17513 { 17514 uint32_t outstanding; 17515 17516 outstanding = tp->snd_max - tp->snd_una; 17517 if (tp->t_flags & TF_SENTFIN) { 17518 /* 17519 * If we sent a fin, snd_max is 1 higher than 17520 * snd_una 17521 */ 17522 outstanding--; 17523 } 17524 if (sack_rxmit) { 17525 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17526 flags &= ~TH_FIN; 17527 } else { 17528 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17529 sbused(sb))) 17530 flags &= ~TH_FIN; 17531 } 17532 } 17533 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17534 (long)TCP_MAXWIN << tp->rcv_scale); 17535 17536 /* 17537 * Sender silly window avoidance. We transmit under the following 17538 * conditions when len is non-zero: 17539 * 17540 * - We have a full segment (or more with TSO) - This is the last 17541 * buffer in a write()/send() and we are either idle or running 17542 * NODELAY - we've timed out (e.g. persist timer) - we have more 17543 * then 1/2 the maximum send window's worth of data (receiver may be 17544 * limited the window size) - we need to retransmit 17545 */ 17546 if (len) { 17547 if (len >= segsiz) { 17548 goto send; 17549 } 17550 /* 17551 * NOTE! 
on localhost connections an 'ack' from the remote 17552 * end may occur synchronously with the output and cause us 17553 * to flush a buffer queued with moretocome. XXX 17554 * 17555 */ 17556 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17557 (idle || (tp->t_flags & TF_NODELAY)) && 17558 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17559 (tp->t_flags & TF_NOPUSH) == 0) { 17560 pass = 2; 17561 goto send; 17562 } 17563 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17564 pass = 22; 17565 goto send; 17566 } 17567 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17568 pass = 4; 17569 goto send; 17570 } 17571 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17572 pass = 5; 17573 goto send; 17574 } 17575 if (sack_rxmit) { 17576 pass = 6; 17577 goto send; 17578 } 17579 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17580 (ctf_outstanding(tp) < (segsiz * 2))) { 17581 /* 17582 * We have less than two MSS outstanding (delayed ack) 17583 * and our rwnd will not let us send a full sized 17584 * MSS. Lets go ahead and let this small segment 17585 * out because we want to try to have at least two 17586 * packets inflight to not be caught by delayed ack. 17587 */ 17588 pass = 12; 17589 goto send; 17590 } 17591 } 17592 /* 17593 * Sending of standalone window updates. 17594 * 17595 * Window updates are important when we close our window due to a 17596 * full socket buffer and are opening it again after the application 17597 * reads data from it. Once the window has opened again and the 17598 * remote end starts to send again the ACK clock takes over and 17599 * provides the most current window information. 17600 * 17601 * We must avoid the silly window syndrome whereas every read from 17602 * the receive buffer, no matter how small, causes a window update 17603 * to be sent. We also should avoid sending a flurry of window 17604 * updates when the socket buffer had queued a lot of data and the 17605 * application is doing small reads. 17606 * 17607 * Prevent a flurry of pointless window updates by only sending an 17608 * update when we can increase the advertized window by more than 17609 * 1/4th of the socket buffer capacity. When the buffer is getting 17610 * full or is very small be more aggressive and send an update 17611 * whenever we can increase by two mss sized segments. In all other 17612 * situations the ACK's to new incoming data will carry further 17613 * window increases. 17614 * 17615 * Don't send an independent window update if a delayed ACK is 17616 * pending (it will get piggy-backed on it) or the remote side 17617 * already has done a half-close and won't send more data. Skip 17618 * this if the connection is in T/TCP half-open state. 17619 */ 17620 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17621 !(tp->t_flags & TF_DELACK) && 17622 !TCPS_HAVERCVDFIN(tp->t_state)) { 17623 /* 17624 * "adv" is the amount we could increase the window, taking 17625 * into account that we are limited by TCP_MAXWIN << 17626 * tp->rcv_scale. 17627 */ 17628 int32_t adv; 17629 int oldwin; 17630 17631 adv = recwin; 17632 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17633 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17634 if (adv > oldwin) 17635 adv -= oldwin; 17636 else { 17637 /* We can't increase the window */ 17638 adv = 0; 17639 } 17640 } else 17641 oldwin = 0; 17642 17643 /* 17644 * If the new window size ends up being the same as or less 17645 * than the old size when it is scaled, then don't force 17646 * a window update. 
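		 * As a rough worked example (assuming segsiz is 1460 and
		 * so_rcv.sb_hiwat is 64 KB): the checks below only force an
		 * update once the window can open by at least 2 * 1460 bytes
		 * and, in addition, either the gain is 16 KB or more (a
		 * quarter of the buffer) or the space we can advertise has
		 * dropped to 8 KB or less.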
17647 */ 17648 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17649 goto dontupdate; 17650 17651 if (adv >= (int32_t)(2 * segsiz) && 17652 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17653 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17654 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17655 pass = 7; 17656 goto send; 17657 } 17658 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17659 pass = 23; 17660 goto send; 17661 } 17662 } 17663 dontupdate: 17664 17665 /* 17666 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17667 * is also a catch-all for the retransmit timer timeout case. 17668 */ 17669 if (tp->t_flags & TF_ACKNOW) { 17670 pass = 8; 17671 goto send; 17672 } 17673 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17674 pass = 9; 17675 goto send; 17676 } 17677 /* 17678 * If our state indicates that FIN should be sent and we have not 17679 * yet done so, then we need to send. 17680 */ 17681 if ((flags & TH_FIN) && 17682 (tp->snd_nxt == tp->snd_una)) { 17683 pass = 11; 17684 goto send; 17685 } 17686 /* 17687 * No reason to send a segment, just return. 17688 */ 17689 just_return: 17690 SOCKBUF_UNLOCK(sb); 17691 just_return_nolock: 17692 { 17693 int app_limited = CTF_JR_SENT_DATA; 17694 17695 if (tot_len_this_send > 0) { 17696 /* Make sure snd_nxt is up to max */ 17697 rack->r_ctl.fsb.recwin = recwin; 17698 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17699 if ((error == 0) && 17700 rack_use_rfo && 17701 ((flags & (TH_SYN|TH_FIN)) == 0) && 17702 (ipoptlen == 0) && 17703 (tp->snd_nxt == tp->snd_max) && 17704 (tp->rcv_numsacks == 0) && 17705 rack->r_fsb_inited && 17706 TCPS_HAVEESTABLISHED(tp->t_state) && 17707 (rack->r_must_retran == 0) && 17708 ((tp->t_flags & TF_NEEDFIN) == 0) && 17709 (len > 0) && (orig_len > 0) && 17710 (orig_len > len) && 17711 ((orig_len - len) >= segsiz) && 17712 ((optlen == 0) || 17713 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17714 /* We can send at least one more MSS using our fsb */ 17715 17716 rack->r_fast_output = 1; 17717 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17718 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17719 rack->r_ctl.fsb.tcp_flags = flags; 17720 rack->r_ctl.fsb.left_to_send = orig_len - len; 17721 if (hw_tls) 17722 rack->r_ctl.fsb.hw_tls = 1; 17723 else 17724 rack->r_ctl.fsb.hw_tls = 0; 17725 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17726 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17727 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17728 (tp->snd_max - tp->snd_una))); 17729 if (rack->r_ctl.fsb.left_to_send < segsiz) 17730 rack->r_fast_output = 0; 17731 else { 17732 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17733 rack->r_ctl.fsb.rfo_apply_push = 1; 17734 else 17735 rack->r_ctl.fsb.rfo_apply_push = 0; 17736 } 17737 } else 17738 rack->r_fast_output = 0; 17739 17740 17741 rack_log_fsb(rack, tp, so, flags, 17742 ipoptlen, orig_len, len, 0, 17743 1, optlen, __LINE__, 1); 17744 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17745 tp->snd_nxt = tp->snd_max; 17746 } else { 17747 int end_window = 0; 17748 uint32_t seq = tp->gput_ack; 17749 17750 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17751 if (rsm) { 17752 /* 17753 * Mark the last sent that we just-returned (hinting 17754 * that delayed ack may play a role in any rtt measurement). 
				 */
				rsm->r_just_ret = 1;
			}
			counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1);
			rack->r_ctl.rc_agg_delayed = 0;
			rack->r_early = 0;
			rack->r_late = 0;
			rack->r_ctl.rc_agg_early = 0;
			if ((ctf_outstanding(tp) +
			     min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
				 minseg)) >= tp->snd_wnd) {
				/* We are limited by the rwnd */
				app_limited = CTF_JR_RWND_LIMITED;
				if (IN_FASTRECOVERY(tp->t_flags))
					rack->r_ctl.rc_prr_sndcnt = 0;
			} else if (ctf_outstanding(tp) >= sbavail(sb)) {
				/* We are limited by what's available -- app limited */
				app_limited = CTF_JR_APP_LIMITED;
				if (IN_FASTRECOVERY(tp->t_flags))
					rack->r_ctl.rc_prr_sndcnt = 0;
			} else if ((idle == 0) &&
				   ((tp->t_flags & TF_NODELAY) == 0) &&
				   ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
				   (len < segsiz)) {
				/*
				 * No delay is not on and the
				 * user is sending less than 1 MSS. This
				 * brings out SWS avoidance so we
				 * don't send. Another app-limited case.
				 */
				app_limited = CTF_JR_APP_LIMITED;
			} else if (tp->t_flags & TF_NOPUSH) {
				/*
				 * The user has requested no push of
				 * the last segment and we are
				 * at the last segment. Another app
				 * limited case.
				 */
				app_limited = CTF_JR_APP_LIMITED;
			} else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) {
				/* It's the cwnd */
				app_limited = CTF_JR_CWND_LIMITED;
			} else if (IN_FASTRECOVERY(tp->t_flags) &&
				   (rack->rack_no_prr == 0) &&
				   (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
				app_limited = CTF_JR_PRR;
			} else {
				/* Now why here are we not sending? */
#ifdef NOW
#ifdef INVARIANTS
				panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use);
#endif
#endif
				app_limited = CTF_JR_ASSESSING;
			}
			/*
			 * App limited in some fashion, for our pacing GP
			 * measurements we don't want any gap (even cwnd).
			 * Close down the measurement window.
			 */
			if (rack_cwnd_block_ends_measure &&
			    ((app_limited == CTF_JR_CWND_LIMITED) ||
			     (app_limited == CTF_JR_PRR))) {
				/*
				 * The reason we are not sending is
				 * the cwnd (or prr). We have been configured
				 * to end the measurement window in
				 * this case.
				 */
				end_window = 1;
			} else if (rack_rwnd_block_ends_measure &&
				   (app_limited == CTF_JR_RWND_LIMITED)) {
				/*
				 * We are rwnd limited and have been
				 * configured to end the measurement
				 * window in this case.
				 */
				end_window = 1;
			} else if (app_limited == CTF_JR_APP_LIMITED) {
				/*
				 * A true application limited period, we have
				 * run out of data.
				 */
				end_window = 1;
			} else if (app_limited == CTF_JR_ASSESSING) {
				/*
				 * In the assessing case we hit the end of
				 * the if/else and had no known reason.
				 * This will panic us under invariants..
				 *
				 * If we get this out in logs we need to
				 * investigate which reason we missed.
				 */
				end_window = 1;
			}
			if (end_window) {
				uint8_t log = 0;

				/* Adjust the Gput measurement */
				if ((tp->t_flags & TF_GPUTINPROG) &&
				    SEQ_GT(tp->gput_ack, tp->snd_max)) {
					tp->gput_ack = tp->snd_max;
					if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
						/*
						 * There is not enough to measure.
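						 * (The measurement has to span at
						 * least MIN_GP_WIN segments; anything
						 * shorter is abandoned by clearing
						 * TF_GPUTINPROG below.)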
17860 */ 17861 tp->t_flags &= ~TF_GPUTINPROG; 17862 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17863 rack->r_ctl.rc_gp_srtt /*flex1*/, 17864 tp->gput_seq, 17865 0, 0, 18, __LINE__, NULL, 0); 17866 } else 17867 log = 1; 17868 } 17869 /* Mark the last packet has app limited */ 17870 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17871 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17872 if (rack->r_ctl.rc_app_limited_cnt == 0) 17873 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17874 else { 17875 /* 17876 * Go out to the end app limited and mark 17877 * this new one as next and move the end_appl up 17878 * to this guy. 17879 */ 17880 if (rack->r_ctl.rc_end_appl) 17881 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17882 rack->r_ctl.rc_end_appl = rsm; 17883 } 17884 rsm->r_flags |= RACK_APP_LIMITED; 17885 rack->r_ctl.rc_app_limited_cnt++; 17886 } 17887 if (log) 17888 rack_log_pacing_delay_calc(rack, 17889 rack->r_ctl.rc_app_limited_cnt, seq, 17890 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17891 } 17892 } 17893 if (slot) { 17894 /* set the rack tcb into the slot N */ 17895 counter_u64_add(rack_paced_segments, 1); 17896 } else if (tot_len_this_send) { 17897 counter_u64_add(rack_unpaced_segments, 1); 17898 } 17899 /* Check if we need to go into persists or not */ 17900 if ((tp->snd_max == tp->snd_una) && 17901 TCPS_HAVEESTABLISHED(tp->t_state) && 17902 sbavail(sb) && 17903 (sbavail(sb) > tp->snd_wnd) && 17904 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17905 /* Yes lets make sure to move to persist before timer-start */ 17906 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17907 } 17908 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17909 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17910 } 17911 #ifdef NETFLIX_SHARED_CWND 17912 if ((sbavail(sb) == 0) && 17913 rack->r_ctl.rc_scw) { 17914 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17915 rack->rack_scwnd_is_idle = 1; 17916 } 17917 #endif 17918 #ifdef TCP_ACCOUNTING 17919 if (tot_len_this_send > 0) { 17920 crtsc = get_cyclecount(); 17921 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17922 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17923 } 17924 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17925 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17926 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17927 } 17928 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17929 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17930 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17931 } 17932 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17933 } else { 17934 crtsc = get_cyclecount(); 17935 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17936 tp->tcp_cnt_counters[SND_LIMITED]++; 17937 } 17938 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17939 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17940 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17941 } 17942 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17943 } 17944 sched_unpin(); 17945 #endif 17946 return (0); 17947 17948 send: 17949 if (rsm || sack_rxmit) 17950 counter_u64_add(rack_nfto_resend, 1); 17951 else 17952 counter_u64_add(rack_non_fto_send, 1); 17953 if ((flags & TH_FIN) && 17954 sbavail(sb)) { 17955 /* 17956 * We do not transmit a FIN 17957 * with data outstanding. 
We 17958 * need to make it so all data 17959 * is acked first. 17960 */ 17961 flags &= ~TH_FIN; 17962 } 17963 /* Enforce stack imposed max seg size if we have one */ 17964 if (rack->r_ctl.rc_pace_max_segs && 17965 (len > rack->r_ctl.rc_pace_max_segs)) { 17966 mark = 1; 17967 len = rack->r_ctl.rc_pace_max_segs; 17968 } 17969 SOCKBUF_LOCK_ASSERT(sb); 17970 if (len > 0) { 17971 if (len >= segsiz) 17972 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17973 else 17974 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17975 } 17976 /* 17977 * Before ESTABLISHED, force sending of initial options unless TCP 17978 * set not to do any options. NOTE: we assume that the IP/TCP header 17979 * plus TCP options always fit in a single mbuf, leaving room for a 17980 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17981 * + optlen <= MCLBYTES 17982 */ 17983 optlen = 0; 17984 #ifdef INET6 17985 if (isipv6) 17986 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17987 else 17988 #endif 17989 hdrlen = sizeof(struct tcpiphdr); 17990 17991 /* 17992 * Compute options for segment. We only have to care about SYN and 17993 * established connection segments. Options for SYN-ACK segments 17994 * are handled in TCP syncache. 17995 */ 17996 to.to_flags = 0; 17997 if ((tp->t_flags & TF_NOOPT) == 0) { 17998 /* Maximum segment size. */ 17999 if (flags & TH_SYN) { 18000 tp->snd_nxt = tp->iss; 18001 to.to_mss = tcp_mssopt(&inp->inp_inc); 18002 if (tp->t_port) 18003 to.to_mss -= V_tcp_udp_tunneling_overhead; 18004 to.to_flags |= TOF_MSS; 18005 18006 /* 18007 * On SYN or SYN|ACK transmits on TFO connections, 18008 * only include the TFO option if it is not a 18009 * retransmit, as the presence of the TFO option may 18010 * have caused the original SYN or SYN|ACK to have 18011 * been dropped by a middlebox. 18012 */ 18013 if (IS_FASTOPEN(tp->t_flags) && 18014 (tp->t_rxtshift == 0)) { 18015 if (tp->t_state == TCPS_SYN_RECEIVED) { 18016 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 18017 to.to_tfo_cookie = 18018 (u_int8_t *)&tp->t_tfo_cookie.server; 18019 to.to_flags |= TOF_FASTOPEN; 18020 wanted_cookie = 1; 18021 } else if (tp->t_state == TCPS_SYN_SENT) { 18022 to.to_tfo_len = 18023 tp->t_tfo_client_cookie_len; 18024 to.to_tfo_cookie = 18025 tp->t_tfo_cookie.client; 18026 to.to_flags |= TOF_FASTOPEN; 18027 wanted_cookie = 1; 18028 /* 18029 * If we wind up having more data to 18030 * send with the SYN than can fit in 18031 * one segment, don't send any more 18032 * until the SYN|ACK comes back from 18033 * the other end. 18034 */ 18035 sendalot = 0; 18036 } 18037 } 18038 } 18039 /* Window scaling. */ 18040 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18041 to.to_wscale = tp->request_r_scale; 18042 to.to_flags |= TOF_SCALE; 18043 } 18044 /* Timestamps. */ 18045 if ((tp->t_flags & TF_RCVD_TSTMP) || 18046 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18047 to.to_tsval = ms_cts + tp->ts_offset; 18048 to.to_tsecr = tp->ts_recent; 18049 to.to_flags |= TOF_TS; 18050 } 18051 /* Set receive buffer autosizing timestamp. */ 18052 if (tp->rfbuf_ts == 0 && 18053 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18054 tp->rfbuf_ts = tcp_ts_getticks(); 18055 /* Selective ACK's. 
*/ 18056 if (tp->t_flags & TF_SACK_PERMIT) { 18057 if (flags & TH_SYN) 18058 to.to_flags |= TOF_SACKPERM; 18059 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18060 tp->rcv_numsacks > 0) { 18061 to.to_flags |= TOF_SACK; 18062 to.to_nsacks = tp->rcv_numsacks; 18063 to.to_sacks = (u_char *)tp->sackblks; 18064 } 18065 } 18066 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18067 /* TCP-MD5 (RFC2385). */ 18068 if (tp->t_flags & TF_SIGNATURE) 18069 to.to_flags |= TOF_SIGNATURE; 18070 #endif /* TCP_SIGNATURE */ 18071 18072 /* Processing the options. */ 18073 hdrlen += optlen = tcp_addoptions(&to, opt); 18074 /* 18075 * If we wanted a TFO option to be added, but it was unable 18076 * to fit, ensure no data is sent. 18077 */ 18078 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18079 !(to.to_flags & TOF_FASTOPEN)) 18080 len = 0; 18081 } 18082 if (tp->t_port) { 18083 if (V_tcp_udp_tunneling_port == 0) { 18084 /* The port was removed?? */ 18085 SOCKBUF_UNLOCK(&so->so_snd); 18086 #ifdef TCP_ACCOUNTING 18087 crtsc = get_cyclecount(); 18088 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18089 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18090 } 18091 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18092 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18093 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18094 } 18095 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18096 sched_unpin(); 18097 #endif 18098 return (EHOSTUNREACH); 18099 } 18100 hdrlen += sizeof(struct udphdr); 18101 } 18102 #ifdef INET6 18103 if (isipv6) 18104 ipoptlen = ip6_optlen(tp->t_inpcb); 18105 else 18106 #endif 18107 if (tp->t_inpcb->inp_options) 18108 ipoptlen = tp->t_inpcb->inp_options->m_len - 18109 offsetof(struct ipoption, ipopt_list); 18110 else 18111 ipoptlen = 0; 18112 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18113 ipoptlen += ipsec_optlen; 18114 #endif 18115 18116 /* 18117 * Adjust data length if insertion of options will bump the packet 18118 * length beyond the t_maxseg length. Clear the FIN bit because we 18119 * cut off the tail of the segment. 18120 */ 18121 if (len + optlen + ipoptlen > tp->t_maxseg) { 18122 if (tso) { 18123 uint32_t if_hw_tsomax; 18124 uint32_t moff; 18125 int32_t max_len; 18126 18127 /* extract TSO information */ 18128 if_hw_tsomax = tp->t_tsomax; 18129 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18130 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18131 KASSERT(ipoptlen == 0, 18132 ("%s: TSO can't do IP options", __func__)); 18133 18134 /* 18135 * Check if we should limit by maximum payload 18136 * length: 18137 */ 18138 if (if_hw_tsomax != 0) { 18139 /* compute maximum TSO length */ 18140 max_len = (if_hw_tsomax - hdrlen - 18141 max_linkhdr); 18142 if (max_len <= 0) { 18143 len = 0; 18144 } else if (len > max_len) { 18145 sendalot = 1; 18146 len = max_len; 18147 mark = 2; 18148 } 18149 } 18150 /* 18151 * Prevent the last segment from being fractional 18152 * unless the send sockbuf can be emptied: 18153 */ 18154 max_len = (tp->t_maxseg - optlen); 18155 if ((sb_offset + len) < sbavail(sb)) { 18156 moff = len % (u_int)max_len; 18157 if (moff != 0) { 18158 mark = 3; 18159 len -= moff; 18160 } 18161 } 18162 /* 18163 * In case there are too many small fragments don't 18164 * use TSO: 18165 */ 18166 if (len <= segsiz) { 18167 mark = 4; 18168 tso = 0; 18169 } 18170 /* 18171 * Send the FIN in a separate segment after the bulk 18172 * sending is done. We don't trust the TSO 18173 * implementations to clear the FIN flag on all but 18174 * the last segment. 
18175 */ 18176 if (tp->t_flags & TF_NEEDFIN) { 18177 sendalot = 4; 18178 } 18179 } else { 18180 mark = 5; 18181 if (optlen + ipoptlen >= tp->t_maxseg) { 18182 /* 18183 * Since we don't have enough space to put 18184 * the IP header chain and the TCP header in 18185 * one packet as required by RFC 7112, don't 18186 * send it. Also ensure that at least one 18187 * byte of the payload can be put into the 18188 * TCP segment. 18189 */ 18190 SOCKBUF_UNLOCK(&so->so_snd); 18191 error = EMSGSIZE; 18192 sack_rxmit = 0; 18193 goto out; 18194 } 18195 len = tp->t_maxseg - optlen - ipoptlen; 18196 sendalot = 5; 18197 } 18198 } else { 18199 tso = 0; 18200 mark = 6; 18201 } 18202 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18203 ("%s: len > IP_MAXPACKET", __func__)); 18204 #ifdef DIAGNOSTIC 18205 #ifdef INET6 18206 if (max_linkhdr + hdrlen > MCLBYTES) 18207 #else 18208 if (max_linkhdr + hdrlen > MHLEN) 18209 #endif 18210 panic("tcphdr too big"); 18211 #endif 18212 18213 /* 18214 * This KASSERT is here to catch edge cases at a well defined place. 18215 * Before, those had triggered (random) panic conditions further 18216 * down. 18217 */ 18218 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18219 if ((len == 0) && 18220 (flags & TH_FIN) && 18221 (sbused(sb))) { 18222 /* 18223 * We have outstanding data, don't send a fin by itself!. 18224 */ 18225 goto just_return; 18226 } 18227 /* 18228 * Grab a header mbuf, attaching a copy of data to be transmitted, 18229 * and initialize the header from the template for sends on this 18230 * connection. 18231 */ 18232 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18233 if (len) { 18234 uint32_t max_val; 18235 uint32_t moff; 18236 18237 if (rack->r_ctl.rc_pace_max_segs) 18238 max_val = rack->r_ctl.rc_pace_max_segs; 18239 else if (rack->rc_user_set_max_segs) 18240 max_val = rack->rc_user_set_max_segs * segsiz; 18241 else 18242 max_val = len; 18243 /* 18244 * We allow a limit on sending with hptsi. 18245 */ 18246 if (len > max_val) { 18247 mark = 7; 18248 len = max_val; 18249 } 18250 #ifdef INET6 18251 if (MHLEN < hdrlen + max_linkhdr) 18252 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18253 else 18254 #endif 18255 m = m_gethdr(M_NOWAIT, MT_DATA); 18256 18257 if (m == NULL) { 18258 SOCKBUF_UNLOCK(sb); 18259 error = ENOBUFS; 18260 sack_rxmit = 0; 18261 goto out; 18262 } 18263 m->m_data += max_linkhdr; 18264 m->m_len = hdrlen; 18265 18266 /* 18267 * Start the m_copy functions from the closest mbuf to the 18268 * sb_offset in the socket buffer chain. 18269 */ 18270 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18271 s_mb = mb; 18272 s_moff = moff; 18273 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18274 m_copydata(mb, moff, (int)len, 18275 mtod(m, caddr_t)+hdrlen); 18276 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18277 sbsndptr_adv(sb, mb, len); 18278 m->m_len += len; 18279 } else { 18280 struct sockbuf *msb; 18281 18282 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18283 msb = NULL; 18284 else 18285 msb = sb; 18286 m->m_next = tcp_m_copym( 18287 mb, moff, &len, 18288 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18289 ((rsm == NULL) ? hw_tls : 0) 18290 #ifdef NETFLIX_COPY_ARGS 18291 , &filled_all 18292 #endif 18293 ); 18294 if (len <= (tp->t_maxseg - optlen)) { 18295 /* 18296 * Must have ran out of mbufs for the copy 18297 * shorten it to no longer need tso. Lets 18298 * not put on sendalot since we are low on 18299 * mbufs. 
18300 */ 18301 tso = 0; 18302 } 18303 if (m->m_next == NULL) { 18304 SOCKBUF_UNLOCK(sb); 18305 (void)m_free(m); 18306 error = ENOBUFS; 18307 sack_rxmit = 0; 18308 goto out; 18309 } 18310 } 18311 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18312 if (rsm && (rsm->r_flags & RACK_TLP)) { 18313 /* 18314 * TLP should not count in retran count, but 18315 * in its own bin 18316 */ 18317 counter_u64_add(rack_tlp_retran, 1); 18318 counter_u64_add(rack_tlp_retran_bytes, len); 18319 } else { 18320 tp->t_sndrexmitpack++; 18321 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18322 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18323 } 18324 #ifdef STATS 18325 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18326 len); 18327 #endif 18328 } else { 18329 KMOD_TCPSTAT_INC(tcps_sndpack); 18330 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18331 #ifdef STATS 18332 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18333 len); 18334 #endif 18335 } 18336 /* 18337 * If we're sending everything we've got, set PUSH. (This 18338 * will keep happy those implementations which only give 18339 * data to the user when a buffer fills or a PUSH comes in.) 18340 */ 18341 if (sb_offset + len == sbused(sb) && 18342 sbused(sb) && 18343 !(flags & TH_SYN)) { 18344 flags |= TH_PUSH; 18345 add_flag |= RACK_HAD_PUSH; 18346 } 18347 18348 SOCKBUF_UNLOCK(sb); 18349 } else { 18350 SOCKBUF_UNLOCK(sb); 18351 if (tp->t_flags & TF_ACKNOW) 18352 KMOD_TCPSTAT_INC(tcps_sndacks); 18353 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18354 KMOD_TCPSTAT_INC(tcps_sndctrl); 18355 else 18356 KMOD_TCPSTAT_INC(tcps_sndwinup); 18357 18358 m = m_gethdr(M_NOWAIT, MT_DATA); 18359 if (m == NULL) { 18360 error = ENOBUFS; 18361 sack_rxmit = 0; 18362 goto out; 18363 } 18364 #ifdef INET6 18365 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18366 MHLEN >= hdrlen) { 18367 M_ALIGN(m, hdrlen); 18368 } else 18369 #endif 18370 m->m_data += max_linkhdr; 18371 m->m_len = hdrlen; 18372 } 18373 SOCKBUF_UNLOCK_ASSERT(sb); 18374 m->m_pkthdr.rcvif = (struct ifnet *)0; 18375 #ifdef MAC 18376 mac_inpcb_create_mbuf(inp, m); 18377 #endif 18378 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18379 #ifdef INET6 18380 if (isipv6) 18381 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18382 else 18383 #endif /* INET6 */ 18384 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18385 th = rack->r_ctl.fsb.th; 18386 udp = rack->r_ctl.fsb.udp; 18387 if (udp) { 18388 #ifdef INET6 18389 if (isipv6) 18390 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18391 else 18392 #endif /* INET6 */ 18393 ulen = hdrlen + len - sizeof(struct ip); 18394 udp->uh_ulen = htons(ulen); 18395 } 18396 } else { 18397 #ifdef INET6 18398 if (isipv6) { 18399 ip6 = mtod(m, struct ip6_hdr *); 18400 if (tp->t_port) { 18401 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18402 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18403 udp->uh_dport = tp->t_port; 18404 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18405 udp->uh_ulen = htons(ulen); 18406 th = (struct tcphdr *)(udp + 1); 18407 } else 18408 th = (struct tcphdr *)(ip6 + 1); 18409 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18410 } else 18411 #endif /* INET6 */ 18412 { 18413 ip = mtod(m, struct ip *); 18414 #ifdef TCPDEBUG 18415 ipov = (struct ipovly *)ip; 18416 #endif 18417 if (tp->t_port) { 18418 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18419 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18420 udp->uh_dport = tp->t_port; 18421 ulen = hdrlen + len - sizeof(struct ip); 18422 udp->uh_ulen = htons(ulen); 
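			/*
			 * Note that uh_ulen counts the UDP header itself plus
			 * the TCP header, options and payload; only the IP
			 * header was subtracted out of hdrlen above.
			 */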
18423 th = (struct tcphdr *)(udp + 1); 18424 } else 18425 th = (struct tcphdr *)(ip + 1); 18426 tcpip_fillheaders(inp, tp->t_port, ip, th); 18427 } 18428 } 18429 /* 18430 * Fill in fields, remembering maximum advertised window for use in 18431 * delaying messages about window sizes. If resending a FIN, be sure 18432 * not to use a new sequence number. 18433 */ 18434 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18435 tp->snd_nxt == tp->snd_max) 18436 tp->snd_nxt--; 18437 /* 18438 * If we are starting a connection, send ECN setup SYN packet. If we 18439 * are on a retransmit, we may resend those bits a number of times 18440 * as per RFC 3168. 18441 */ 18442 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 18443 if (tp->t_rxtshift >= 1) { 18444 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 18445 flags |= TH_ECE | TH_CWR; 18446 } else 18447 flags |= TH_ECE | TH_CWR; 18448 } 18449 /* Handle parallel SYN for ECN */ 18450 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18451 (tp->t_flags2 & TF2_ECN_SND_ECE)) { 18452 flags |= TH_ECE; 18453 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18454 } 18455 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18456 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18457 /* 18458 * If the peer has ECN, mark data packets with ECN capable 18459 * transmission (ECT). Ignore pure ack packets, 18460 * retransmissions. 18461 */ 18462 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 18463 (sack_rxmit == 0)) { 18464 #ifdef INET6 18465 if (isipv6) 18466 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 18467 else 18468 #endif 18469 ip->ip_tos |= IPTOS_ECN_ECT0; 18470 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 18471 /* 18472 * Reply with proper ECN notifications. 18473 * Only set CWR on new data segments. 18474 */ 18475 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 18476 flags |= TH_CWR; 18477 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 18478 } 18479 } 18480 if (tp->t_flags2 & TF2_ECN_SND_ECE) 18481 flags |= TH_ECE; 18482 } 18483 /* 18484 * If we are doing retransmissions, then snd_nxt will not reflect 18485 * the first unsent octet. For ACK only packets, we do not want the 18486 * sequence number of the retransmitted packet, we want the sequence 18487 * number of the next unsent octet. So, if there is no data (and no 18488 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18489 * ti_seq. But if we are in persist state, snd_max might reflect 18490 * one byte beyond the right edge of the window, so use snd_nxt in 18491 * that case, since we know we aren't doing a retransmission. 18492 * (retransmit and persist are mutually exclusive...) 18493 */ 18494 if (sack_rxmit == 0) { 18495 if (len || (flags & (TH_SYN | TH_FIN))) { 18496 th->th_seq = htonl(tp->snd_nxt); 18497 rack_seq = tp->snd_nxt; 18498 } else { 18499 th->th_seq = htonl(tp->snd_max); 18500 rack_seq = tp->snd_max; 18501 } 18502 } else { 18503 th->th_seq = htonl(rsm->r_start); 18504 rack_seq = rsm->r_start; 18505 } 18506 th->th_ack = htonl(tp->rcv_nxt); 18507 th->th_flags = flags; 18508 /* 18509 * Calculate receive window. Don't shrink window, but avoid silly 18510 * window syndrome. 18511 * If a RST segment is sent, advertise a window of zero. 
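	 * Concretely, the code below rounds a window that is smaller than
	 * both one quarter of the receive buffer and one full segment down
	 * to zero, and it never advertises less than the (rcv_adv - rcv_nxt)
	 * bytes the peer has already been told it may send.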
18512 */ 18513 if (flags & TH_RST) { 18514 recwin = 0; 18515 } else { 18516 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18517 recwin < (long)segsiz) { 18518 recwin = 0; 18519 } 18520 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18521 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18522 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18523 } 18524 18525 /* 18526 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18527 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18528 * handled in syncache. 18529 */ 18530 if (flags & TH_SYN) 18531 th->th_win = htons((u_short) 18532 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18533 else { 18534 /* Avoid shrinking window with window scaling. */ 18535 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18536 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18537 } 18538 /* 18539 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18540 * window. This may cause the remote transmitter to stall. This 18541 * flag tells soreceive() to disable delayed acknowledgements when 18542 * draining the buffer. This can occur if the receiver is 18543 * attempting to read more data than can be buffered prior to 18544 * transmitting on the connection. 18545 */ 18546 if (th->th_win == 0) { 18547 tp->t_sndzerowin++; 18548 tp->t_flags |= TF_RXWIN0SENT; 18549 } else 18550 tp->t_flags &= ~TF_RXWIN0SENT; 18551 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18552 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18553 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18554 uint8_t *cpto; 18555 18556 cpto = mtod(m, uint8_t *); 18557 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18558 /* 18559 * We have just copied in: 18560 * IP/IP6 18561 * <optional udphdr> 18562 * tcphdr (no options) 18563 * 18564 * We need to grab the correct pointers into the mbuf 18565 * for both the tcp header, and possibly the udp header (if tunneling). 18566 * We do this by using the offset in the copy buffer and adding it 18567 * to the mbuf base pointer (cpto). 18568 */ 18569 #ifdef INET6 18570 if (isipv6) 18571 ip6 = mtod(m, struct ip6_hdr *); 18572 else 18573 #endif /* INET6 */ 18574 ip = mtod(m, struct ip *); 18575 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18576 /* If we have a udp header lets set it into the mbuf as well */ 18577 if (udp) 18578 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18579 } 18580 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18581 if (to.to_flags & TOF_SIGNATURE) { 18582 /* 18583 * Calculate MD5 signature and put it into the place 18584 * determined before. 18585 * NOTE: since TCP options buffer doesn't point into 18586 * mbuf's data, calculate offset and use it. 18587 */ 18588 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18589 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18590 /* 18591 * Do not send segment if the calculation of MD5 18592 * digest has failed. 18593 */ 18594 goto out; 18595 } 18596 } 18597 #endif 18598 if (optlen) { 18599 bcopy(opt, th + 1, optlen); 18600 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18601 } 18602 /* 18603 * Put TCP length in extended header, and then checksum extended 18604 * header and data. 
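 * Only the pseudo-header sum is computed in software here; csum_flags and
 * csum_data below hand the rest of the TCP/UDP checksum (and TSO, when
 * enabled) off to the interface, with the stack's delayed-checksum path
 * finishing the job if the hardware cannot.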
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled now, and will be filled
		 * in ip6_output.
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}
		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif
	/*
	 * Enable TSO and specify the size of the segments. The TCP pseudo
	 * header checksum is always provided. XXX: Fixme: This is currently
	 * not the case for IPv6.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}
	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain different than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif
	/* We're getting ready to send; log now.
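	 * The TCP_LOG_OUT record below snapshots the pacing state (PRR send
	 * count, pace min/max segment sizes, early/late aggregates, estimated
	 * bandwidth) and encodes in flex8 whether this send is new data, a
	 * retransmission, or a TLP.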
*/ 18672 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18673 union tcp_log_stackspecific log; 18674 18675 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18676 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 18677 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 18678 if (rack->rack_no_prr) 18679 log.u_bbr.flex1 = 0; 18680 else 18681 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18682 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18683 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18684 log.u_bbr.flex4 = orig_len; 18685 if (filled_all) 18686 log.u_bbr.flex5 = 0x80000000; 18687 else 18688 log.u_bbr.flex5 = 0; 18689 /* Save off the early/late values */ 18690 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18691 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18692 log.u_bbr.bw_inuse = rack_get_bw(rack); 18693 if (rsm || sack_rxmit) { 18694 if (doing_tlp) 18695 log.u_bbr.flex8 = 2; 18696 else 18697 log.u_bbr.flex8 = 1; 18698 } else { 18699 if (doing_tlp) 18700 log.u_bbr.flex8 = 3; 18701 else 18702 log.u_bbr.flex8 = 0; 18703 } 18704 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18705 log.u_bbr.flex7 = mark; 18706 log.u_bbr.flex7 <<= 8; 18707 log.u_bbr.flex7 |= pass; 18708 log.u_bbr.pkts_out = tp->t_maxseg; 18709 log.u_bbr.timeStamp = cts; 18710 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18711 log.u_bbr.lt_epoch = cwnd_to_use; 18712 log.u_bbr.delivered = sendalot; 18713 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18714 len, &log, false, NULL, NULL, 0, &tv); 18715 } else 18716 lgb = NULL; 18717 18718 /* 18719 * Fill in IP length and desired time to live and send to IP level. 18720 * There should be a better way to handle ttl and tos; we could keep 18721 * them in the template, but need a way to checksum without them. 18722 */ 18723 /* 18724 * m->m_pkthdr.len should have been set before cksum calcuration, 18725 * because in6_cksum() need it. 18726 */ 18727 #ifdef INET6 18728 if (isipv6) { 18729 /* 18730 * we separately set hoplimit for every segment, since the 18731 * user might want to change the value via setsockopt. Also, 18732 * desired default hop limit might be changed via Neighbor 18733 * Discovery. 18734 */ 18735 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18736 18737 /* 18738 * Set the packet size here for the benefit of DTrace 18739 * probes. ip6_output() will set it properly; it's supposed 18740 * to include the option header lengths as well. 18741 */ 18742 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18743 18744 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18745 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18746 else 18747 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18748 18749 if (tp->t_state == TCPS_SYN_SENT) 18750 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18751 18752 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18753 /* TODO: IPv6 IP6TOS_ECT bit on */ 18754 error = ip6_output(m, 18755 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18756 inp->in6p_outputopts, 18757 #else 18758 NULL, 18759 #endif 18760 &inp->inp_route6, 18761 ((rsm || sack_rxmit) ? 
		    IP_NO_SND_TAG_RL : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However the tcp hostcache mitigates
		 * the problem so it affects only the first tcp connection
		 * with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 * fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m,
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
		    inp->inp_options,
#else
		    NULL,
#endif
		    &inp->inp_route,
		    ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
		    inp);
		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	}
#endif /* INET */

out:
	if (lgb) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
	/*
	 * In transmit state, time the transmission and arrange for the
	 * retransmit. In persist state, just set snd_max.
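	 * On a successful send we also note whether the bytes went out in
	 * slow start, congestion avoidance or recovery (feeding the goodput
	 * machinery), refresh the TLP bookkeeping, and charge the bytes
	 * against any PRR allowance that is in effect.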
18825 */ 18826 if (error == 0) { 18827 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18828 if (rsm && doing_tlp) { 18829 rack->rc_last_sent_tlp_past_cumack = 0; 18830 rack->rc_last_sent_tlp_seq_valid = 1; 18831 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18832 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18833 } 18834 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18835 if (rsm && (doing_tlp == 0)) { 18836 /* Set we retransmitted */ 18837 rack->rc_gp_saw_rec = 1; 18838 } else { 18839 if (cwnd_to_use > tp->snd_ssthresh) { 18840 /* Set we sent in CA */ 18841 rack->rc_gp_saw_ca = 1; 18842 } else { 18843 /* Set we sent in SS */ 18844 rack->rc_gp_saw_ss = 1; 18845 } 18846 } 18847 if (doing_tlp && (rsm == NULL)) { 18848 /* Make sure new data TLP cnt is clear */ 18849 rack->r_ctl.rc_tlp_new_data = 0; 18850 } 18851 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18852 (tp->t_flags & TF_SACK_PERMIT) && 18853 tp->rcv_numsacks > 0) 18854 tcp_clean_dsack_blocks(tp); 18855 tot_len_this_send += len; 18856 if (len == 0) 18857 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18858 else if (len == 1) { 18859 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18860 } else if (len > 1) { 18861 int idx; 18862 18863 idx = (len / segsiz) + 3; 18864 if (idx >= TCP_MSS_ACCT_ATIMER) 18865 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18866 else 18867 counter_u64_add(rack_out_size[idx], 1); 18868 } 18869 } 18870 if ((rack->rack_no_prr == 0) && 18871 sub_from_prr && 18872 (error == 0)) { 18873 if (rack->r_ctl.rc_prr_sndcnt >= len) 18874 rack->r_ctl.rc_prr_sndcnt -= len; 18875 else 18876 rack->r_ctl.rc_prr_sndcnt = 0; 18877 } 18878 sub_from_prr = 0; 18879 if (doing_tlp) { 18880 /* Make sure the TLP is added */ 18881 add_flag |= RACK_TLP; 18882 } else if (rsm) { 18883 /* If its a resend without TLP then it must not have the flag */ 18884 rsm->r_flags &= ~RACK_TLP; 18885 } 18886 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18887 rack_to_usec_ts(&tv), 18888 rsm, add_flag, s_mb, s_moff, hw_tls); 18889 18890 18891 if ((error == 0) && 18892 (len > 0) && 18893 (tp->snd_una == tp->snd_max)) 18894 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18895 { 18896 tcp_seq startseq = tp->snd_nxt; 18897 18898 /* Track our lost count */ 18899 if (rsm && (doing_tlp == 0)) 18900 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18901 /* 18902 * Advance snd_nxt over sequence space of this segment. 18903 */ 18904 if (error) 18905 /* We don't log or do anything with errors */ 18906 goto nomore; 18907 if (doing_tlp == 0) { 18908 if (rsm == NULL) { 18909 /* 18910 * Not a retransmission of some 18911 * sort, new data is going out so 18912 * clear our TLP count and flag. 18913 */ 18914 rack->rc_tlp_in_progress = 0; 18915 rack->r_ctl.rc_tlp_cnt_out = 0; 18916 } 18917 } else { 18918 /* 18919 * We have just sent a TLP, mark that it is true 18920 * and make sure our in progress is set so we 18921 * continue to check the count. 
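 * rc_tlp_cnt_out counts back-to-back TLPs; it is cleared above as soon as
 * ordinary new data goes out, so a limit on consecutive probes (enforced
 * where the TLP timer fires) resets once regular transmission resumes.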
18922 */ 18923 rack->rc_tlp_in_progress = 1; 18924 rack->r_ctl.rc_tlp_cnt_out++; 18925 } 18926 if (flags & (TH_SYN | TH_FIN)) { 18927 if (flags & TH_SYN) 18928 tp->snd_nxt++; 18929 if (flags & TH_FIN) { 18930 tp->snd_nxt++; 18931 tp->t_flags |= TF_SENTFIN; 18932 } 18933 } 18934 /* In the ENOBUFS case we do *not* update snd_max */ 18935 if (sack_rxmit) 18936 goto nomore; 18937 18938 tp->snd_nxt += len; 18939 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18940 if (tp->snd_una == tp->snd_max) { 18941 /* 18942 * Update the time we just added data since 18943 * none was outstanding. 18944 */ 18945 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18946 tp->t_acktime = ticks; 18947 } 18948 tp->snd_max = tp->snd_nxt; 18949 /* 18950 * Time this transmission if not a retransmission and 18951 * not currently timing anything. 18952 * This is only relevant in case of switching back to 18953 * the base stack. 18954 */ 18955 if (tp->t_rtttime == 0) { 18956 tp->t_rtttime = ticks; 18957 tp->t_rtseq = startseq; 18958 KMOD_TCPSTAT_INC(tcps_segstimed); 18959 } 18960 if (len && 18961 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18962 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18963 } 18964 /* 18965 * If we are doing FO we need to update the mbuf position and subtract 18966 * this happens when the peer sends us duplicate information and 18967 * we thus want to send a DSACK. 18968 * 18969 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18970 * turned off? If not then we are going to echo multiple DSACK blocks 18971 * out (with the TSO), which we should not be doing. 18972 */ 18973 if (rack->r_fast_output && len) { 18974 if (rack->r_ctl.fsb.left_to_send > len) 18975 rack->r_ctl.fsb.left_to_send -= len; 18976 else 18977 rack->r_ctl.fsb.left_to_send = 0; 18978 if (rack->r_ctl.fsb.left_to_send < segsiz) 18979 rack->r_fast_output = 0; 18980 if (rack->r_fast_output) { 18981 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18982 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18983 } 18984 } 18985 } 18986 nomore: 18987 if (error) { 18988 rack->r_ctl.rc_agg_delayed = 0; 18989 rack->r_early = 0; 18990 rack->r_late = 0; 18991 rack->r_ctl.rc_agg_early = 0; 18992 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18993 /* 18994 * Failures do not advance the seq counter above. For the 18995 * case of ENOBUFS we will fall out and retry in 1ms with 18996 * the hpts. Everything else will just have to retransmit 18997 * with the timer. 18998 * 18999 * In any case, we do not want to loop around for another 19000 * send without a good reason. 
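 * The switch below: EPERM is recorded as a soft error and returned,
 * ENOBUFS backs off through the hpts pacer with an increasing delay,
 * EMSGSIZE retries immediately once the MSS has been updated from the
 * reported MTU, and everything else simply arms a 10ms timer and returns.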
19001 */ 19002 sendalot = 0; 19003 switch (error) { 19004 case EPERM: 19005 tp->t_softerror = error; 19006 #ifdef TCP_ACCOUNTING 19007 crtsc = get_cyclecount(); 19008 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19009 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19010 } 19011 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19012 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19013 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19014 } 19015 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19016 sched_unpin(); 19017 #endif 19018 return (error); 19019 case ENOBUFS: 19020 /* 19021 * Pace us right away to retry in a some 19022 * time 19023 */ 19024 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19025 if (rack->rc_enobuf < 0x7f) 19026 rack->rc_enobuf++; 19027 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19028 slot = 10 * HPTS_USEC_IN_MSEC; 19029 if (rack->r_ctl.crte != NULL) { 19030 counter_u64_add(rack_saw_enobuf_hw, 1); 19031 tcp_rl_log_enobuf(rack->r_ctl.crte); 19032 } 19033 counter_u64_add(rack_saw_enobuf, 1); 19034 goto enobufs; 19035 case EMSGSIZE: 19036 /* 19037 * For some reason the interface we used initially 19038 * to send segments changed to another or lowered 19039 * its MTU. If TSO was active we either got an 19040 * interface without TSO capabilits or TSO was 19041 * turned off. If we obtained mtu from ip_output() 19042 * then update it and try again. 19043 */ 19044 if (tso) 19045 tp->t_flags &= ~TF_TSO; 19046 if (mtu != 0) { 19047 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19048 goto again; 19049 } 19050 slot = 10 * HPTS_USEC_IN_MSEC; 19051 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19052 #ifdef TCP_ACCOUNTING 19053 crtsc = get_cyclecount(); 19054 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19055 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19056 } 19057 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19058 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19059 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19060 } 19061 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19062 sched_unpin(); 19063 #endif 19064 return (error); 19065 case ENETUNREACH: 19066 counter_u64_add(rack_saw_enetunreach, 1); 19067 case EHOSTDOWN: 19068 case EHOSTUNREACH: 19069 case ENETDOWN: 19070 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19071 tp->t_softerror = error; 19072 } 19073 /* FALLTHROUGH */ 19074 default: 19075 slot = 10 * HPTS_USEC_IN_MSEC; 19076 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19077 #ifdef TCP_ACCOUNTING 19078 crtsc = get_cyclecount(); 19079 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19080 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19081 } 19082 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19083 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19084 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19085 } 19086 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19087 sched_unpin(); 19088 #endif 19089 return (error); 19090 } 19091 } else { 19092 rack->rc_enobuf = 0; 19093 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19094 rack->r_ctl.retran_during_recovery += len; 19095 } 19096 KMOD_TCPSTAT_INC(tcps_sndtotal); 19097 19098 /* 19099 * Data sent (as far as we can tell). If this advertises a larger 19100 * window than any other segment, then remember the size of the 19101 * advertised window. Any pending ACK has now been sent. 
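 * The rcv_adv recorded below is what the receive-window calculation
 * earlier in this function uses to make sure a later segment never offers
 * less window than we have already advertised.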
19102 */ 19103 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19104 tp->rcv_adv = tp->rcv_nxt + recwin; 19105 19106 tp->last_ack_sent = tp->rcv_nxt; 19107 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19108 enobufs: 19109 if (sendalot) { 19110 /* Do we need to turn off sendalot? */ 19111 if (rack->r_ctl.rc_pace_max_segs && 19112 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19113 /* We hit our max. */ 19114 sendalot = 0; 19115 } else if ((rack->rc_user_set_max_segs) && 19116 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19117 /* We hit the user defined max */ 19118 sendalot = 0; 19119 } 19120 } 19121 if ((error == 0) && (flags & TH_FIN)) 19122 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19123 if (flags & TH_RST) { 19124 /* 19125 * We don't send again after sending a RST. 19126 */ 19127 slot = 0; 19128 sendalot = 0; 19129 if (error == 0) 19130 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19131 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19132 /* 19133 * Get our pacing rate, if an error 19134 * occurred in sending (ENOBUF) we would 19135 * hit the else if with slot preset. Other 19136 * errors return. 19137 */ 19138 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19139 } 19140 if (rsm && 19141 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19142 rack->use_rack_rr) { 19143 /* Its a retransmit and we use the rack cheat? */ 19144 if ((slot == 0) || 19145 (rack->rc_always_pace == 0) || 19146 (rack->r_rr_config == 1)) { 19147 /* 19148 * We have no pacing set or we 19149 * are using old-style rack or 19150 * we are overriden to use the old 1ms pacing. 19151 */ 19152 slot = rack->r_ctl.rc_min_to; 19153 } 19154 } 19155 /* We have sent clear the flag */ 19156 rack->r_ent_rec_ns = 0; 19157 if (rack->r_must_retran) { 19158 if (rsm) { 19159 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19160 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19161 /* 19162 * We have retransmitted all. 19163 */ 19164 rack->r_must_retran = 0; 19165 rack->r_ctl.rc_out_at_rto = 0; 19166 } 19167 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19168 /* 19169 * Sending new data will also kill 19170 * the loop. 19171 */ 19172 rack->r_must_retran = 0; 19173 rack->r_ctl.rc_out_at_rto = 0; 19174 } 19175 } 19176 rack->r_ctl.fsb.recwin = recwin; 19177 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19178 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19179 /* 19180 * We hit an RTO and now have past snd_max at the RTO 19181 * clear all the WAS flags. 
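 * (TF_WASFRECOVERY/TF_WASCRECOVERY remember that the RTO fired while the
 * connection was in recovery; once we have sent past the pre-RTO snd_max
 * they are presumably stale, so drop them here.)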
19182 */ 19183 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19184 } 19185 if (slot) { 19186 /* set the rack tcb into the slot N */ 19187 counter_u64_add(rack_paced_segments, 1); 19188 if ((error == 0) && 19189 rack_use_rfo && 19190 ((flags & (TH_SYN|TH_FIN)) == 0) && 19191 (rsm == NULL) && 19192 (tp->snd_nxt == tp->snd_max) && 19193 (ipoptlen == 0) && 19194 (tp->rcv_numsacks == 0) && 19195 rack->r_fsb_inited && 19196 TCPS_HAVEESTABLISHED(tp->t_state) && 19197 (rack->r_must_retran == 0) && 19198 ((tp->t_flags & TF_NEEDFIN) == 0) && 19199 (len > 0) && (orig_len > 0) && 19200 (orig_len > len) && 19201 ((orig_len - len) >= segsiz) && 19202 ((optlen == 0) || 19203 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19204 /* We can send at least one more MSS using our fsb */ 19205 19206 rack->r_fast_output = 1; 19207 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19208 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19209 rack->r_ctl.fsb.tcp_flags = flags; 19210 rack->r_ctl.fsb.left_to_send = orig_len - len; 19211 if (hw_tls) 19212 rack->r_ctl.fsb.hw_tls = 1; 19213 else 19214 rack->r_ctl.fsb.hw_tls = 0; 19215 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19216 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19217 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19218 (tp->snd_max - tp->snd_una))); 19219 if (rack->r_ctl.fsb.left_to_send < segsiz) 19220 rack->r_fast_output = 0; 19221 else { 19222 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19223 rack->r_ctl.fsb.rfo_apply_push = 1; 19224 else 19225 rack->r_ctl.fsb.rfo_apply_push = 0; 19226 } 19227 } else 19228 rack->r_fast_output = 0; 19229 rack_log_fsb(rack, tp, so, flags, 19230 ipoptlen, orig_len, len, error, 19231 (rsm == NULL), optlen, __LINE__, 2); 19232 } else if (sendalot) { 19233 int ret; 19234 19235 if (len) 19236 counter_u64_add(rack_unpaced_segments, 1); 19237 sack_rxmit = 0; 19238 if ((error == 0) && 19239 rack_use_rfo && 19240 ((flags & (TH_SYN|TH_FIN)) == 0) && 19241 (rsm == NULL) && 19242 (ipoptlen == 0) && 19243 (tp->rcv_numsacks == 0) && 19244 (tp->snd_nxt == tp->snd_max) && 19245 (rack->r_must_retran == 0) && 19246 rack->r_fsb_inited && 19247 TCPS_HAVEESTABLISHED(tp->t_state) && 19248 ((tp->t_flags & TF_NEEDFIN) == 0) && 19249 (len > 0) && (orig_len > 0) && 19250 (orig_len > len) && 19251 ((orig_len - len) >= segsiz) && 19252 ((optlen == 0) || 19253 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19254 /* we can use fast_output for more */ 19255 19256 rack->r_fast_output = 1; 19257 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19258 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19259 rack->r_ctl.fsb.tcp_flags = flags; 19260 rack->r_ctl.fsb.left_to_send = orig_len - len; 19261 if (hw_tls) 19262 rack->r_ctl.fsb.hw_tls = 1; 19263 else 19264 rack->r_ctl.fsb.hw_tls = 0; 19265 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19266 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19267 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19268 (tp->snd_max - tp->snd_una))); 19269 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19270 rack->r_fast_output = 0; 19271 } 19272 if (rack->r_fast_output) { 19273 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19274 rack->r_ctl.fsb.rfo_apply_push = 1; 19275 else 19276 rack->r_ctl.fsb.rfo_apply_push = 0; 19277 rack_log_fsb(rack, tp, so, flags, 
19278 ipoptlen, orig_len, len, error, 19279 (rsm == NULL), optlen, __LINE__, 3); 19280 error = 0; 19281 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19282 if (ret >= 0) 19283 return (ret); 19284 else if (error) 19285 goto nomore; 19286 19287 } 19288 } 19289 goto again; 19290 } else if (len) { 19291 counter_u64_add(rack_unpaced_segments, 1); 19292 } 19293 /* Assure when we leave that snd_nxt will point to top */ 19294 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19295 tp->snd_nxt = tp->snd_max; 19296 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19297 #ifdef TCP_ACCOUNTING 19298 crtsc = get_cyclecount() - ts_val; 19299 if (tot_len_this_send) { 19300 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19301 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19302 } 19303 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19304 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19305 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19306 } 19307 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19308 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19309 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19310 } 19311 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19312 } else { 19313 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19314 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19315 } 19316 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19317 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19318 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19319 } 19320 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19321 } 19322 sched_unpin(); 19323 #endif 19324 if (error == ENOBUFS) 19325 error = 0; 19326 return (error); 19327 } 19328 19329 static void 19330 rack_update_seg(struct tcp_rack *rack) 19331 { 19332 uint32_t orig_val; 19333 19334 orig_val = rack->r_ctl.rc_pace_max_segs; 19335 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19336 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19337 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19338 } 19339 19340 static void 19341 rack_mtu_change(struct tcpcb *tp) 19342 { 19343 /* 19344 * The MSS may have changed 19345 */ 19346 struct tcp_rack *rack; 19347 struct rack_sendmap *rsm; 19348 19349 rack = (struct tcp_rack *)tp->t_fb_ptr; 19350 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19351 /* 19352 * The MTU has changed we need to resend everything 19353 * since all we have sent is lost. We first fix 19354 * up the mtu though. 
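 * rack_set_pace_segments() below re-derives the pacing segment sizes from
 * the new MSS, every rsm still on the transmit map is tagged
 * RACK_MUST_RXT, and r_must_retran makes the output path retransmit the
 * whole outstanding window at the new size.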
19355 */ 19356 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19357 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19358 rack_remxt_tmr(tp); 19359 rack->r_fast_output = 0; 19360 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19361 rack->r_ctl.rc_sacked); 19362 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19363 rack->r_must_retran = 1; 19364 /* Mark all inflight to needing to be rxt'd */ 19365 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19366 rsm->r_flags |= RACK_MUST_RXT; 19367 } 19368 } 19369 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19370 /* We don't use snd_nxt to retransmit */ 19371 tp->snd_nxt = tp->snd_max; 19372 } 19373 19374 static int 19375 rack_set_profile(struct tcp_rack *rack, int prof) 19376 { 19377 int err = EINVAL; 19378 if (prof == 1) { 19379 /* pace_always=1 */ 19380 if (rack->rc_always_pace == 0) { 19381 if (tcp_can_enable_pacing() == 0) 19382 return (EBUSY); 19383 } 19384 rack->rc_always_pace = 1; 19385 if (rack->use_fixed_rate || rack->gp_ready) 19386 rack_set_cc_pacing(rack); 19387 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19388 rack->rack_attempt_hdwr_pace = 0; 19389 /* cmpack=1 */ 19390 if (rack_use_cmp_acks) 19391 rack->r_use_cmp_ack = 1; 19392 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19393 rack->r_use_cmp_ack) 19394 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19395 /* scwnd=1 */ 19396 rack->rack_enable_scwnd = 1; 19397 /* dynamic=100 */ 19398 rack->rc_gp_dyn_mul = 1; 19399 /* gp_inc_ca */ 19400 rack->r_ctl.rack_per_of_gp_ca = 100; 19401 /* rrr_conf=3 */ 19402 rack->r_rr_config = 3; 19403 /* npush=2 */ 19404 rack->r_ctl.rc_no_push_at_mrtt = 2; 19405 /* fillcw=1 */ 19406 rack->rc_pace_to_cwnd = 1; 19407 rack->rc_pace_fill_if_rttin_range = 0; 19408 rack->rtt_limit_mul = 0; 19409 /* noprr=1 */ 19410 rack->rack_no_prr = 1; 19411 /* lscwnd=1 */ 19412 rack->r_limit_scw = 1; 19413 /* gp_inc_rec */ 19414 rack->r_ctl.rack_per_of_gp_rec = 90; 19415 err = 0; 19416 19417 } else if (prof == 3) { 19418 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19419 /* pace_always=1 */ 19420 if (rack->rc_always_pace == 0) { 19421 if (tcp_can_enable_pacing() == 0) 19422 return (EBUSY); 19423 } 19424 rack->rc_always_pace = 1; 19425 if (rack->use_fixed_rate || rack->gp_ready) 19426 rack_set_cc_pacing(rack); 19427 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19428 rack->rack_attempt_hdwr_pace = 0; 19429 /* cmpack=1 */ 19430 if (rack_use_cmp_acks) 19431 rack->r_use_cmp_ack = 1; 19432 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19433 rack->r_use_cmp_ack) 19434 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19435 /* scwnd=1 */ 19436 rack->rack_enable_scwnd = 1; 19437 /* dynamic=100 */ 19438 rack->rc_gp_dyn_mul = 1; 19439 /* gp_inc_ca */ 19440 rack->r_ctl.rack_per_of_gp_ca = 100; 19441 /* rrr_conf=3 */ 19442 rack->r_rr_config = 3; 19443 /* npush=2 */ 19444 rack->r_ctl.rc_no_push_at_mrtt = 2; 19445 /* fillcw=2 */ 19446 rack->rc_pace_to_cwnd = 1; 19447 rack->r_fill_less_agg = 1; 19448 rack->rc_pace_fill_if_rttin_range = 0; 19449 rack->rtt_limit_mul = 0; 19450 /* noprr=1 */ 19451 rack->rack_no_prr = 1; 19452 /* lscwnd=1 */ 19453 rack->r_limit_scw = 1; 19454 /* gp_inc_rec */ 19455 rack->r_ctl.rack_per_of_gp_rec = 90; 19456 err = 0; 19457 19458 19459 } else if (prof == 2) { 19460 /* cmpack=1 */ 19461 if (rack->rc_always_pace == 0) { 19462 if (tcp_can_enable_pacing() == 0) 19463 return (EBUSY); 19464 } 19465 rack->rc_always_pace = 1; 19466 if (rack->use_fixed_rate || rack->gp_ready) 19467 
rack_set_cc_pacing(rack); 19468 rack->r_use_cmp_ack = 1; 19469 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19470 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19471 /* pace_always=1 */ 19472 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19473 /* scwnd=1 */ 19474 rack->rack_enable_scwnd = 1; 19475 /* dynamic=100 */ 19476 rack->rc_gp_dyn_mul = 1; 19477 rack->r_ctl.rack_per_of_gp_ca = 100; 19478 /* rrr_conf=3 */ 19479 rack->r_rr_config = 3; 19480 /* npush=2 */ 19481 rack->r_ctl.rc_no_push_at_mrtt = 2; 19482 /* fillcw=1 */ 19483 rack->rc_pace_to_cwnd = 1; 19484 rack->rc_pace_fill_if_rttin_range = 0; 19485 rack->rtt_limit_mul = 0; 19486 /* noprr=1 */ 19487 rack->rack_no_prr = 1; 19488 /* lscwnd=0 */ 19489 rack->r_limit_scw = 0; 19490 err = 0; 19491 } else if (prof == 0) { 19492 /* This changes things back to the default settings */ 19493 err = 0; 19494 if (rack->rc_always_pace) { 19495 tcp_decrement_paced_conn(); 19496 rack_undo_cc_pacing(rack); 19497 rack->rc_always_pace = 0; 19498 } 19499 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19500 rack->rc_always_pace = 1; 19501 if (rack->use_fixed_rate || rack->gp_ready) 19502 rack_set_cc_pacing(rack); 19503 } else 19504 rack->rc_always_pace = 0; 19505 if (rack_dsack_std_based & 0x1) { 19506 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19507 rack->rc_rack_tmr_std_based = 1; 19508 } 19509 if (rack_dsack_std_based & 0x2) { 19510 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19511 rack->rc_rack_use_dsack = 1; 19512 } 19513 if (rack_use_cmp_acks) 19514 rack->r_use_cmp_ack = 1; 19515 else 19516 rack->r_use_cmp_ack = 0; 19517 if (rack_disable_prr) 19518 rack->rack_no_prr = 1; 19519 else 19520 rack->rack_no_prr = 0; 19521 if (rack_gp_no_rec_chg) 19522 rack->rc_gp_no_rec_chg = 1; 19523 else 19524 rack->rc_gp_no_rec_chg = 0; 19525 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19526 rack->r_mbuf_queue = 1; 19527 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19528 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19529 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19530 } else { 19531 rack->r_mbuf_queue = 0; 19532 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19533 } 19534 if (rack_enable_shared_cwnd) 19535 rack->rack_enable_scwnd = 1; 19536 else 19537 rack->rack_enable_scwnd = 0; 19538 if (rack_do_dyn_mul) { 19539 /* When dynamic adjustment is on CA needs to start at 100% */ 19540 rack->rc_gp_dyn_mul = 1; 19541 if (rack_do_dyn_mul >= 100) 19542 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19543 } else { 19544 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19545 rack->rc_gp_dyn_mul = 0; 19546 } 19547 rack->r_rr_config = 0; 19548 rack->r_ctl.rc_no_push_at_mrtt = 0; 19549 rack->rc_pace_to_cwnd = 0; 19550 rack->rc_pace_fill_if_rttin_range = 0; 19551 rack->rtt_limit_mul = 0; 19552 19553 if (rack_enable_hw_pacing) 19554 rack->rack_hdw_pace_ena = 1; 19555 else 19556 rack->rack_hdw_pace_ena = 0; 19557 if (rack_disable_prr) 19558 rack->rack_no_prr = 1; 19559 else 19560 rack->rack_no_prr = 0; 19561 if (rack_limits_scwnd) 19562 rack->r_limit_scw = 1; 19563 else 19564 rack->r_limit_scw = 0; 19565 err = 0; 19566 } 19567 return (err); 19568 } 19569 19570 static int 19571 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19572 { 19573 struct deferred_opt_list *dol; 19574 19575 dol = malloc(sizeof(struct deferred_opt_list), 19576 M_TCPFSB, M_NOWAIT|M_ZERO); 19577 if (dol == NULL) { 19578 /* 19579 * No space yikes -- fail out.. 
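 * (A zero return here is turned into ENOMEM by rack_set_sockopt(), so the
 * caller sees the failure rather than silently losing the option.)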
19580 */ 19581 return (0); 19582 } 19583 dol->optname = sopt_name; 19584 dol->optval = loptval; 19585 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19586 return (1); 19587 } 19588 19589 static int 19590 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19591 uint32_t optval, uint64_t loptval) 19592 { 19593 struct epoch_tracker et; 19594 struct sockopt sopt; 19595 struct cc_newreno_opts opt; 19596 uint64_t val; 19597 int error = 0; 19598 uint16_t ca, ss; 19599 19600 switch (sopt_name) { 19601 19602 case TCP_RACK_DSACK_OPT: 19603 RACK_OPTS_INC(tcp_rack_dsack_opt); 19604 if (optval & 0x1) { 19605 rack->rc_rack_tmr_std_based = 1; 19606 } else { 19607 rack->rc_rack_tmr_std_based = 0; 19608 } 19609 if (optval & 0x2) { 19610 rack->rc_rack_use_dsack = 1; 19611 } else { 19612 rack->rc_rack_use_dsack = 0; 19613 } 19614 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19615 break; 19616 case TCP_RACK_PACING_BETA: 19617 RACK_OPTS_INC(tcp_rack_beta); 19618 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19619 /* This only works for newreno. */ 19620 error = EINVAL; 19621 break; 19622 } 19623 if (rack->rc_pacing_cc_set) { 19624 /* 19625 * Set them into the real CC module 19626 * whats in the rack pcb is the old values 19627 * to be used on restoral/ 19628 */ 19629 sopt.sopt_dir = SOPT_SET; 19630 opt.name = CC_NEWRENO_BETA; 19631 opt.val = optval; 19632 if (CC_ALGO(tp)->ctl_output != NULL) 19633 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19634 else { 19635 error = ENOENT; 19636 break; 19637 } 19638 } else { 19639 /* 19640 * Not pacing yet so set it into our local 19641 * rack pcb storage. 19642 */ 19643 rack->r_ctl.rc_saved_beta.beta = optval; 19644 } 19645 break; 19646 case TCP_RACK_TIMER_SLOP: 19647 RACK_OPTS_INC(tcp_rack_timer_slop); 19648 rack->r_ctl.timer_slop = optval; 19649 if (rack->rc_tp->t_srtt) { 19650 /* 19651 * If we have an SRTT lets update t_rxtcur 19652 * to have the new slop. 19653 */ 19654 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19655 rack_rto_min, rack_rto_max, 19656 rack->r_ctl.timer_slop); 19657 } 19658 break; 19659 case TCP_RACK_PACING_BETA_ECN: 19660 RACK_OPTS_INC(tcp_rack_beta_ecn); 19661 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19662 /* This only works for newreno. */ 19663 error = EINVAL; 19664 break; 19665 } 19666 if (rack->rc_pacing_cc_set) { 19667 /* 19668 * Set them into the real CC module 19669 * whats in the rack pcb is the old values 19670 * to be used on restoral/ 19671 */ 19672 sopt.sopt_dir = SOPT_SET; 19673 opt.name = CC_NEWRENO_BETA_ECN; 19674 opt.val = optval; 19675 if (CC_ALGO(tp)->ctl_output != NULL) 19676 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19677 else 19678 error = ENOENT; 19679 } else { 19680 /* 19681 * Not pacing yet so set it into our local 19682 * rack pcb storage. 
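 * (The shadow copy in rc_saved_beta is presumably pushed into the newreno
 * module once pacing is enabled and the CC beta values are swapped in.)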
19683 */ 19684 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19685 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19686 } 19687 break; 19688 case TCP_DEFER_OPTIONS: 19689 RACK_OPTS_INC(tcp_defer_opt); 19690 if (optval) { 19691 if (rack->gp_ready) { 19692 /* Too late */ 19693 error = EINVAL; 19694 break; 19695 } 19696 rack->defer_options = 1; 19697 } else 19698 rack->defer_options = 0; 19699 break; 19700 case TCP_RACK_MEASURE_CNT: 19701 RACK_OPTS_INC(tcp_rack_measure_cnt); 19702 if (optval && (optval <= 0xff)) { 19703 rack->r_ctl.req_measurements = optval; 19704 } else 19705 error = EINVAL; 19706 break; 19707 case TCP_REC_ABC_VAL: 19708 RACK_OPTS_INC(tcp_rec_abc_val); 19709 if (optval > 0) 19710 rack->r_use_labc_for_rec = 1; 19711 else 19712 rack->r_use_labc_for_rec = 0; 19713 break; 19714 case TCP_RACK_ABC_VAL: 19715 RACK_OPTS_INC(tcp_rack_abc_val); 19716 if ((optval > 0) && (optval < 255)) 19717 rack->rc_labc = optval; 19718 else 19719 error = EINVAL; 19720 break; 19721 case TCP_HDWR_UP_ONLY: 19722 RACK_OPTS_INC(tcp_pacing_up_only); 19723 if (optval) 19724 rack->r_up_only = 1; 19725 else 19726 rack->r_up_only = 0; 19727 break; 19728 case TCP_PACING_RATE_CAP: 19729 RACK_OPTS_INC(tcp_pacing_rate_cap); 19730 rack->r_ctl.bw_rate_cap = loptval; 19731 break; 19732 case TCP_RACK_PROFILE: 19733 RACK_OPTS_INC(tcp_profile); 19734 error = rack_set_profile(rack, optval); 19735 break; 19736 case TCP_USE_CMP_ACKS: 19737 RACK_OPTS_INC(tcp_use_cmp_acks); 19738 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19739 /* You can't turn it off once its on! */ 19740 error = EINVAL; 19741 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19742 rack->r_use_cmp_ack = 1; 19743 rack->r_mbuf_queue = 1; 19744 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19745 } 19746 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19747 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19748 break; 19749 case TCP_SHARED_CWND_TIME_LIMIT: 19750 RACK_OPTS_INC(tcp_lscwnd); 19751 if (optval) 19752 rack->r_limit_scw = 1; 19753 else 19754 rack->r_limit_scw = 0; 19755 break; 19756 case TCP_RACK_PACE_TO_FILL: 19757 RACK_OPTS_INC(tcp_fillcw); 19758 if (optval == 0) 19759 rack->rc_pace_to_cwnd = 0; 19760 else { 19761 rack->rc_pace_to_cwnd = 1; 19762 if (optval > 1) 19763 rack->r_fill_less_agg = 1; 19764 } 19765 if ((optval >= rack_gp_rtt_maxmul) && 19766 rack_gp_rtt_maxmul && 19767 (optval < 0xf)) { 19768 rack->rc_pace_fill_if_rttin_range = 1; 19769 rack->rtt_limit_mul = optval; 19770 } else { 19771 rack->rc_pace_fill_if_rttin_range = 0; 19772 rack->rtt_limit_mul = 0; 19773 } 19774 break; 19775 case TCP_RACK_NO_PUSH_AT_MAX: 19776 RACK_OPTS_INC(tcp_npush); 19777 if (optval == 0) 19778 rack->r_ctl.rc_no_push_at_mrtt = 0; 19779 else if (optval < 0xff) 19780 rack->r_ctl.rc_no_push_at_mrtt = optval; 19781 else 19782 error = EINVAL; 19783 break; 19784 case TCP_SHARED_CWND_ENABLE: 19785 RACK_OPTS_INC(tcp_rack_scwnd); 19786 if (optval == 0) 19787 rack->rack_enable_scwnd = 0; 19788 else 19789 rack->rack_enable_scwnd = 1; 19790 break; 19791 case TCP_RACK_MBUF_QUEUE: 19792 /* Now do we use the LRO mbuf-queue feature */ 19793 RACK_OPTS_INC(tcp_rack_mbufq); 19794 if (optval || rack->r_use_cmp_ack) 19795 rack->r_mbuf_queue = 1; 19796 else 19797 rack->r_mbuf_queue = 0; 19798 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19799 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19800 else 19801 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19802 break; 19803 case 
TCP_RACK_NONRXT_CFG_RATE: 19804 RACK_OPTS_INC(tcp_rack_cfg_rate); 19805 if (optval == 0) 19806 rack->rack_rec_nonrxt_use_cr = 0; 19807 else 19808 rack->rack_rec_nonrxt_use_cr = 1; 19809 break; 19810 case TCP_NO_PRR: 19811 RACK_OPTS_INC(tcp_rack_noprr); 19812 if (optval == 0) 19813 rack->rack_no_prr = 0; 19814 else if (optval == 1) 19815 rack->rack_no_prr = 1; 19816 else if (optval == 2) 19817 rack->no_prr_addback = 1; 19818 else 19819 error = EINVAL; 19820 break; 19821 case TCP_TIMELY_DYN_ADJ: 19822 RACK_OPTS_INC(tcp_timely_dyn); 19823 if (optval == 0) 19824 rack->rc_gp_dyn_mul = 0; 19825 else { 19826 rack->rc_gp_dyn_mul = 1; 19827 if (optval >= 100) { 19828 /* 19829 * If the user sets something 100 or more 19830 * its the gp_ca value. 19831 */ 19832 rack->r_ctl.rack_per_of_gp_ca = optval; 19833 } 19834 } 19835 break; 19836 case TCP_RACK_DO_DETECTION: 19837 RACK_OPTS_INC(tcp_rack_do_detection); 19838 if (optval == 0) 19839 rack->do_detection = 0; 19840 else 19841 rack->do_detection = 1; 19842 break; 19843 case TCP_RACK_TLP_USE: 19844 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19845 error = EINVAL; 19846 break; 19847 } 19848 RACK_OPTS_INC(tcp_tlp_use); 19849 rack->rack_tlp_threshold_use = optval; 19850 break; 19851 case TCP_RACK_TLP_REDUCE: 19852 /* RACK TLP cwnd reduction (bool) */ 19853 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19854 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19855 break; 19856 /* Pacing related ones */ 19857 case TCP_RACK_PACE_ALWAYS: 19858 /* 19859 * zero is old rack method, 1 is new 19860 * method using a pacing rate. 19861 */ 19862 RACK_OPTS_INC(tcp_rack_pace_always); 19863 if (optval > 0) { 19864 if (rack->rc_always_pace) { 19865 error = EALREADY; 19866 break; 19867 } else if (tcp_can_enable_pacing()) { 19868 rack->rc_always_pace = 1; 19869 if (rack->use_fixed_rate || rack->gp_ready) 19870 rack_set_cc_pacing(rack); 19871 } 19872 else { 19873 error = ENOSPC; 19874 break; 19875 } 19876 } else { 19877 if (rack->rc_always_pace) { 19878 tcp_decrement_paced_conn(); 19879 rack->rc_always_pace = 0; 19880 rack_undo_cc_pacing(rack); 19881 } 19882 } 19883 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19884 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19885 else 19886 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19887 /* A rate may be set irate or other, if so set seg size */ 19888 rack_update_seg(rack); 19889 break; 19890 case TCP_BBR_RACK_INIT_RATE: 19891 RACK_OPTS_INC(tcp_initial_rate); 19892 val = optval; 19893 /* Change from kbits per second to bytes per second */ 19894 val *= 1000; 19895 val /= 8; 19896 rack->r_ctl.init_rate = val; 19897 if (rack->rc_init_win != rack_default_init_window) { 19898 uint32_t win, snt; 19899 19900 /* 19901 * Options don't always get applied 19902 * in the order you think. So in order 19903 * to assure we update a cwnd we need 19904 * to check and see if we are still 19905 * where we should raise the cwnd. 
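 * In other words: only bump snd_cwnd up to the configured initial window
 * if less than one initial window of data has been sent so far and the
 * current cwnd is still below that target.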
19906 */ 19907 win = rc_init_window(rack); 19908 if (SEQ_GT(tp->snd_max, tp->iss)) 19909 snt = tp->snd_max - tp->iss; 19910 else 19911 snt = 0; 19912 if ((snt < win) && 19913 (tp->snd_cwnd < win)) 19914 tp->snd_cwnd = win; 19915 } 19916 if (rack->rc_always_pace) 19917 rack_update_seg(rack); 19918 break; 19919 case TCP_BBR_IWINTSO: 19920 RACK_OPTS_INC(tcp_initial_win); 19921 if (optval && (optval <= 0xff)) { 19922 uint32_t win, snt; 19923 19924 rack->rc_init_win = optval; 19925 win = rc_init_window(rack); 19926 if (SEQ_GT(tp->snd_max, tp->iss)) 19927 snt = tp->snd_max - tp->iss; 19928 else 19929 snt = 0; 19930 if ((snt < win) && 19931 (tp->t_srtt | 19932 #ifdef NETFLIX_PEAKRATE 19933 tp->t_maxpeakrate | 19934 #endif 19935 rack->r_ctl.init_rate)) { 19936 /* 19937 * We are not past the initial window 19938 * and we have some bases for pacing, 19939 * so we need to possibly adjust up 19940 * the cwnd. Note even if we don't set 19941 * the cwnd, its still ok to raise the rc_init_win 19942 * which can be used coming out of idle when we 19943 * would have a rate. 19944 */ 19945 if (tp->snd_cwnd < win) 19946 tp->snd_cwnd = win; 19947 } 19948 if (rack->rc_always_pace) 19949 rack_update_seg(rack); 19950 } else 19951 error = EINVAL; 19952 break; 19953 case TCP_RACK_FORCE_MSEG: 19954 RACK_OPTS_INC(tcp_rack_force_max_seg); 19955 if (optval) 19956 rack->rc_force_max_seg = 1; 19957 else 19958 rack->rc_force_max_seg = 0; 19959 break; 19960 case TCP_RACK_PACE_MAX_SEG: 19961 /* Max segments size in a pace in bytes */ 19962 RACK_OPTS_INC(tcp_rack_max_seg); 19963 rack->rc_user_set_max_segs = optval; 19964 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19965 break; 19966 case TCP_RACK_PACE_RATE_REC: 19967 /* Set the fixed pacing rate in Bytes per second ca */ 19968 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19969 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19970 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19971 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19972 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19973 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19974 rack->use_fixed_rate = 1; 19975 if (rack->rc_always_pace) 19976 rack_set_cc_pacing(rack); 19977 rack_log_pacing_delay_calc(rack, 19978 rack->r_ctl.rc_fixed_pacing_rate_ss, 19979 rack->r_ctl.rc_fixed_pacing_rate_ca, 19980 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19981 __LINE__, NULL,0); 19982 break; 19983 19984 case TCP_RACK_PACE_RATE_SS: 19985 /* Set the fixed pacing rate in Bytes per second ca */ 19986 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19987 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19988 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19989 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19990 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19991 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19992 rack->use_fixed_rate = 1; 19993 if (rack->rc_always_pace) 19994 rack_set_cc_pacing(rack); 19995 rack_log_pacing_delay_calc(rack, 19996 rack->r_ctl.rc_fixed_pacing_rate_ss, 19997 rack->r_ctl.rc_fixed_pacing_rate_ca, 19998 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19999 __LINE__, NULL, 0); 20000 break; 20001 20002 case TCP_RACK_PACE_RATE_CA: 20003 /* Set the fixed pacing rate in Bytes per second ca */ 20004 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 20005 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20006 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20007 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20008 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20009 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20010 rack->use_fixed_rate = 1; 20011 
if (rack->rc_always_pace) 20012 rack_set_cc_pacing(rack); 20013 rack_log_pacing_delay_calc(rack, 20014 rack->r_ctl.rc_fixed_pacing_rate_ss, 20015 rack->r_ctl.rc_fixed_pacing_rate_ca, 20016 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20017 __LINE__, NULL, 0); 20018 break; 20019 case TCP_RACK_GP_INCREASE_REC: 20020 RACK_OPTS_INC(tcp_gp_inc_rec); 20021 rack->r_ctl.rack_per_of_gp_rec = optval; 20022 rack_log_pacing_delay_calc(rack, 20023 rack->r_ctl.rack_per_of_gp_ss, 20024 rack->r_ctl.rack_per_of_gp_ca, 20025 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20026 __LINE__, NULL, 0); 20027 break; 20028 case TCP_RACK_GP_INCREASE_CA: 20029 RACK_OPTS_INC(tcp_gp_inc_ca); 20030 ca = optval; 20031 if (ca < 100) { 20032 /* 20033 * We don't allow any reduction 20034 * over the GP b/w. 20035 */ 20036 error = EINVAL; 20037 break; 20038 } 20039 rack->r_ctl.rack_per_of_gp_ca = ca; 20040 rack_log_pacing_delay_calc(rack, 20041 rack->r_ctl.rack_per_of_gp_ss, 20042 rack->r_ctl.rack_per_of_gp_ca, 20043 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20044 __LINE__, NULL, 0); 20045 break; 20046 case TCP_RACK_GP_INCREASE_SS: 20047 RACK_OPTS_INC(tcp_gp_inc_ss); 20048 ss = optval; 20049 if (ss < 100) { 20050 /* 20051 * We don't allow any reduction 20052 * over the GP b/w. 20053 */ 20054 error = EINVAL; 20055 break; 20056 } 20057 rack->r_ctl.rack_per_of_gp_ss = ss; 20058 rack_log_pacing_delay_calc(rack, 20059 rack->r_ctl.rack_per_of_gp_ss, 20060 rack->r_ctl.rack_per_of_gp_ca, 20061 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20062 __LINE__, NULL, 0); 20063 break; 20064 case TCP_RACK_RR_CONF: 20065 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20066 if (optval && optval <= 3) 20067 rack->r_rr_config = optval; 20068 else 20069 rack->r_rr_config = 0; 20070 break; 20071 case TCP_HDWR_RATE_CAP: 20072 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20073 if (optval) { 20074 if (rack->r_rack_hw_rate_caps == 0) 20075 rack->r_rack_hw_rate_caps = 1; 20076 else 20077 error = EALREADY; 20078 } else { 20079 rack->r_rack_hw_rate_caps = 0; 20080 } 20081 break; 20082 case TCP_BBR_HDWR_PACE: 20083 RACK_OPTS_INC(tcp_hdwr_pacing); 20084 if (optval){ 20085 if (rack->rack_hdrw_pacing == 0) { 20086 rack->rack_hdw_pace_ena = 1; 20087 rack->rack_attempt_hdwr_pace = 0; 20088 } else 20089 error = EALREADY; 20090 } else { 20091 rack->rack_hdw_pace_ena = 0; 20092 #ifdef RATELIMIT 20093 if (rack->r_ctl.crte != NULL) { 20094 rack->rack_hdrw_pacing = 0; 20095 rack->rack_attempt_hdwr_pace = 0; 20096 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20097 rack->r_ctl.crte = NULL; 20098 } 20099 #endif 20100 } 20101 break; 20102 /* End Pacing related ones */ 20103 case TCP_RACK_PRR_SENDALOT: 20104 /* Allow PRR to send more than one seg */ 20105 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20106 rack->r_ctl.rc_prr_sendalot = optval; 20107 break; 20108 case TCP_RACK_MIN_TO: 20109 /* Minimum time between rack t-o's in ms */ 20110 RACK_OPTS_INC(tcp_rack_min_to); 20111 rack->r_ctl.rc_min_to = optval; 20112 break; 20113 case TCP_RACK_EARLY_SEG: 20114 /* If early recovery max segments */ 20115 RACK_OPTS_INC(tcp_rack_early_seg); 20116 rack->r_ctl.rc_early_recovery_segs = optval; 20117 break; 20118 case TCP_RACK_ENABLE_HYSTART: 20119 { 20120 struct sockopt sopt; 20121 struct cc_newreno_opts opt; 20122 20123 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 20124 sopt.sopt_dir = SOPT_SET; 20125 opt.name = CC_NEWRENO_ENABLE_HYSTART; 20126 opt.val = optval; 20127 if (CC_ALGO(tp)->ctl_output != NULL) 20128 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 20129 else 20130 error = EINVAL; 20131 } 20132 
break; 20133 case TCP_RACK_REORD_THRESH: 20134 /* RACK reorder threshold (shift amount) */ 20135 RACK_OPTS_INC(tcp_rack_reord_thresh); 20136 if ((optval > 0) && (optval < 31)) 20137 rack->r_ctl.rc_reorder_shift = optval; 20138 else 20139 error = EINVAL; 20140 break; 20141 case TCP_RACK_REORD_FADE: 20142 /* Does reordering fade after ms time */ 20143 RACK_OPTS_INC(tcp_rack_reord_fade); 20144 rack->r_ctl.rc_reorder_fade = optval; 20145 break; 20146 case TCP_RACK_TLP_THRESH: 20147 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20148 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20149 if (optval) 20150 rack->r_ctl.rc_tlp_threshold = optval; 20151 else 20152 error = EINVAL; 20153 break; 20154 case TCP_BBR_USE_RACK_RR: 20155 RACK_OPTS_INC(tcp_rack_rr); 20156 if (optval) 20157 rack->use_rack_rr = 1; 20158 else 20159 rack->use_rack_rr = 0; 20160 break; 20161 case TCP_FAST_RSM_HACK: 20162 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20163 if (optval) 20164 rack->fast_rsm_hack = 1; 20165 else 20166 rack->fast_rsm_hack = 0; 20167 break; 20168 case TCP_RACK_PKT_DELAY: 20169 /* RACK added ms i.e. rack-rtt + reord + N */ 20170 RACK_OPTS_INC(tcp_rack_pkt_delay); 20171 rack->r_ctl.rc_pkt_delay = optval; 20172 break; 20173 case TCP_DELACK: 20174 RACK_OPTS_INC(tcp_rack_delayed_ack); 20175 if (optval == 0) 20176 tp->t_delayed_ack = 0; 20177 else 20178 tp->t_delayed_ack = 1; 20179 if (tp->t_flags & TF_DELACK) { 20180 tp->t_flags &= ~TF_DELACK; 20181 tp->t_flags |= TF_ACKNOW; 20182 NET_EPOCH_ENTER(et); 20183 rack_output(tp); 20184 NET_EPOCH_EXIT(et); 20185 } 20186 break; 20187 20188 case TCP_BBR_RACK_RTT_USE: 20189 RACK_OPTS_INC(tcp_rack_rtt_use); 20190 if ((optval != USE_RTT_HIGH) && 20191 (optval != USE_RTT_LOW) && 20192 (optval != USE_RTT_AVG)) 20193 error = EINVAL; 20194 else 20195 rack->r_ctl.rc_rate_sample_method = optval; 20196 break; 20197 case TCP_DATA_AFTER_CLOSE: 20198 RACK_OPTS_INC(tcp_data_after_close); 20199 if (optval) 20200 rack->rc_allow_data_af_clo = 1; 20201 else 20202 rack->rc_allow_data_af_clo = 0; 20203 break; 20204 default: 20205 break; 20206 } 20207 #ifdef NETFLIX_STATS 20208 tcp_log_socket_option(tp, sopt_name, optval, error); 20209 #endif 20210 return (error); 20211 } 20212 20213 20214 static void 20215 rack_apply_deferred_options(struct tcp_rack *rack) 20216 { 20217 struct deferred_opt_list *dol, *sdol; 20218 uint32_t s_optval; 20219 20220 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20221 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20222 /* Disadvantage of deferal is you loose the error return */ 20223 s_optval = (uint32_t)dol->optval; 20224 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20225 free(dol, M_TCPDO); 20226 } 20227 } 20228 20229 static void 20230 rack_hw_tls_change(struct tcpcb *tp, int chg) 20231 { 20232 /* 20233 * HW tls state has changed.. fix all 20234 * rsm's in flight. 
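 * Each rsm carries r_hw_tls, presumably so that a later retransmission of
 * that data gets the same hardware-TLS treatment as the original send;
 * the fsb template is updated below for the same reason.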
20235 */ 20236 struct tcp_rack *rack; 20237 struct rack_sendmap *rsm; 20238 20239 rack = (struct tcp_rack *)tp->t_fb_ptr; 20240 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20241 if (chg) 20242 rsm->r_hw_tls = 1; 20243 else 20244 rsm->r_hw_tls = 0; 20245 } 20246 if (chg) 20247 rack->r_ctl.fsb.hw_tls = 1; 20248 else 20249 rack->r_ctl.fsb.hw_tls = 0; 20250 } 20251 20252 static int 20253 rack_pru_options(struct tcpcb *tp, int flags) 20254 { 20255 if (flags & PRUS_OOB) 20256 return (EOPNOTSUPP); 20257 return (0); 20258 } 20259 20260 static struct tcp_function_block __tcp_rack = { 20261 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20262 .tfb_tcp_output = rack_output, 20263 .tfb_do_queued_segments = ctf_do_queued_segments, 20264 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20265 .tfb_tcp_do_segment = rack_do_segment, 20266 .tfb_tcp_ctloutput = rack_ctloutput, 20267 .tfb_tcp_fb_init = rack_init, 20268 .tfb_tcp_fb_fini = rack_fini, 20269 .tfb_tcp_timer_stop_all = rack_stopall, 20270 .tfb_tcp_timer_activate = rack_timer_activate, 20271 .tfb_tcp_timer_active = rack_timer_active, 20272 .tfb_tcp_timer_stop = rack_timer_stop, 20273 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20274 .tfb_tcp_handoff_ok = rack_handoff_ok, 20275 .tfb_tcp_mtu_chg = rack_mtu_change, 20276 .tfb_pru_options = rack_pru_options, 20277 .tfb_hwtls_change = rack_hw_tls_change, 20278 }; 20279 20280 /* 20281 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20282 * socket option arguments. When it re-acquires the lock after the copy, it 20283 * has to revalidate that the connection is still valid for the socket 20284 * option. 20285 */ 20286 static int 20287 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 20288 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 20289 { 20290 #ifdef INET6 20291 struct ip6_hdr *ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20292 #endif 20293 #ifdef INET 20294 struct ip *ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20295 #endif 20296 uint64_t loptval; 20297 int32_t error = 0, optval; 20298 20299 switch (sopt->sopt_level) { 20300 #ifdef INET6 20301 case IPPROTO_IPV6: 20302 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20303 switch (sopt->sopt_name) { 20304 case IPV6_USE_MIN_MTU: 20305 tcp6_use_min_mtu(tp); 20306 break; 20307 case IPV6_TCLASS: 20308 /* 20309 * The DSCP codepoint has changed, update the fsb. 20310 */ 20311 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20312 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20313 break; 20314 } 20315 INP_WUNLOCK(inp); 20316 return (0); 20317 #endif 20318 #ifdef INET 20319 case IPPROTO_IP: 20320 switch (sopt->sopt_name) { 20321 case IP_TOS: 20322 /* 20323 * The DSCP codepoint has changed, update the fsb. 20324 */ 20325 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20326 break; 20327 case IP_TTL: 20328 /* 20329 * The TTL has changed, update the fsb. 
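 * (The fsb holds a prebuilt IP/TCP header template that the fast-send
 * path copies straight into outgoing packets, so IP-level changes such as
 * TTL and TOS have to be mirrored into it here.)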
20330 */ 20331 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20332 break; 20333 } 20334 INP_WUNLOCK(inp); 20335 return (0); 20336 #endif 20337 } 20338 20339 switch (sopt->sopt_name) { 20340 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20341 /* Pacing related ones */ 20342 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20343 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20344 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20345 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20346 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20347 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20348 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20349 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20350 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20351 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20352 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20353 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20354 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20355 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20356 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20357 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20358 /* End pacing related */ 20359 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20360 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20361 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20362 case TCP_RACK_MIN_TO: /* URL:min_to */ 20363 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20364 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20365 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20366 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20367 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20368 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20369 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20370 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20371 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20372 case TCP_NO_PRR: /* URL:noprr */ 20373 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20374 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20375 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20376 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20377 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20378 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20379 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20380 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20381 case TCP_RACK_PROFILE: /* URL:profile */ 20382 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20383 case TCP_RACK_ABC_VAL: /* URL:labc */ 20384 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20385 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20386 case TCP_DEFER_OPTIONS: /* URL:defer */ 20387 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20388 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20389 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20390 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20391 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20392 break; 20393 default: 20394 /* Filter off all unknown options to the base stack */ 20395 return (tcp_default_ctloutput(so, sopt, inp, tp)); 20396 break; 20397 } 20398 INP_WUNLOCK(inp); 20399 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20400 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20401 /* 20402 * We truncate it down to 32 bits for the socket-option trace this 20403 * means rates > 34Gbps won't show right, but thats probably ok. 
20404 */ 20405 optval = (uint32_t)loptval; 20406 } else { 20407 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20408 /* Save it in 64 bit form too */ 20409 loptval = optval; 20410 } 20411 if (error) 20412 return (error); 20413 INP_WLOCK(inp); 20414 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20415 INP_WUNLOCK(inp); 20416 return (ECONNRESET); 20417 } 20418 if (tp->t_fb != &__tcp_rack) { 20419 INP_WUNLOCK(inp); 20420 return (ENOPROTOOPT); 20421 } 20422 if (rack->defer_options && (rack->gp_ready == 0) && 20423 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20424 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20425 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20426 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20427 /* Options are beind deferred */ 20428 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20429 INP_WUNLOCK(inp); 20430 return (0); 20431 } else { 20432 /* No memory to defer, fail */ 20433 INP_WUNLOCK(inp); 20434 return (ENOMEM); 20435 } 20436 } 20437 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20438 INP_WUNLOCK(inp); 20439 return (error); 20440 } 20441 20442 static void 20443 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20444 { 20445 20446 INP_WLOCK_ASSERT(tp->t_inpcb); 20447 bzero(ti, sizeof(*ti)); 20448 20449 ti->tcpi_state = tp->t_state; 20450 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20451 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20452 if (tp->t_flags & TF_SACK_PERMIT) 20453 ti->tcpi_options |= TCPI_OPT_SACK; 20454 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20455 ti->tcpi_options |= TCPI_OPT_WSCALE; 20456 ti->tcpi_snd_wscale = tp->snd_scale; 20457 ti->tcpi_rcv_wscale = tp->rcv_scale; 20458 } 20459 if (tp->t_flags2 & TF2_ECN_PERMIT) 20460 ti->tcpi_options |= TCPI_OPT_ECN; 20461 if (tp->t_flags & TF_FASTOPEN) 20462 ti->tcpi_options |= TCPI_OPT_TFO; 20463 /* still kept in ticks is t_rcvtime */ 20464 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20465 /* Since we hold everything in precise useconds this is easy */ 20466 ti->tcpi_rtt = tp->t_srtt; 20467 ti->tcpi_rttvar = tp->t_rttvar; 20468 ti->tcpi_rto = tp->t_rxtcur; 20469 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20470 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20471 /* 20472 * FreeBSD-specific extension fields for tcp_info. 20473 */ 20474 ti->tcpi_rcv_space = tp->rcv_wnd; 20475 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20476 ti->tcpi_snd_wnd = tp->snd_wnd; 20477 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & TF2_ECN_PERMIT)
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise microseconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

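/*
 * Illustrative user-space sketch (not compiled into this file): reading
 * the structure that rack_fill_info() populates via the TCP_INFO case in
 * rack_get_sockopt() below.  The helper name print_conn_info() is ours,
 * and the rtt fields are assumed to reflect this stack's
 * microsecond-precision state, per the comment above.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void
print_conn_info(int s)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	/* Pull the stack's view of the connection. */
	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("state %u rtt %u rttvar %u cwnd %u\n",
		    ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
		    ti.tcpi_snd_cwnd);
}
#endif
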
20560 */ 20561 if (tp->ccv->cc_data) 20562 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn; 20563 else 20564 error = EINVAL; 20565 } 20566 break; 20567 case TCP_RACK_DSACK_OPT: 20568 optval = 0; 20569 if (rack->rc_rack_tmr_std_based) { 20570 optval |= 1; 20571 } 20572 if (rack->rc_rack_use_dsack) { 20573 optval |= 2; 20574 } 20575 break; 20576 case TCP_RACK_ENABLE_HYSTART: 20577 { 20578 struct sockopt sopt; 20579 struct cc_newreno_opts opt; 20580 20581 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 20582 sopt.sopt_dir = SOPT_GET; 20583 opt.name = CC_NEWRENO_ENABLE_HYSTART; 20584 if (CC_ALGO(tp)->ctl_output != NULL) 20585 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 20586 else 20587 error = EINVAL; 20588 optval = opt.val; 20589 } 20590 break; 20591 case TCP_FAST_RSM_HACK: 20592 optval = rack->fast_rsm_hack; 20593 break; 20594 case TCP_DEFER_OPTIONS: 20595 optval = rack->defer_options; 20596 break; 20597 case TCP_RACK_MEASURE_CNT: 20598 optval = rack->r_ctl.req_measurements; 20599 break; 20600 case TCP_REC_ABC_VAL: 20601 optval = rack->r_use_labc_for_rec; 20602 break; 20603 case TCP_RACK_ABC_VAL: 20604 optval = rack->rc_labc; 20605 break; 20606 case TCP_HDWR_UP_ONLY: 20607 optval= rack->r_up_only; 20608 break; 20609 case TCP_PACING_RATE_CAP: 20610 loptval = rack->r_ctl.bw_rate_cap; 20611 break; 20612 case TCP_RACK_PROFILE: 20613 /* You cannot retrieve a profile, its write only */ 20614 error = EINVAL; 20615 break; 20616 case TCP_USE_CMP_ACKS: 20617 optval = rack->r_use_cmp_ack; 20618 break; 20619 case TCP_RACK_PACE_TO_FILL: 20620 optval = rack->rc_pace_to_cwnd; 20621 if (optval && rack->r_fill_less_agg) 20622 optval++; 20623 break; 20624 case TCP_RACK_NO_PUSH_AT_MAX: 20625 optval = rack->r_ctl.rc_no_push_at_mrtt; 20626 break; 20627 case TCP_SHARED_CWND_ENABLE: 20628 optval = rack->rack_enable_scwnd; 20629 break; 20630 case TCP_RACK_NONRXT_CFG_RATE: 20631 optval = rack->rack_rec_nonrxt_use_cr; 20632 break; 20633 case TCP_NO_PRR: 20634 if (rack->rack_no_prr == 1) 20635 optval = 1; 20636 else if (rack->no_prr_addback == 1) 20637 optval = 2; 20638 else 20639 optval = 0; 20640 break; 20641 case TCP_RACK_DO_DETECTION: 20642 optval = rack->do_detection; 20643 break; 20644 case TCP_RACK_MBUF_QUEUE: 20645 /* Now do we use the LRO mbuf-queue feature */ 20646 optval = rack->r_mbuf_queue; 20647 break; 20648 case TCP_TIMELY_DYN_ADJ: 20649 optval = rack->rc_gp_dyn_mul; 20650 break; 20651 case TCP_BBR_IWINTSO: 20652 optval = rack->rc_init_win; 20653 break; 20654 case TCP_RACK_TLP_REDUCE: 20655 /* RACK TLP cwnd reduction (bool) */ 20656 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 20657 break; 20658 case TCP_BBR_RACK_INIT_RATE: 20659 val = rack->r_ctl.init_rate; 20660 /* convert to kbits per sec */ 20661 val *= 8; 20662 val /= 1000; 20663 optval = (uint32_t)val; 20664 break; 20665 case TCP_RACK_FORCE_MSEG: 20666 optval = rack->rc_force_max_seg; 20667 break; 20668 case TCP_RACK_PACE_MAX_SEG: 20669 /* Max segments in a pace */ 20670 optval = rack->rc_user_set_max_segs; 20671 break; 20672 case TCP_RACK_PACE_ALWAYS: 20673 /* Use the always pace method */ 20674 optval = rack->rc_always_pace; 20675 break; 20676 case TCP_RACK_PRR_SENDALOT: 20677 /* Allow PRR to send more than one seg */ 20678 optval = rack->r_ctl.rc_prr_sendalot; 20679 break; 20680 case TCP_RACK_MIN_TO: 20681 /* Minimum time between rack t-o's in ms */ 20682 optval = rack->r_ctl.rc_min_to; 20683 break; 20684 case TCP_RACK_EARLY_SEG: 20685 /* If early recovery max segments */ 20686 optval = rack->r_ctl.rc_early_recovery_segs; 
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

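/*
 * Illustrative user-space sketch (not compiled into this file): decoding a
 * couple of the values rack_get_sockopt() returns.  TCP_RACK_DSACK_OPT
 * reports a small bit mask (bit 0 set when the standards-based RACK timer
 * is in use, bit 1 when DSACK handling is enabled, matching the case
 * above), while TCP_PACING_RATE_CAP is the one option copied out as 64
 * bits.  The helper name show_rack_settings() is ours.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void
show_rack_settings(int s)
{
	int dsack = 0;
	uint64_t cap = 0;
	socklen_t len;

	len = sizeof(dsack);
	if (getsockopt(s, IPPROTO_TCP, TCP_RACK_DSACK_OPT, &dsack, &len) == 0)
		printf("std-timer:%d dsack:%d\n",
		    (dsack & 1) != 0, (dsack & 2) != 0);
	len = sizeof(cap);
	if (getsockopt(s, IPPROTO_TCP, TCP_PACING_RATE_CAP, &cap, &len) == 0)
		printf("pacing cap: %ju bytes/sec\n", (uintmax_t)cap);
}
#endif
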
static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		/* Huh? */
		goto out;
	}
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(so, sopt, inp, tp, rack));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(so, sopt, inp, tp, rack));
	}
out:
	INP_WUNLOCK(inp);
	return (error);
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
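
/*
 * Illustrative user-space sketch (not compiled into this file): once the
 * module has been loaded (e.g. kldload tcp_rack), the names registered by
 * tcp_addrack() above show up in net.inet.tcp.functions_available, and the
 * system-wide default stack can be switched via
 * net.inet.tcp.functions_default.  The helper name make_rack_default() is
 * ours; the exact string-set convention for the sysctl is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static int
make_rack_default(void)
{
	char avail[1024];
	size_t len = sizeof(avail);

	/* List the registered TCP stacks. */
	if (sysctlbyname("net.inet.tcp.functions_available",
	    avail, &len, NULL, 0) == 0)
		printf("available stacks:\n%s\n", avail);
	/* Make "rack" the default for new connections. */
	return (sysctlbyname("net.inet.tcp.functions_default",
	    NULL, NULL, "rack", strlen("rack") + 1));
}
#endif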