lsquic_send_ctl.h revision 71eb4000
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
#ifndef LSQUIC_SEND_CTL_H
#define LSQUIC_SEND_CTL_H 1

#include <sys/queue.h>

#include "lsquic_types.h"

#ifndef LSQUIC_SEND_STATS
#   define LSQUIC_SEND_STATS 1
#endif

TAILQ_HEAD(lsquic_packets_tailq, lsquic_packet_out);

struct lsquic_packet_out;
struct ack_info;
struct lsquic_alarmset;
struct lsquic_engine_public;
struct lsquic_conn_public;
struct network_path;
struct ver_neg;
enum pns;
struct to_coal;

enum buf_packet_type { BPT_HIGHEST_PRIO, BPT_OTHER_PRIO, };

struct buf_packet_q
{
    struct lsquic_packets_tailq     bpq_packets;
    unsigned                        bpq_count;
};

enum send_ctl_flags {
    SC_TCID0        = (1 << 0),
    SC_NSTP         = (1 << 2),
    SC_PACE         = (1 << 3),
    SC_SCHED_TICK   = (1 << 4),
    SC_BUFFER_STREAM= (1 << 5),
    SC_WAS_QUIET    = (1 << 6),
    SC_IETF         = (1 << 7),
#define SCBIT_LOST_ACK_SHIFT 8
    SC_LOST_ACK_INIT= 1 << 8,
    SC_LOST_ACK_HSK = SC_LOST_ACK_INIT << PNS_HSK,
    SC_LOST_ACK_APP = SC_LOST_ACK_INIT << PNS_APP,
    SC_1RTT_ACKED   = 1 << 11,
    SC_APP_LIMITED  = 1 << 12,
    SC_ECN          = 1 << 13,
    SC_QL_BITS      = 1 << 14,
    SC_SANITY_CHECK = 1 << 15,
    SC_CIDLEN       = 1 << 16,  /* sc_cidlen is set */
    SC_POISON       = 1 << 17,  /* poisoned packet exists */
    SC_CLEANUP_BBR  = 1 << 18,
    SC_ACK_RECV_INIT= 1 << 19,
    SC_ACK_RECV_HSK = SC_ACK_RECV_INIT << PNS_HSK,
    SC_ACK_RECV_APP = SC_ACK_RECV_INIT << PNS_APP,
    SC_ROUGH_RTT    = 1 << 22,
#if LSQUIC_DEVEL
    SC_DYN_PTHRESH  = 1 << 31u, /* dynamic packet threshold enabled */
#endif
};
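
/* The SC_LOST_ACK_* and SC_ACK_RECV_* groups above are one base bit
 * replicated per packet number space: the flag for a given PNS is the INIT
 * bit shifted left by that PNS value, which is also how the
 * lsquic_send_ctl_lost_ack() macro below recovers a per-PNS bitmask.  A
 * minimal sketch of the idiom, assuming PNS_INIT, PNS_HSK, and PNS_APP are
 * 0, 1, and 2 as the shifts above imply:
 *
 *     ctl->sc_flags |= SC_ACK_RECV_INIT << pns;        // record ACK in `pns'
 *     if (ctl->sc_flags & (SC_ACK_RECV_INIT << pns))   // test it later
 *         ...
 */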

typedef struct lsquic_send_ctl {
    /* The first section consists of struct members which are used in the
     * time-critical lsquic_send_ctl_got_ack() in the approximate order
     * of usage.
     */
    lsquic_senhist_t                sc_senhist;
    enum send_ctl_flags             sc_flags;
    enum ecn                        sc_ecn;
    unsigned                        sc_n_stop_waiting;
    struct lsquic_packets_tailq     sc_unacked_packets[N_PNS];
    lsquic_packno_t                 sc_largest_acked_packno;
    lsquic_time_t                   sc_largest_acked_sent_time;
    lsquic_time_t                   sc_last_sent_time;
    lsquic_time_t                   sc_last_rto_time;
    int                           (*sc_can_send)(struct lsquic_send_ctl *);
    unsigned                        sc_bytes_unacked_retx;
    unsigned                        sc_bytes_scheduled;
    struct adaptive_cc              sc_adaptive_cc;
    const struct cong_ctl_if       *sc_ci;
    void                           *sc_cong_ctl;
    struct lsquic_engine_public    *sc_enpub;
    unsigned                        sc_bytes_unacked_all;
    unsigned                        sc_n_in_flight_all;
    unsigned                        sc_n_in_flight_retx;
    unsigned                        sc_n_consec_rtos;
    unsigned                        sc_n_hsk;
    unsigned                        sc_n_tlp;
    enum quic_ft_bit                sc_retx_frames;
    struct lsquic_alarmset         *sc_alset;

    /* Second section: everything else. */
    struct lsquic_packets_tailq     sc_scheduled_packets,
                                    sc_0rtt_stash,
                                    sc_lost_packets;
    struct buf_packet_q             sc_buffered_packets[BPT_OTHER_PRIO + 1];
    const struct ver_neg           *sc_ver_neg;
    struct lsquic_conn_public      *sc_conn_pub;
    struct pacer                    sc_pacer;
    lsquic_packno_t                 sc_cur_packno;
    lsquic_packno_t                 sc_largest_sent_at_cutback;
    lsquic_packno_t                 sc_max_rtt_packno;
    /* sc_largest_ack2ed is the packet number sent by the peer that we acked
     * and whose ACK we know the peer received.  This is used to determine
     * the receive history cutoff point for the purposes of generating ACK
     * frames in the absence of STOP_WAITING frames.  Used when the NSTP
     * option is set.  (The name "ack2ed" is odd enough not to be confused
     * with anything else and it is not insanely long.)
     */
    lsquic_packno_t                 sc_largest_ack2ed[N_PNS];
    /* sc_largest_acked is the largest packet number in the PNS_APP packet
     * number space sent by the peer for which we generated (but not
     * necessarily sent) an ACK.  This information is used to drop stale ACK
     * frames from packets in buffered queues.
     */
    /* XXX We have both sc_largest_acked_packno and sc_largest_acked.  Rename
     * the latter to make the code more readable.
     */
    lsquic_packno_t                 sc_largest_acked;
    lsquic_time_t                   sc_loss_to;
    uint64_t                        sc_ecn_total_acked[N_PNS];
    uint64_t                        sc_ecn_ce_cnt[N_PNS];
    struct
    {
        lsquic_stream_id_t      stream_id;
        enum buf_packet_type    packet_type;
    }                               sc_cached_bpt;
    unsigned                        sc_next_limit;
    unsigned                        sc_n_scheduled;
    enum packno_bits                sc_max_packno_bits;
#if LSQUIC_SEND_STATS
    struct {
        unsigned            n_total_sent,
                            n_resent,
                            n_delayed;
    }                               sc_stats;
#endif
    unsigned char                  *sc_token;
    size_t                          sc_token_sz;
    unsigned                        sc_retry_count;
    unsigned                        sc_rt_count;    /* Count round trips */
    lsquic_packno_t                 sc_cur_rt_end;
    lsquic_packno_t                 sc_gap;
    unsigned                        sc_loss_count;  /* Used to set loss bit */
    unsigned                        sc_square_count;/* Used to set square bit */
    unsigned                        sc_reord_thresh;
    signed char                     sc_cidlen;      /* For debug purposes */
} lsquic_send_ctl_t;

void
lsquic_send_ctl_init (lsquic_send_ctl_t *, struct lsquic_alarmset *,
          struct lsquic_engine_public *, const struct ver_neg *,
          struct lsquic_conn_public *, enum send_ctl_flags);

int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *, struct lsquic_packet_out *);

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *, const struct ack_info *,
                                            lsquic_time_t, lsquic_time_t);

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);

int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_unacked_retx_data (const struct lsquic_send_ctl *);

void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);

int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

struct lsquic_packet_out *
lsquic_send_ctl_next_packet_to_send (struct lsquic_send_ctl *,
                                                const struct to_coal *);

int
lsquic_send_ctl_next_packet_to_send_predict (struct lsquic_send_ctl *);

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl);

#define lsquic_send_ctl_n_in_flight(ctl) (+(ctl)->sc_n_in_flight)

#define lsquic_send_ctl_n_scheduled(ctl) (+(ctl)->sc_n_scheduled)

#define lsquic_send_ctl_largest_ack2ed(ctl, pns) \
                                    (+(ctl)->sc_largest_ack2ed[pns])

void
lsquic_send_ctl_do_sanity_check (const struct lsquic_send_ctl *ctl);

#ifndef NDEBUG
#define lsquic_send_ctl_sanity_check(ctl) do {                  \
    if ((ctl)->sc_flags & SC_SANITY_CHECK)                      \
        lsquic_send_ctl_do_sanity_check(ctl);                   \
} while (0)
#else
#define lsquic_send_ctl_sanity_check(ctl)
#endif

int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *);

struct lsquic_packet_out *
lsquic_send_ctl_last_scheduled (struct lsquic_send_ctl *, enum packnum_space,
                                        const struct network_path *, int);
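
/* A minimal sketch of the send/ACK cycle these entry points implement, as a
 * connection might drive it.  This is illustrative only: the actual call
 * sites live in the full connection logic, error handling is elided, and
 * the variable names (ctl, packet_out, acki, ack_recv_time, now, etc.) are
 * placeholders.
 *
 *     lsquic_send_ctl_init(&ctl, alset, enpub, ver_neg, conn_pub, flags);
 *
 *     // Outgoing path: register each packet that goes on the wire.
 *     if (0 != lsquic_send_ctl_sent_packet(&ctl, packet_out))
 *         ...handle error...
 *
 *     // Incoming path: feed each parsed ACK frame back in.
 *     if (0 != lsquic_send_ctl_got_ack(&ctl, &acki, ack_recv_time, now))
 *         ...handle error...
 */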

struct lsquic_packet_out *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *, unsigned,
                        enum packnum_space, const struct network_path *);

struct lsquic_packet_out *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *, enum packnum_space,
      unsigned need_at_least, const struct network_path *, int, int *is_err);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *,
                    unsigned need_at_least, const struct network_path *,
                    const struct lsquic_stream *);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_crypto (struct lsquic_send_ctl *ctl,
    unsigned need_at_least, enum packnum_space, const struct network_path *);

unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *);

#define lsquic_send_ctl_lost_ack(ctl) \
    (((ctl)->sc_flags & (SC_LOST_ACK_INIT|SC_LOST_ACK_HSK|SC_LOST_ACK_APP)) \
                                                >> SCBIT_LOST_ACK_SHIFT)

#define lsquic_send_ctl_scheduled_ack(ctl, pns, acked) do {     \
    (ctl)->sc_flags &= ~(SC_LOST_ACK_INIT << pns);              \
    if (PNS_APP == pns)                                         \
        (ctl)->sc_largest_acked = acked;                        \
} while (0)

void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *, int);

#define lsquic_send_ctl_turn_nstp_on(ctl) ((ctl)->sc_flags |= SC_NSTP)

void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *, lsquic_stream_id_t);

int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *);

#define lsquic_send_ctl_maybe_squeeze_sched(ctl) (              \
    (ctl)->sc_n_scheduled && lsquic_send_ctl_squeeze_sched(ctl) \
)

/* Same return value as for squeezing, but without actual squeezing. */
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *);

void
lsquic_send_ctl_ack_to_front (struct lsquic_send_ctl *, unsigned n_acks);

#define lsquic_send_ctl_n_stop_waiting(ctl) \
                                    (+(ctl)->sc_n_stop_waiting)

#define lsquic_send_ctl_n_stop_waiting_reset(ctl) do {          \
    (ctl)->sc_n_stop_waiting = 0;                               \
} while (0)

void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *);

#define lsquic_send_ctl_tick_in(ctl, now) do {                  \
    if ((ctl)->sc_flags & SC_PACE)                              \
    {                                                           \
        (ctl)->sc_flags |= SC_SCHED_TICK;                       \
        lsquic_pacer_tick_in(&(ctl)->sc_pacer, now);            \
    }                                                           \
    (ctl)->sc_flags &= ~SC_APP_LIMITED;                         \
} while (0)

#define lsquic_send_ctl_tick_out(ctl) do {                      \
    if ((ctl)->sc_flags & SC_PACE)                              \
        lsquic_pacer_tick_out(&(ctl)->sc_pacer);                \
} while (0)

#define lsquic_send_ctl_next_pacer_time(ctl) (                  \
    ((ctl)->sc_flags & SC_PACE)                                 \
        && lsquic_pacer_delayed(&(ctl)->sc_pacer)               \
        ? lsquic_pacer_next_sched(&(ctl)->sc_pacer)             \
        : 0 )
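
/* The three pacer macros above only do work when SC_PACE is set.  A rough
 * sketch of how a connection tick might bracket its send loop with them
 * (illustrative only; the real tick logic lives in the connection code, the
 * variable names are placeholders, and `now' stands for the current
 * monotonic time in microseconds):
 *
 *     lsquic_send_ctl_tick_in(&ctl, now);
 *     while (lsquic_send_ctl_can_send(&ctl)
 *            && (packet_out = lsquic_send_ctl_next_packet_to_send(&ctl, NULL)))
 *         ...encrypt and send packet_out...
 *     lsquic_send_ctl_tick_out(&ctl);
 *
 *     // Non-zero when the pacer is what is holding packets back; can be
 *     // used to schedule the next tick:
 *     next_time = lsquic_send_ctl_next_pacer_time(&ctl);
 */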

enum packno_bits
lsquic_send_ctl_packno_bits (struct lsquic_send_ctl *, enum packnum_space);

int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *, enum buf_packet_type);

#define lsquic_send_ctl_has_buffered(ctl) (                                 \
    TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets)  \
 || TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets ))

#define lsquic_send_ctl_has_buffered_high(ctl) (                            \
    !TAILQ_EMPTY(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets))

#define lsquic_send_ctl_invalidate_bpt_cache(ctl) do {          \
    (ctl)->sc_cached_bpt.stream_id = UINT64_MAX;                \
} while (0)

#ifndef NDEBUG
enum packno_bits
lsquic_send_ctl_guess_packno_bits (struct lsquic_send_ctl *);

int
lsquic_send_ctl_schedule_stream_packets_immediately (struct lsquic_send_ctl *);

enum buf_packet_type
lsquic_send_ctl_determine_bpt (struct lsquic_send_ctl *,
                                        const struct lsquic_stream *);

enum packno_bits
lsquic_send_ctl_calc_packno_bits (struct lsquic_send_ctl *);

void
lsquic_send_ctl_set_max_bpq_count (unsigned);
#endif

size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *);

#define lsquic_send_ctl_set_buffer_stream_packets(ctl, b) do {  \
    (ctl)->sc_flags &= ~SC_BUFFER_STREAM;                       \
    (ctl)->sc_flags |= -!!(b) & SC_BUFFER_STREAM;               \
} while (0)
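
/* The second line of the macro above is a branchless conditional set:
 * !!(b) normalizes any non-zero argument to 1, unary minus turns that into
 * an all-ones mask (or into 0), and ANDing with SC_BUFFER_STREAM leaves
 * either the flag or nothing.  A worked illustration (not part of the API):
 *
 *     b != 0:   -!!(b) == -1 (all bits set)  =>  sc_flags |= SC_BUFFER_STREAM
 *     b == 0:   -!!(b) ==  0                 =>  sc_flags |= 0
 *
 * Combined with the clearing on the first line, the flag ends up tracking
 * the truth value of `b' without a branch.
 */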

int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *,
                             const struct lsquic_stream *);

int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);

#define lsquic_send_ctl_incr_pack_sz(ctl, packet, delta) do {   \
    (packet)->po_data_sz += (delta);                            \
    if ((packet)->po_flags & PO_SCHED)                          \
        (ctl)->sc_bytes_scheduled += (delta);                   \
    lsquic_send_ctl_sanity_check(ctl);                          \
} while (0)

int
lsquic_send_ctl_sched_is_blocked (struct lsquic_send_ctl *);

void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *);

int
lsquic_send_ctl_retry (struct lsquic_send_ctl *, const unsigned char *, size_t);

int
lsquic_send_ctl_set_token (struct lsquic_send_ctl *,
                const unsigned char *token, size_t token_sz);

void
lsquic_send_ctl_empty_pns (struct lsquic_send_ctl *, enum packnum_space);

void
lsquic_send_ctl_maybe_calc_rough_rtt (struct lsquic_send_ctl *,
                                                    enum packnum_space);

void
lsquic_send_ctl_repath (struct lsquic_send_ctl *ctl,
    const struct network_path *old, const struct network_path *new,
    int keep_path_properties);

void
lsquic_send_ctl_cancel_chals (struct lsquic_send_ctl *,
                                        const struct network_path *);

void
lsquic_send_ctl_resize (struct lsquic_send_ctl *);

void
lsquic_send_ctl_return_enc_data (struct lsquic_send_ctl *);

#define lsquic_send_ctl_1rtt_acked(ctl) ((ctl)->sc_flags & SC_1RTT_ACKED)

void
lsquic_send_ctl_maybe_app_limited (struct lsquic_send_ctl *,
                                        const struct network_path *);

#define lsquic_send_ctl_do_ql_bits(ctl) do {                    \
    (ctl)->sc_flags |= SC_QL_BITS;                              \
} while (0)

void
lsquic_send_ctl_cidlen_change (struct lsquic_send_ctl *,
                        unsigned orig_cid_len, unsigned new_cid_len);

void
lsquic_send_ctl_begin_optack_detection (struct lsquic_send_ctl *);

void
lsquic_send_ctl_path_validated (struct lsquic_send_ctl *);

/* Has immediately sendable packets */
#define lsquic_send_ctl_has_sendable(ctl_) \
    (lsquic_send_ctl_n_scheduled(ctl_) > 0 \
        && lsquic_send_ctl_next_packet_to_send_predict(ctl_))

#define lsquic_send_ctl_in_recovery(ctl_) ((ctl_)->sc_largest_acked_packno  \
    && (ctl_)->sc_largest_acked_packno <= (ctl_)->sc_largest_sent_at_cutback)

#define send_ctl_in_recovery lsquic_send_ctl_in_recovery

int
lsquic_send_ctl_can_send_probe (const struct lsquic_send_ctl *,
                                        const struct network_path *);

#define lsquic_send_ctl_ecn_turned_on(ctl_) ((ctl_)->sc_ecn != ECN_NOT_ECT)

void
lsquic_send_ctl_disable_ecn (struct lsquic_send_ctl *);

struct send_ctl_state
{
    struct pacer        pacer;
    struct ack_state    ack_state;
    unsigned            buf_counts[BPT_OTHER_PRIO + 1];
};

void
lsquic_send_ctl_snapshot (struct lsquic_send_ctl *, struct send_ctl_state *);

void
lsquic_send_ctl_rollback (struct lsquic_send_ctl *, struct send_ctl_state *,
                                        const struct iovec *, size_t);

void
lsquic_send_ctl_0rtt_to_1rtt (struct lsquic_send_ctl *);

void
lsquic_send_ctl_stash_0rtt_packets (struct lsquic_send_ctl *);

#endif