lsquic_send_ctl.h revision 2f2f4363
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
#ifndef LSQUIC_SEND_CTL_H
#define LSQUIC_SEND_CTL_H 1

#include <sys/queue.h>

#include "lsquic_types.h"

#ifndef LSQUIC_SEND_STATS
#   define LSQUIC_SEND_STATS 1
#endif

TAILQ_HEAD(lsquic_packets_tailq, lsquic_packet_out);

struct lsquic_packet_out;
struct ack_info;
struct lsquic_alarmset;
struct lsquic_engine_public;
struct lsquic_conn_public;
struct network_path;
struct ver_neg;
enum pns;

enum buf_packet_type { BPT_HIGHEST_PRIO, BPT_OTHER_PRIO, };

struct buf_packet_q
{
    struct lsquic_packets_tailq     bpq_packets;
    unsigned                        bpq_count;
};

enum send_ctl_flags {
    SC_TCID0        = (1 << 0),
    SC_NSTP         = (1 << 2),
    SC_PACE         = (1 << 3),
    SC_SCHED_TICK   = (1 << 4),
    SC_BUFFER_STREAM= (1 << 5),
    SC_WAS_QUIET    = (1 << 6),
    SC_IETF         = (1 << 7),
#define SCBIT_LOST_ACK_SHIFT 8
    SC_LOST_ACK_INIT= 1 << 8,
    SC_LOST_ACK_HSK = SC_LOST_ACK_INIT << PNS_HSK,
    SC_LOST_ACK_APP = SC_LOST_ACK_INIT << PNS_APP,
    SC_1RTT_ACKED   = 1 << 11,
    SC_APP_LIMITED  = 1 << 12,
    SC_ECN          = 1 << 13,
    SC_QL_BITS      = 1 << 14,
    SC_SANITY_CHECK = 1 << 15,
    SC_CIDLEN       = 1 << 16,     /* sc_cidlen is set */
    SC_POISON       = 1 << 17,     /* poisoned packet exists */
};

typedef struct lsquic_send_ctl {
    /* The first section consists of struct members which are used in the
     * time-critical lsquic_send_ctl_got_ack() in the approximate order
     * of usage.
     */
    lsquic_senhist_t                sc_senhist;
    enum send_ctl_flags             sc_flags;
    enum ecn                        sc_ecn;
    unsigned                        sc_n_stop_waiting;
    struct lsquic_packets_tailq     sc_unacked_packets[N_PNS];
    lsquic_packno_t                 sc_largest_acked_packno;
    lsquic_time_t                   sc_largest_acked_sent_time;
    lsquic_time_t                   sc_last_sent_time;
    lsquic_time_t                   sc_last_rto_time;
    int                           (*sc_can_send)(struct lsquic_send_ctl *);
    unsigned                        sc_bytes_unacked_retx;
    unsigned                        sc_bytes_scheduled;
    union {
        struct lsquic_cubic         cubic;
        struct lsquic_bbr           bbr;
    }                               sc_cong_u;
    const struct cong_ctl_if       *sc_ci;
    struct lsquic_engine_public    *sc_enpub;
    unsigned                        sc_bytes_unacked_all;
    unsigned                        sc_n_in_flight_all;
    unsigned                        sc_n_in_flight_retx;
    unsigned                        sc_n_consec_rtos;
    unsigned                        sc_n_hsk;
    unsigned                        sc_n_tlp;
    enum quic_ft_bit                sc_retx_frames;
    struct lsquic_alarmset         *sc_alset;

    /* Second section: everything else. */
    struct lsquic_packets_tailq     sc_scheduled_packets,
                                    sc_lost_packets;
    struct buf_packet_q             sc_buffered_packets[BPT_OTHER_PRIO + 1];
    const struct ver_neg           *sc_ver_neg;
    struct lsquic_conn_public      *sc_conn_pub;
    struct pacer                    sc_pacer;
    lsquic_packno_t                 sc_cur_packno;
    lsquic_packno_t                 sc_largest_sent_at_cutback;
    lsquic_packno_t                 sc_max_rtt_packno;
    /* sc_largest_ack2ed is the packet number sent by peer that we acked and
     * we know that our ACK was received by peer.  This is used to determine
     * the receive history cutoff point for the purposes of generating ACK
     * frames in the absence of STOP_WAITING frames.  Used when NSTP option
     * is set.  (The "ack2ed" is odd enough to not be confused with anything
     * else and it is not insanely long.)
     */
    lsquic_packno_t                 sc_largest_ack2ed[N_PNS];
    /* sc_largest_acked is the largest packet number in PNS_APP packet number
     * space sent by peer for which we generated (not necessarily sent) an ACK.
     * This information is used to drop stale ACK frames from packets in
     * buffered queues.
     */
    lsquic_packno_t                 sc_largest_acked;
    lsquic_time_t                   sc_loss_to;
    uint64_t                        sc_ecn_total_acked[N_PNS];
    uint64_t                        sc_ecn_ce_cnt[N_PNS];
    struct
    {
        lsquic_stream_id_t          stream_id;
        enum buf_packet_type        packet_type;
    }                               sc_cached_bpt;
    unsigned                        sc_next_limit;
    unsigned                        sc_n_scheduled;
    enum packno_bits                sc_max_packno_bits;
#if LSQUIC_SEND_STATS
    struct {
        unsigned                    n_total_sent,
                                    n_resent,
                                    n_delayed;
    }                               sc_stats;
#endif
    unsigned char                  *sc_token;
    size_t                          sc_token_sz;
    unsigned                        sc_retry_count;
    unsigned                        sc_rt_count;    /* Count round trips */
    lsquic_packno_t                 sc_cur_rt_end;
    lsquic_packno_t                 sc_gap;
    unsigned                        sc_loss_count;  /* Used to set loss bit */
    unsigned                        sc_square_count;/* Used to set square bit */
    signed char                     sc_cidlen;      /* For debug purposes */
} lsquic_send_ctl_t;

void
lsquic_send_ctl_init (lsquic_send_ctl_t *, struct lsquic_alarmset *,
          struct lsquic_engine_public *, const struct ver_neg *,
          struct lsquic_conn_public *, enum send_ctl_flags);

int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *, struct lsquic_packet_out *);

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *, const struct ack_info *,
                                            lsquic_time_t, lsquic_time_t);

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);

int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);

void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);

int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

struct lsquic_packet_out *
lsquic_send_ctl_next_packet_to_send (struct lsquic_send_ctl *, size_t);

int
lsquic_send_ctl_next_packet_to_send_predict (struct lsquic_send_ctl *);

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl);

#define lsquic_send_ctl_n_in_flight(ctl) (+(ctl)->sc_n_in_flight)

#define lsquic_send_ctl_n_scheduled(ctl) (+(ctl)->sc_n_scheduled)

#define lsquic_send_ctl_largest_ack2ed(ctl, pns) \
                                        (+(ctl)->sc_largest_ack2ed[pns])

void
lsquic_send_ctl_do_sanity_check (const struct lsquic_send_ctl *ctl);

#ifndef NDEBUG
#define lsquic_send_ctl_sanity_check(ctl) do {                  \
    if ((ctl)->sc_flags & SC_SANITY_CHECK)                      \
        lsquic_send_ctl_do_sanity_check(ctl);                   \
} while (0)
#else
#define lsquic_send_ctl_sanity_check(ctl)
#endif

int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *);

struct lsquic_packet_out *
lsquic_send_ctl_last_scheduled (struct lsquic_send_ctl *, enum packnum_space,
                                            const struct network_path *, int);

struct lsquic_packet_out *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *, unsigned,
                            enum packnum_space, const struct network_path *);

struct lsquic_packet_out *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *, enum packnum_space,
    unsigned need_at_least, const struct network_path *, int, int *is_err);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *,
                        unsigned need_at_least, const struct network_path *,
                        const struct lsquic_stream *);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_crypto (struct lsquic_send_ctl *ctl,
    unsigned need_at_least, enum packnum_space, const struct network_path *);
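
/* Illustrative sketch (not part of the original header): one plausible way
 * frame-writing code might use the packet getters declared above.  The
 * variable names, the zero passed for the unnamed int parameter, and the
 * reading of is_err as "real failure vs. no room right now" are assumptions
 * made for illustration only.
 *
 *     int is_err;
 *     struct lsquic_packet_out *const packet_out =
 *         lsquic_send_ctl_get_writeable_packet(ctl, pns, need_at_least,
 *                                              path, 0, &is_err);
 *     if (!packet_out)
 *         return is_err ? -1 : 0;
 */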

unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *);

#define lsquic_send_ctl_lost_ack(ctl) \
    (((ctl)->sc_flags & (SC_LOST_ACK_INIT|SC_LOST_ACK_HSK|SC_LOST_ACK_APP)) \
                                                    >> SCBIT_LOST_ACK_SHIFT)

#define lsquic_send_ctl_scheduled_ack(ctl, pns, acked) do {     \
    (ctl)->sc_flags &= ~(SC_LOST_ACK_INIT << pns);              \
    if (PNS_APP == pns)                                         \
        (ctl)->sc_largest_acked = acked;                        \
} while (0)

void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *, int);

#define lsquic_send_ctl_turn_nstp_on(ctl) ((ctl)->sc_flags |= SC_NSTP)

void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *, lsquic_stream_id_t);

int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *);

#define lsquic_send_ctl_maybe_squeeze_sched(ctl) (              \
    (ctl)->sc_n_scheduled && lsquic_send_ctl_squeeze_sched(ctl) \
)

/* Same return value as for squeezing, but without actual squeezing. */
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *);

void
lsquic_send_ctl_ack_to_front (struct lsquic_send_ctl *, unsigned n_acks);

#define lsquic_send_ctl_n_stop_waiting(ctl) \
                                    (+(ctl)->sc_n_stop_waiting)

#define lsquic_send_ctl_n_stop_waiting_reset(ctl) do {          \
    (ctl)->sc_n_stop_waiting = 0;                               \
} while (0)

void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *);

#define lsquic_send_ctl_tick_in(ctl, now) do {                  \
    if ((ctl)->sc_flags & SC_PACE)                              \
    {                                                           \
        (ctl)->sc_flags |= SC_SCHED_TICK;                       \
        lsquic_pacer_tick_in(&(ctl)->sc_pacer, now);            \
    }                                                           \
    (ctl)->sc_flags &= ~SC_APP_LIMITED;                         \
} while (0)

#define lsquic_send_ctl_tick_out(ctl) do {                      \
    if ((ctl)->sc_flags & SC_PACE)                              \
        lsquic_pacer_tick_out(&(ctl)->sc_pacer);                \
} while (0)

#define lsquic_send_ctl_next_pacer_time(ctl) (                  \
    ((ctl)->sc_flags & SC_PACE)                                 \
        && lsquic_pacer_delayed(&(ctl)->sc_pacer)               \
        ? lsquic_pacer_next_sched(&(ctl)->sc_pacer)             \
        : 0 )
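
/* Illustrative sketch (not taken from the original header): with pacing
 * enabled (SC_PACE), the three macros above suggest a per-tick pattern
 * roughly like the following; the surrounding tick function and its control
 * flow are assumptions made for illustration.
 *
 *     lsquic_send_ctl_tick_in(ctl, now);
 *     ... generate and send scheduled packets ...
 *     lsquic_send_ctl_tick_out(ctl);
 *     if (lsquic_send_ctl_next_pacer_time(ctl))
 *         ... arrange to tick again no earlier than that time ...
 */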

enum packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *);

int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *, enum buf_packet_type);

#define lsquic_send_ctl_has_buffered(ctl) (                                 \
    TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets)  \
    || TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets ))

#define lsquic_send_ctl_has_buffered_high(ctl) (                            \
    !TAILQ_EMPTY(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets))

#define lsquic_send_ctl_invalidate_bpt_cache(ctl) do {          \
    (ctl)->sc_cached_bpt.stream_id = UINT64_MAX;                \
} while (0)

#ifndef NDEBUG
enum packno_bits
lsquic_send_ctl_guess_packno_bits (struct lsquic_send_ctl *);

int
lsquic_send_ctl_schedule_stream_packets_immediately (struct lsquic_send_ctl *);

enum buf_packet_type
lsquic_send_ctl_determine_bpt (struct lsquic_send_ctl *,
                                            const struct lsquic_stream *);

enum packno_bits
lsquic_send_ctl_calc_packno_bits (struct lsquic_send_ctl *);

void
lsquic_send_ctl_set_max_bpq_count (unsigned);
#endif

size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *);

#define lsquic_send_ctl_set_buffer_stream_packets(ctl, b) do {  \
    (ctl)->sc_flags &= ~SC_BUFFER_STREAM;                       \
    (ctl)->sc_flags |= -!!(b) & SC_BUFFER_STREAM;               \
} while (0)

int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *,
                             const struct lsquic_stream *);

int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);

#define lsquic_send_ctl_incr_pack_sz(ctl, packet, delta) do {   \
    (packet)->po_data_sz += (delta);                            \
    if ((packet)->po_flags & PO_SCHED)                          \
        (ctl)->sc_bytes_scheduled += (delta);                   \
    lsquic_send_ctl_sanity_check(ctl);                          \
} while (0)

int
lsquic_send_ctl_sched_is_blocked (struct lsquic_send_ctl *);

void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *);

int
lsquic_send_ctl_retry (struct lsquic_send_ctl *, const unsigned char *, size_t);

int
lsquic_send_ctl_set_token (struct lsquic_send_ctl *,
                            const unsigned char *token, size_t token_sz);

void
lsquic_send_ctl_empty_pns (struct lsquic_send_ctl *, enum packnum_space);

void
lsquic_send_ctl_repath (struct lsquic_send_ctl *, struct network_path *old,
                            struct network_path *new);

void
lsquic_send_ctl_resize (struct lsquic_send_ctl *);

void
lsquic_send_ctl_return_enc_data (struct lsquic_send_ctl *);

#define lsquic_send_ctl_1rtt_acked(ctl) ((ctl)->sc_flags & SC_1RTT_ACKED)

void
lsquic_send_ctl_maybe_app_limited (struct lsquic_send_ctl *,
                                            const struct network_path *);

#define lsquic_send_ctl_do_ql_bits(ctl) do {                    \
    (ctl)->sc_flags |= SC_QL_BITS;                              \
} while (0)

void
lsquic_send_ctl_cidlen_change (struct lsquic_send_ctl *,
                                unsigned orig_cid_len, unsigned new_cid_len);

void
lsquic_send_ctl_begin_optack_detection (struct lsquic_send_ctl *);

#define lsquic_send_ctl_n_unacked(ctl_) ((ctl_)->sc_n_in_flight_retx)

void
lsquic_send_ctl_path_validated (struct lsquic_send_ctl *);

/* Has immediately sendable packets */
#define lsquic_send_ctl_has_sendable(ctl_) \
    (lsquic_send_ctl_n_scheduled(ctl_) > 0 \
        && lsquic_send_ctl_next_packet_to_send_predict(ctl_))

#define lsquic_send_ctl_in_recovery(ctl_) ((ctl_)->sc_largest_acked_packno  \
    && (ctl_)->sc_largest_acked_packno <= (ctl_)->sc_largest_sent_at_cutback)

#define send_ctl_in_recovery lsquic_send_ctl_in_recovery
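
/* Illustrative note (not part of the original header): per the macro above,
 * the sender counts as being in recovery while the largest ACKed packet
 * number has not yet passed the largest packet number that had been sent
 * when the congestion window was last cut back.  For example, if the cutback
 * happened when packet 100 was the largest packet sent, ACKs of packets 1
 * through 100 keep the connection in recovery; an ACK of packet 101 or
 * higher ends it.
 */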

int
lsquic_send_ctl_can_send_probe (const struct lsquic_send_ctl *,
                                            const struct network_path *);

#define lsquic_send_ctl_ecn_turned_on(ctl_) ((ctl_)->sc_ecn != ECN_NOT_ECT)

void
lsquic_send_ctl_disable_ecn (struct lsquic_send_ctl *);

struct send_ctl_state
{
    struct pacer            pacer;
    struct ack_state        ack_state;
    unsigned                buf_counts[BPT_OTHER_PRIO + 1];
};

void
lsquic_send_ctl_snapshot (struct lsquic_send_ctl *, struct send_ctl_state *);

void
lsquic_send_ctl_rollback (struct lsquic_send_ctl *, struct send_ctl_state *,
                            const struct iovec *, size_t);

#endif