lsquic_send_ctl.h revision 02b6086d
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
#ifndef LSQUIC_SEND_CTL_H
#define LSQUIC_SEND_CTL_H 1

#include <sys/queue.h>

#include "lsquic_types.h"

#ifndef LSQUIC_SEND_STATS
#   define LSQUIC_SEND_STATS 1
#endif

TAILQ_HEAD(lsquic_packets_tailq, lsquic_packet_out);

struct lsquic_packet_out;
struct ack_info;
struct lsquic_alarmset;
struct lsquic_engine_public;
struct lsquic_conn_public;
struct network_path;
struct ver_neg;
enum pns;

enum buf_packet_type { BPT_HIGHEST_PRIO, BPT_OTHER_PRIO, };

struct buf_packet_q
{
    struct lsquic_packets_tailq     bpq_packets;
    unsigned                        bpq_count;
};

enum send_ctl_flags {
    SC_TCID0        = (1 << 0),
    SC_NSTP         = (1 << 2),
    SC_PACE         = (1 << 3),
    SC_SCHED_TICK   = (1 << 4),
    SC_BUFFER_STREAM= (1 << 5),
    SC_WAS_QUIET    = (1 << 6),
    SC_IETF         = (1 << 7),
#define SCBIT_LOST_ACK_SHIFT 8
    SC_LOST_ACK_INIT= 1 << 8,
    SC_LOST_ACK_HSK = SC_LOST_ACK_INIT << PNS_HSK,
    SC_LOST_ACK_APP = SC_LOST_ACK_INIT << PNS_APP,
    SC_1RTT_ACKED   = 1 << 11,
    SC_APP_LIMITED  = 1 << 12,
    SC_ECN          = 1 << 13,
    SC_QL_BITS      = 1 << 14,
};

typedef struct lsquic_send_ctl {
    /* The first section consists of struct members which are used in the
     * time-critical lsquic_send_ctl_got_ack() in the approximate order
     * of usage.
     */
    lsquic_senhist_t                sc_senhist;
    enum send_ctl_flags             sc_flags;
    enum ecn                        sc_ecn;
    unsigned                        sc_n_stop_waiting;
    struct lsquic_packets_tailq     sc_unacked_packets[N_PNS];
    lsquic_packno_t                 sc_largest_acked_packno;
    lsquic_time_t                   sc_largest_acked_sent_time;
    lsquic_time_t                   sc_last_sent_time;
    lsquic_time_t                   sc_last_rto_time;
    unsigned                        sc_bytes_unacked_retx;
    unsigned                        sc_bytes_scheduled;
    union {
        struct lsquic_cubic         cubic;
        struct lsquic_bbr           bbr;
    }                               sc_cong_u;
    const struct cong_ctl_if       *sc_ci;
    struct lsquic_engine_public    *sc_enpub;
    unsigned                        sc_bytes_unacked_all;
    unsigned                        sc_n_in_flight_all;
    unsigned                        sc_n_in_flight_retx;
    unsigned                        sc_n_consec_rtos;
    unsigned                        sc_n_hsk;
    unsigned                        sc_n_tlp;
    enum quic_ft_bit                sc_retx_frames;
    struct lsquic_alarmset         *sc_alset;

    /* Second section: everything else. */
    struct lsquic_packets_tailq     sc_scheduled_packets,
                                    sc_lost_packets;
    struct buf_packet_q             sc_buffered_packets[BPT_OTHER_PRIO + 1];
    const struct ver_neg           *sc_ver_neg;
    struct lsquic_conn_public      *sc_conn_pub;
    struct pacer                    sc_pacer;
    lsquic_packno_t                 sc_cur_packno;
    lsquic_packno_t                 sc_largest_sent_at_cutback;
    lsquic_packno_t                 sc_max_rtt_packno;
    /* sc_largest_ack2ed is the packet number sent by peer that we acked and
     * we know that our ACK was received by peer.  This is used to determine
     * the receive history cutoff point for the purposes of generating ACK
     * frames in the absence of STOP_WAITING frames.  Used when NSTP option
     * is set.  (The "ack2ed" is odd enough to not be confused with anything
     * else and it is not insanely long.)
     */
    lsquic_packno_t                 sc_largest_ack2ed[N_PNS];
    /* sc_largest_acked is the largest packet number in PNS_APP packet number
     * space sent by peer for which we generated (not necessarily sent) an ACK.
     * This information is used to drop stale ACK frames from packets in
     * buffered queues.
     */
    lsquic_packno_t                 sc_largest_acked;
    lsquic_time_t                   sc_loss_to;
    uint64_t                        sc_ecn_total_acked[N_PNS];
    uint64_t                        sc_ecn_ce_cnt[N_PNS];
    struct
    {
        lsquic_stream_id_t      stream_id;
        enum buf_packet_type    packet_type;
    }                               sc_cached_bpt;
    unsigned                        sc_next_limit;
    unsigned                        sc_n_scheduled;
    enum packno_bits                sc_max_packno_bits;
#if LSQUIC_SEND_STATS
    struct {
        unsigned            n_total_sent,
                            n_resent,
                            n_delayed;
    }                               sc_stats;
#endif
    unsigned char                  *sc_token;
    size_t                          sc_token_sz;
    unsigned                        sc_retry_count;
    unsigned                        sc_rt_count;    /* Count round trips */
    lsquic_packno_t                 sc_cur_rt_end;
    unsigned                        sc_loss_count;  /* Used to set loss bit */
    unsigned                        sc_square_count;/* Used to set square bit */
} lsquic_send_ctl_t;

void
lsquic_send_ctl_init (lsquic_send_ctl_t *, struct lsquic_alarmset *,
          struct lsquic_engine_public *, const struct ver_neg *,
          struct lsquic_conn_public *, enum send_ctl_flags);

int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *, struct lsquic_packet_out *);

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *, const struct ack_info *,
                                            lsquic_time_t, lsquic_time_t);

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl);

int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *);

void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *);

int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *, struct lsquic_packet_out *);

struct lsquic_packet_out *
lsquic_send_ctl_next_packet_to_send (struct lsquic_send_ctl *, size_t);

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl);

#define lsquic_send_ctl_n_in_flight(ctl) (+(ctl)->sc_n_in_flight)

#define lsquic_send_ctl_n_scheduled(ctl) (+(ctl)->sc_n_scheduled)

#define lsquic_send_ctl_largest_ack2ed(ctl, pns) \
                                    (+(ctl)->sc_largest_ack2ed[pns])

#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl);
#else
#   define lsquic_send_ctl_sanity_check(ctl)
#endif

int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *);

int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *);

struct lsquic_packet_out *
lsquic_send_ctl_last_scheduled (struct lsquic_send_ctl *, enum packnum_space,
                                            const struct network_path *, int);

struct lsquic_packet_out *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *, unsigned,
                            enum packnum_space, const struct network_path *);

struct lsquic_packet_out *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *, enum packnum_space,
      unsigned need_at_least, const struct network_path *, int, int *is_err);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *,
      unsigned need_at_least, const struct network_path *,
      const struct lsquic_stream *);

struct lsquic_packet_out *
lsquic_send_ctl_get_packet_for_crypto (struct lsquic_send_ctl *ctl,
    unsigned need_at_least, enum packnum_space, const struct network_path *);

unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *);

#define lsquic_send_ctl_lost_ack(ctl) \
    (((ctl)->sc_flags & (SC_LOST_ACK_INIT|SC_LOST_ACK_HSK|SC_LOST_ACK_APP)) \
                                                >> SCBIT_LOST_ACK_SHIFT)
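
/* Illustrative note, not an additional API: because SC_LOST_ACK_INIT occupies
 * bit SCBIT_LOST_ACK_SHIFT and the HSK and APP variants are shifted up by
 * their packet number space, lsquic_send_ctl_lost_ack() evaluates to a small
 * bitmask indexed by enum pns (assuming the Initial space is zero, as the
 * flag layout suggests).  A hypothetical caller could check one space:
 *
 *     if (lsquic_send_ctl_lost_ack(ctl) & (1 << PNS_APP))
 *         regenerate_ack(conn, PNS_APP);      // hypothetical helper
 */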
#define lsquic_send_ctl_scheduled_ack(ctl, pns, acked) do {     \
    (ctl)->sc_flags &= ~(SC_LOST_ACK_INIT << pns);              \
    if (PNS_APP == pns)                                         \
        (ctl)->sc_largest_acked = acked;                        \
} while (0)

void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *, int);

#define lsquic_send_ctl_turn_nstp_on(ctl) ((ctl)->sc_flags |= SC_NSTP)

void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *, lsquic_stream_id_t);

int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *);

#define lsquic_send_ctl_maybe_squeeze_sched(ctl) (                  \
    (ctl)->sc_n_scheduled && lsquic_send_ctl_squeeze_sched(ctl)     \
)

/* Same return value as for squeezing, but without actual squeezing. */
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl);

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *);

void
lsquic_send_ctl_ack_to_front (struct lsquic_send_ctl *, unsigned n_acks);

#define lsquic_send_ctl_n_stop_waiting(ctl) \
                            (+(ctl)->sc_n_stop_waiting)

#define lsquic_send_ctl_n_stop_waiting_reset(ctl) do {      \
    (ctl)->sc_n_stop_waiting = 0;                           \
} while (0)

void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *);

#define lsquic_send_ctl_tick_in(ctl, now) do {              \
    if ((ctl)->sc_flags & SC_PACE)                          \
    {                                                       \
        (ctl)->sc_flags |= SC_SCHED_TICK;                   \
        pacer_tick_in(&(ctl)->sc_pacer, now);               \
    }                                                       \
    (ctl)->sc_flags &= ~SC_APP_LIMITED;                     \
} while (0)

#define lsquic_send_ctl_tick_out(ctl) do {                  \
    if ((ctl)->sc_flags & SC_PACE)                          \
        pacer_tick_out(&(ctl)->sc_pacer);                   \
} while (0)

#define lsquic_send_ctl_next_pacer_time(ctl) (              \
    ((ctl)->sc_flags & SC_PACE)                             \
        && pacer_delayed(&(ctl)->sc_pacer)                  \
        ? pacer_next_sched(&(ctl)->sc_pacer)                \
        : 0 )

enum packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *);

int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *, enum buf_packet_type);

#define lsquic_send_ctl_has_buffered(ctl) (                                 \
    TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets)  \
 || TAILQ_FIRST(&(ctl)->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets))

#define lsquic_send_ctl_has_buffered_high(ctl) (                            \
    !TAILQ_EMPTY(&(ctl)->sc_buffered_packets[BPT_HIGHEST_PRIO].bpq_packets))

#define lsquic_send_ctl_invalidate_bpt_cache(ctl) do {      \
    (ctl)->sc_cached_bpt.stream_id = UINT64_MAX;            \
} while (0)

#ifndef NDEBUG
enum packno_bits
lsquic_send_ctl_guess_packno_bits (struct lsquic_send_ctl *);

int
lsquic_send_ctl_schedule_stream_packets_immediately (struct lsquic_send_ctl *);

enum buf_packet_type
lsquic_send_ctl_determine_bpt (struct lsquic_send_ctl *,
                                            const struct lsquic_stream *);

enum packno_bits
lsquic_send_ctl_calc_packno_bits (struct lsquic_send_ctl *);

void
lsquic_send_ctl_set_max_bpq_count (unsigned);
#endif

size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *);

#define lsquic_send_ctl_set_buffer_stream_packets(ctl, b) do {  \
    (ctl)->sc_flags &= ~SC_BUFFER_STREAM;                       \
    (ctl)->sc_flags |= -!!(b) & SC_BUFFER_STREAM;               \
} while (0)

int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *,
                                        const struct lsquic_stream *);

int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *);

#define lsquic_send_ctl_incr_pack_sz(ctl, packet, delta) do {   \
    (packet)->po_data_sz += (delta);                            \
    if ((packet)->po_flags & PO_SCHED)                          \
        (ctl)->sc_bytes_scheduled += (delta);                   \
    lsquic_send_ctl_sanity_check(ctl);                          \
} while (0)
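
/* Rough per-tick sketch (hypothetical caller; the project's actual connection
 * code is not shown in this header): the tick_in/tick_out macros above
 * bracket a send pass for the pacer, and lsquic_send_ctl_can_send() gates how
 * many scheduled packets are dequeued.  The size_t argument to
 * lsquic_send_ctl_next_packet_to_send() is passed as 0 purely for
 * illustration.
 *
 *     lsquic_send_ctl_tick_in(ctl, now);
 *     while (lsquic_send_ctl_can_send(ctl)
 *            && (packet_out = lsquic_send_ctl_next_packet_to_send(ctl, 0)))
 *     {
 *         send_on_wire(packet_out);                    // hypothetical helper
 *         lsquic_send_ctl_sent_packet(ctl, packet_out);
 *     }
 *     lsquic_send_ctl_tick_out(ctl);
 */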

int
lsquic_send_ctl_sched_is_blocked (struct lsquic_send_ctl *);

void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *);

int
lsquic_send_ctl_retry (struct lsquic_send_ctl *, const unsigned char *, size_t);

int
lsquic_send_ctl_set_token (struct lsquic_send_ctl *,
                const unsigned char *token, size_t token_sz);

void
lsquic_send_ctl_empty_pns (struct lsquic_send_ctl *, enum packnum_space);

void
lsquic_send_ctl_repath (struct lsquic_send_ctl *, struct network_path *old,
                                                struct network_path *new);

void
lsquic_send_ctl_return_enc_data (struct lsquic_send_ctl *);

#define lsquic_send_ctl_1rtt_acked(ctl) ((ctl)->sc_flags & SC_1RTT_ACKED)

void
lsquic_send_ctl_maybe_app_limited (struct lsquic_send_ctl *,
                                            const struct network_path *);

#define lsquic_send_ctl_do_ql_bits(ctl) do {                \
    (ctl)->sc_flags |= SC_QL_BITS;                          \
} while (0)

#endif
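
/* Illustrative lifetime sketch (hypothetical caller and field names; not
 * taken from this project's connection code): a send controller is set up
 * once with its alarm set, engine, version-negotiation and connection-public
 * structs, used for the connection's lifetime, and then cleaned up.
 *
 *     lsquic_send_ctl_init(&conn->send_ctl, alset, enpub, ver_neg, conn_pub,
 *                          flags);
 *     ... per-tick sends and lsquic_send_ctl_got_ack() processing ...
 *     lsquic_send_ctl_cleanup(&conn->send_ctl);
 */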