lsquic_send_ctl.c revision 83287402
1/* Copyright (c) 2017 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_send_ctl.c -- Logic for sending and sent packets 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <stdlib.h> 10#include <string.h> 11#include <sys/queue.h> 12 13#include "lsquic_types.h" 14#include "lsquic_int_types.h" 15#include "lsquic.h" 16#include "lsquic_mm.h" 17#include "lsquic_engine_public.h" 18#include "lsquic_alarmset.h" 19#include "lsquic_packet_common.h" 20#include "lsquic_parse.h" 21#include "lsquic_packet_out.h" 22#include "lsquic_senhist.h" 23#include "lsquic_rtt.h" 24#include "lsquic_cubic.h" 25#include "lsquic_pacer.h" 26#include "lsquic_send_ctl.h" 27#include "lsquic_util.h" 28#include "lsquic_sfcw.h" 29#include "lsquic_stream.h" 30#include "lsquic_ver_neg.h" 31#include "lsquic_ev_log.h" 32#include "lsquic_conn.h" 33#include "lsquic_conn_flow.h" 34#include "lsquic_conn_public.h" 35 36#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL 37#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid 38#include "lsquic_logger.h" 39 40#define MAX_RESUBMITTED_ON_RTO 2 41#define MAX_RTO_BACKOFFS 10 42#define DEFAULT_RETX_DELAY 500000 /* Microseconds */ 43#define MAX_RTO_DELAY 60000000 /* Microseconds */ 44#define MIN_RTO_DELAY 1000000 /* Microseconds */ 45#define N_NACKS_BEFORE_RETX 3 46 47 48enum retx_mode { 49 RETX_MODE_HANDSHAKE, 50 RETX_MODE_LOSS, 51 RETX_MODE_TLP, 52 RETX_MODE_RTO, 53}; 54 55 56static const char *const retx2str[] = { 57 [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE", 58 [RETX_MODE_LOSS] = "RETX_MODE_LOSS", 59 [RETX_MODE_TLP] = "RETX_MODE_TLP", 60 [RETX_MODE_RTO] = "RETX_MODE_RTO", 61}; 62 63 64static void 65update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out); 66 67 68enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, }; 69 70 71static void 72send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter); 73 74static void 75set_retx_alarm (lsquic_send_ctl_t *ctl); 76 77static void 78send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time); 79 80 81int 82lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl) 83{ 84 const lsquic_packet_out_t *packet_out; 85 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 86 if (packet_out->po_frame_types & 87 ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM))) 88 return 1; 89 return 0; 90} 91 92 93static lsquic_packet_out_t * 94send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl) 95{ 96 lsquic_packet_out_t *packet_out; 97 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 98 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 99 return packet_out; 100 return NULL; 101} 102 103 104static lsquic_packet_out_t * 105send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl) 106{ 107 lsquic_packet_out_t *packet_out; 108 TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets, 109 lsquic_packets_tailq, po_next) 110 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 111 return packet_out; 112 return NULL; 113} 114 115 116static int 117have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl) 118{ 119 const lsquic_packet_out_t *packet_out; 120 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 121 if (packet_out->po_flags & PO_HELLO) 122 return 1; 123 return 0; 124} 125 126 127static enum retx_mode 128get_retx_mode (lsquic_send_ctl_t *ctl) 129{ 130 if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE) 131 && have_unacked_handshake_packets(ctl)) 132 return RETX_MODE_HANDSHAKE; 133 if (ctl->sc_loss_to) 134 return RETX_MODE_LOSS; 135 if (ctl->sc_n_tlp < 2) 136 return RETX_MODE_TLP; 137 return RETX_MODE_RTO; 138} 139 140 141static lsquic_time_t 142get_retx_delay (const struct lsquic_rtt_stats *rtt_stats) 143{ 144 lsquic_time_t srtt, delay; 145 146 srtt = lsquic_rtt_stats_get_srtt(rtt_stats); 147 if (srtt) 148 { 149 delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats); 150 if (delay < MIN_RTO_DELAY) 151 delay = MIN_RTO_DELAY; 152 } 153 else 154 delay = DEFAULT_RETX_DELAY; 155 156 return delay; 157} 158 159 160static void 161retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now) 162{ 163 lsquic_send_ctl_t *ctl = ctx; 164 lsquic_packet_out_t *packet_out; 165 enum retx_mode rm; 166 167 /* This is a callback -- before it is called, the alarm is unset */ 168 assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)); 169 170 rm = get_retx_mode(ctl); 171 LSQ_INFO("retx timeout, mode %s", retx2str[rm]); 172 173 switch (rm) 174 { 175 case RETX_MODE_HANDSHAKE: 176 send_ctl_expire(ctl, EXFI_HSK); 177 /* Do not register cubic loss during handshake */ 178 break; 179 case RETX_MODE_LOSS: 180 send_ctl_detect_losses(ctl, lsquic_time_now()); 181 break; 182 case RETX_MODE_TLP: 183 ++ctl->sc_n_tlp; 184 send_ctl_expire(ctl, EXFI_LAST); 185 break; 186 case RETX_MODE_RTO: 187 ++ctl->sc_n_consec_rtos; 188 ctl->sc_next_limit = 2; 189 LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry); 190 send_ctl_expire(ctl, EXFI_ALL); 191 lsquic_cubic_timeout(&ctl->sc_cubic); 192 break; 193 } 194 195 packet_out = send_ctl_first_unacked_retx_packet(ctl); 196 if (packet_out) 197 set_retx_alarm(ctl); 198 lsquic_send_ctl_sanity_check(ctl); 199} 200 201 202void 203lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset, 204 struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg, 205 struct lsquic_conn_public *conn_pub, unsigned short pack_size) 206{ 207 memset(ctl, 0, sizeof(*ctl)); 208 TAILQ_INIT(&ctl->sc_scheduled_packets); 209 TAILQ_INIT(&ctl->sc_unacked_packets); 210 TAILQ_INIT(&ctl->sc_lost_packets); 211 ctl->sc_enpub = enpub; 212 ctl->sc_alset = alset; 213 ctl->sc_ver_neg = ver_neg; 214 ctl->sc_pack_size = pack_size; 215 ctl->sc_conn_pub = conn_pub; 216 if (enpub->enp_settings.es_pace_packets) 217 ctl->sc_flags |= SC_PACE; 218 lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl); 219 lsquic_senhist_init(&ctl->sc_senhist); 220 lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID); 221 if (ctl->sc_flags & SC_PACE) 222 pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000); 223} 224 225 226static lsquic_time_t 227calculate_packet_rto (lsquic_send_ctl_t *ctl) 228{ 229 lsquic_time_t delay; 230 231 delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats); 232 233 unsigned exp = ctl->sc_n_consec_rtos; 234 if (exp > MAX_RTO_BACKOFFS) 235 exp = MAX_RTO_BACKOFFS; 236 237 delay = delay * (1 << exp); 238 239 return delay; 240} 241 242 243static lsquic_time_t 244calculate_tlp_delay (lsquic_send_ctl_t *ctl) 245{ 246 lsquic_time_t srtt, delay; 247 248 srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 249 if (ctl->sc_n_in_flight > 1) 250 { 251 delay = 10000; /* 10 ms is the minimum tail loss probe delay */ 252 if (delay < 2 * srtt) 253 delay = 2 * srtt; 254 } 255 else 256 { 257 delay = srtt + srtt / 2 + MIN_RTO_DELAY; 258 if (delay < 2 * srtt) 259 delay = 2 * srtt; 260 } 261 262 return delay; 263} 264 265 266static void 267set_retx_alarm (lsquic_send_ctl_t *ctl) 268{ 269 enum retx_mode rm; 270 lsquic_time_t delay, now; 271 272 assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets)); 273 274 now = lsquic_time_now(); 275 276 rm = get_retx_mode(ctl); 277 switch (rm) 278 { 279 case RETX_MODE_HANDSHAKE: 280 /* [draft-iyengar-quic-loss-recovery-01]: 281 * 282 * if (handshake packets are outstanding): 283 * alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count; 284 * handshake_count++; 285 */ 286 delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 287 delay += delay / 2; 288 if (10000 > delay) 289 delay = 10000; 290 delay <<= ctl->sc_n_hsk; 291 ++ctl->sc_n_hsk; 292 break; 293 case RETX_MODE_LOSS: 294 delay = ctl->sc_loss_to; 295 break; 296 case RETX_MODE_TLP: 297 delay = calculate_tlp_delay(ctl); 298 break; 299 case RETX_MODE_RTO: 300 /* Base RTO on the first unacked packet, following reference 301 * implementation. 302 */ 303 delay = calculate_packet_rto(ctl); 304 break; 305 } 306 307 if (delay > MAX_RTO_DELAY) 308 delay = MAX_RTO_DELAY; 309 310 LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64 311 " usec from now, mode %s", now + delay, delay, retx2str[rm]); 312 lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay); 313} 314 315 316static int 317send_ctl_in_recovery (lsquic_send_ctl_t *ctl) 318{ 319 return ctl->sc_largest_acked_packno 320 && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback; 321} 322 323 324static int 325send_ctl_in_slow_start (lsquic_send_ctl_t *ctl) 326{ 327 return lsquic_cubic_in_slow_start(&ctl->sc_cubic); 328} 329 330 331static lsquic_time_t 332send_ctl_transfer_time (void *ctx) 333{ 334 lsquic_send_ctl_t *const ctl = ctx; 335 uint64_t bandwidth, pacing_rate; 336 lsquic_time_t srtt, tx_time; 337 unsigned cwnd; 338 339 srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 340 if (srtt == 0) 341 srtt = 50000; 342 cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic); 343 bandwidth = (uint64_t) cwnd * (uint64_t) ctl->sc_pack_size * 1000000 / srtt; 344 if (send_ctl_in_slow_start(ctl)) 345 pacing_rate = bandwidth * 2; 346 else if (send_ctl_in_recovery(ctl)) 347 pacing_rate = bandwidth; 348 else 349 pacing_rate = bandwidth + bandwidth / 4; 350 351 tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate; 352 LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %u; bandwidth: " 353 "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl), 354 send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time); 355 return tx_time; 356} 357 358 359int 360lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl, 361 struct lsquic_packet_out *packet_out) 362{ 363 char frames[lsquic_frame_types_str_sz]; 364 LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)", 365 packet_out->po_packno, lsquic_frame_types_to_str(frames, 366 sizeof(frames), packet_out->po_frame_types)); 367 if (0 == lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno)) 368 { 369 TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next); 370 if ((packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) && 371 !lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)) 372 set_retx_alarm(ctl); 373 /* Hold on to packets that are not retransmittable because we need them 374 * to sample RTT information. They are released when ACK is received. 375 */ 376 ++ctl->sc_n_in_flight; 377#if LSQUIC_SEND_STATS 378 ++ctl->sc_stats.n_total_sent; 379#endif 380 return 0; 381 } 382 else 383 return -1; 384} 385 386 387static int 388in_acked_range (const ack_info_t *acki, lsquic_packno_t packno) 389{ 390 int i, low, high; 391 392 low = 0, high = (int) acki->n_ranges - 1; 393 do 394 { 395 i = low + (high - low) / 2; 396 if (acki->ranges[i].low <= packno && acki->ranges[i].high >= packno) 397 return 1; 398 else if (acki->ranges[i].high < packno) 399 high = i - 1; 400 else 401 low = i + 1; 402 } 403 while (low <= high); 404 405 return 0; 406} 407 408 409static void 410take_rtt_sample (lsquic_send_ctl_t *ctl, const lsquic_packet_out_t *packet_out, 411 lsquic_time_t now, lsquic_time_t lack_delta) 412{ 413 assert(packet_out->po_sent); 414 lsquic_time_t measured_rtt = now - packet_out->po_sent; 415 if (packet_out->po_packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt) 416 { 417 ctl->sc_max_rtt_packno = packet_out->po_packno; 418 lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta); 419 LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; " 420 "new srtt: %"PRIu64, packet_out->po_packno, measured_rtt, lack_delta, 421 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats)); 422 } 423} 424 425 426static void 427ack_streams (lsquic_packet_out_t *packet_out) 428{ 429 struct packet_out_srec_iter posi; 430 struct stream_rec *srec; 431 for (srec = posi_first(&posi, packet_out); srec; srec = posi_next(&posi)) 432 lsquic_stream_acked(srec->sr_stream); 433} 434 435 436/* Returns true if packet was rescheduled, false otherwise. In the latter 437 * case, you should not dereference packet_out after the function returns. 438 */ 439static int 440send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl, 441 lsquic_packet_out_t *packet_out) 442{ 443 assert(ctl->sc_n_in_flight); 444 --ctl->sc_n_in_flight; 445 TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next); 446 if (packet_out->po_flags & PO_ENCRYPTED) { 447 ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx, 448 packet_out->po_enc_data); 449 packet_out->po_flags &= ~PO_ENCRYPTED; 450 packet_out->po_enc_data = NULL; 451 } 452 if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)) 453 { 454 ctl->sc_flags |= SC_LOST_ACK; 455 LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno); 456 } 457 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 458 { 459 LSQ_DEBUG("lost retransmittable packet %"PRIu64, 460 packet_out->po_packno); 461 TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next); 462 packet_out->po_flags &= ~PO_WRITEABLE; 463 return 1; 464 } 465 else 466 { 467 LSQ_DEBUG("lost unretransmittable packet %"PRIu64, 468 packet_out->po_packno); 469 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 470 return 0; 471 } 472} 473 474 475static lsquic_packno_t 476largest_retx_packet_number (const lsquic_send_ctl_t *ctl) 477{ 478 const lsquic_packet_out_t *packet_out; 479 TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets, 480 lsquic_packets_tailq, po_next) 481 { 482 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 483 return packet_out->po_packno; 484 } 485 return 0; 486} 487 488 489static void 490send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time) 491{ 492 lsquic_packet_out_t *packet_out, *next; 493 lsquic_packno_t largest_retx_packno, largest_lost_packno; 494 495 largest_retx_packno = largest_retx_packet_number(ctl); 496 largest_lost_packno = 0; 497 assert(largest_retx_packno); /* Otherwise, why detect losses? */ 498 ctl->sc_loss_to = 0; 499 500 for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); 501 packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno; 502 packet_out = next) 503 { 504 next = TAILQ_NEXT(packet_out, po_next); 505 506 if (packet_out->po_packno + N_NACKS_BEFORE_RETX < 507 ctl->sc_largest_acked_packno) 508 { 509 LSQ_DEBUG("loss by FACK detected, packet %"PRIu64, 510 packet_out->po_packno); 511 largest_lost_packno = packet_out->po_packno; 512 (void) send_ctl_handle_lost_packet(ctl, packet_out); 513 continue; 514 } 515 516 if ((packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) && 517 largest_retx_packno <= ctl->sc_largest_acked_packno) 518 { 519 LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64, 520 packet_out->po_packno); 521 largest_lost_packno = packet_out->po_packno; 522 ctl->sc_loss_to = 523 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4; 524 LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64, 525 ctl->sc_loss_to, packet_out->po_packno); 526 (void) send_ctl_handle_lost_packet(ctl, packet_out); 527 continue; 528 } 529 530 if (ctl->sc_largest_acked_sent_time > packet_out->po_sent + 531 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats)) 532 { 533 LSQ_DEBUG("loss by sent time detected: packet %"PRIu64, 534 packet_out->po_packno); 535 largest_lost_packno = packet_out->po_packno; 536 (void) send_ctl_handle_lost_packet(ctl, packet_out); 537 continue; 538 } 539 } 540 541 if (largest_lost_packno > ctl->sc_largest_sent_at_cutback) 542 { 543 LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: " 544 "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback); 545 lsquic_cubic_loss(&ctl->sc_cubic); 546 ctl->sc_largest_sent_at_cutback = 547 lsquic_senhist_largest(&ctl->sc_senhist); 548 } 549 else if (largest_lost_packno) 550 /* Lost packets whose numbers are smaller than the largest packet 551 * number sent at the time of the last loss event indicate the same 552 * loss event. This follows NewReno logic, see RFC 6582. 553 */ 554 LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac " 555 "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback); 556} 557 558 559int 560lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl, 561 const struct ack_info *acki, 562 lsquic_time_t ack_recv_time) 563{ 564 struct lsquic_packets_tailq acked_acks = 565 TAILQ_HEAD_INITIALIZER(acked_acks); 566 lsquic_packet_out_t *packet_out, *next; 567 lsquic_time_t now = lsquic_time_now(); 568 lsquic_packno_t high; 569 int rtt_updated = 0; 570 int app_limited; 571 unsigned n; 572 573 LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64, 574 largest_acked(acki), acki->lack_delta); 575 576 /* Validate ACK first: */ 577 for (n = 0; n < acki->n_ranges; ++n) 578 if (!lsquic_senhist_sent_range(&ctl->sc_senhist, acki->ranges[n].low, 579 acki->ranges[n].high)) 580 { 581 LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] " 582 "was never sent", acki->ranges[n].low, acki->ranges[n].high); 583 return -1; 584 } 585 586 /* Peer is acking packets that have been acked already. Schedule ACK 587 * and STOP_WAITING frame to chop the range if we get two of these in 588 * a row. 589 */ 590 if (lsquic_send_ctl_smallest_unacked(ctl) > smallest_acked(acki)) 591 ++ctl->sc_n_stop_waiting; 592 else 593 ctl->sc_n_stop_waiting = 0; 594 595 app_limited = ctl->sc_n_in_flight + 3 /* This is the "maximum 596 burst" parameter */ < lsquic_cubic_get_cwnd(&ctl->sc_cubic); 597 598 for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); 599 packet_out && packet_out->po_packno <= largest_acked(acki); 600 packet_out = next) 601 { 602 next = TAILQ_NEXT(packet_out, po_next); 603 if (!in_acked_range(acki, packet_out->po_packno)) 604 continue; 605 ctl->sc_largest_acked_packno = packet_out->po_packno; 606 ctl->sc_largest_acked_sent_time = packet_out->po_sent; 607 if (packet_out->po_packno == largest_acked(acki)) 608 { 609 take_rtt_sample(ctl, packet_out, ack_recv_time, acki->lack_delta); 610 ++rtt_updated; 611 } 612 lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent, 613 app_limited); 614 LSQ_DEBUG("Got ACK for packet %"PRIu64", remove from unacked queue", 615 packet_out->po_packno); 616 TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next); 617 ack_streams(packet_out); 618 if ((ctl->sc_flags & SC_NSTP) && 619 (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))) 620 TAILQ_INSERT_TAIL(&acked_acks, packet_out, po_next); 621 else 622 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 623 assert(ctl->sc_n_in_flight); 624 --ctl->sc_n_in_flight; 625 } 626 627 if (rtt_updated) 628 { 629 ctl->sc_n_consec_rtos = 0; 630 ctl->sc_n_hsk = 0; 631 ctl->sc_n_tlp = 0; 632 } 633 634 if (send_ctl_first_unacked_retx_packet(ctl)) 635 { 636 send_ctl_detect_losses(ctl, ack_recv_time); 637 if (send_ctl_first_unacked_retx_packet(ctl)) 638 set_retx_alarm(ctl); 639 else 640 { 641 LSQ_DEBUG("All retransmittable packets lost: clear alarm"); 642 lsquic_alarmset_unset(ctl->sc_alset, AL_RETX); 643 } 644 } 645 else 646 { 647 LSQ_DEBUG("No unacked retransmittable packets: clear retx alarm"); 648 lsquic_alarmset_unset(ctl->sc_alset, AL_RETX); 649 } 650 lsquic_send_ctl_sanity_check(ctl); 651 652 /* Processing of packets that contain acked ACK frames is deferred because 653 * we only need to process one of them: the last one, which we know to 654 * contain the largest value. 655 */ 656 packet_out = TAILQ_LAST(&acked_acks, lsquic_packets_tailq); 657 if (packet_out) 658 { 659 high = ctl->sc_conn_pub->lconn->cn_pf->pf_parse_ack_high( 660 packet_out->po_data, packet_out->po_data_sz); 661 if (high > ctl->sc_largest_ack2ed) 662 ctl->sc_largest_ack2ed = high; 663 do 664 { 665 next = TAILQ_PREV(packet_out, lsquic_packets_tailq, po_next); 666 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 667 } 668 while ((packet_out = next)); 669 } 670 671 return 0; 672} 673 674 675lsquic_packno_t 676lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl) 677{ 678 const lsquic_packet_out_t *packet_out; 679 680#ifndef NDEBUG 681 if ((ctl->sc_senhist.sh_flags & SH_REORDER) && 682 !TAILQ_EMPTY(&ctl->sc_unacked_packets)) 683 { 684 lsquic_packno_t smallest_unacked = UINT64_MAX; 685 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 686 if (packet_out->po_packno < smallest_unacked) 687 smallest_unacked = packet_out->po_packno; 688 assert(smallest_unacked < UINT64_MAX); 689 return smallest_unacked; 690 } 691 else 692#endif 693 /* Packets are always sent out in order (unless we are reordering them 694 * on purpose). Thus, the first packet on the unacked packets list has 695 * the smallest packet number of all packets on that list. 696 */ 697 if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 698 return packet_out->po_packno; 699 else 700 return lsquic_senhist_largest(&ctl->sc_senhist) + 1; 701} 702 703 704static struct lsquic_packet_out * 705send_ctl_next_lost (lsquic_send_ctl_t *ctl) 706{ 707 lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets); 708 if (lost_packet) 709 { 710 TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next); 711 if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM)) 712 { 713 lsquic_packet_out_elide_reset_stream_frames(lost_packet, 714 ctl->sc_conn_pub->lconn->cn_pf, 0); 715 } 716 return lost_packet; 717 } 718 else 719 return NULL; 720} 721 722 723static lsquic_packno_t 724send_ctl_next_packno (lsquic_send_ctl_t *ctl) 725{ 726 return ++ctl->sc_cur_packno; 727} 728 729 730void 731lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl) 732{ 733 lsquic_packet_out_t *packet_out; 734 lsquic_senhist_cleanup(&ctl->sc_senhist); 735 while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets))) 736 { 737 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 738 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 739 --ctl->sc_n_scheduled; 740 } 741 assert(0 == ctl->sc_n_scheduled); 742 while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 743 { 744 TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next); 745 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 746 --ctl->sc_n_in_flight; 747 } 748 assert(0 == ctl->sc_n_in_flight); 749 while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets))) 750 { 751 TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next); 752 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 753 } 754#if LSQUIC_SEND_STATS 755 LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u", 756 ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent, 757 ctl->sc_stats.n_delayed); 758#endif 759} 760 761 762#ifndef NDEBUG 763__attribute__((weak)) 764#endif 765int 766lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl) 767{ 768 const unsigned n_out = ctl->sc_n_scheduled + ctl->sc_n_in_flight; 769 if (ctl->sc_flags & SC_PACE) 770 { 771 if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic)) 772 return 0; 773 if (pacer_can_schedule(&ctl->sc_pacer, n_out)) 774 return 1; 775 if (ctl->sc_flags & SC_SCHED_TICK) 776 { 777 ctl->sc_flags &= ~SC_SCHED_TICK; 778 lsquic_engine_add_conn_to_attq(ctl->sc_enpub, 779 ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer)); 780 } 781 return 0; 782 } 783 else 784 return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic); 785} 786 787 788static void 789send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter) 790{ 791 lsquic_packet_out_t *packet_out, *next; 792 int n_resubmitted; 793 static const char *const filter_type2str[] = { 794 [EXFI_ALL] = "all", 795 [EXFI_HSK] = "handshake", 796 [EXFI_LAST] = "last", 797 }; 798 799 switch (filter) 800 { 801 case EXFI_ALL: 802 n_resubmitted = 0; 803 while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 804 n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out); 805 break; 806 case EXFI_HSK: 807 n_resubmitted = 0; 808 for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out; 809 packet_out = next) 810 { 811 next = TAILQ_NEXT(packet_out, po_next); 812 if (packet_out->po_flags & PO_HELLO) 813 n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out); 814 } 815 break; 816 case EXFI_LAST: 817 packet_out = send_ctl_last_unacked_retx_packet(ctl); 818 if (packet_out) 819 n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out); 820 else 821 n_resubmitted = 0; 822 break; 823 } 824 825 LSQ_DEBUG("consider %s packets lost: %d resubmitted", 826 filter_type2str[filter], n_resubmitted); 827} 828 829 830void 831lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl) 832{ 833 lsquic_alarmset_unset(ctl->sc_alset, AL_RETX); 834 send_ctl_expire(ctl, EXFI_ALL); 835 lsquic_send_ctl_sanity_check(ctl); 836} 837 838 839#ifndef NDEBUG 840void 841lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl) 842{ 843 const struct lsquic_packet_out *packet_out; 844 unsigned count; 845 846 assert(!send_ctl_first_unacked_retx_packet(ctl) || 847 lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)); 848 if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)) 849 { 850 assert(send_ctl_first_unacked_retx_packet(ctl)); 851 assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY); 852 } 853 854 count = 0; 855 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 856 ++count; 857 assert(count == ctl->sc_n_in_flight); 858} 859 860 861#endif 862 863 864void 865lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl, 866 lsquic_packet_out_t *packet_out) 867{ 868#ifndef NDEBUG 869 const lsquic_packet_out_t *last; 870 last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq); 871 if (last) 872 assert((last->po_flags & PO_REPACKNO) || 873 last->po_packno < packet_out->po_packno); 874#endif 875 TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next); 876 ++ctl->sc_n_scheduled; 877} 878 879 880lsquic_packet_out_t * 881lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl) 882{ 883 lsquic_packet_out_t *packet_out; 884 885 packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); 886 if (!packet_out) 887 return NULL; 888 889 if (ctl->sc_n_consec_rtos && 890 !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))) 891 { 892 if (ctl->sc_next_limit) 893 --ctl->sc_next_limit; 894 else 895 return NULL; 896 } 897 898 if (packet_out->po_flags & PO_REPACKNO) 899 { 900 update_for_resending(ctl, packet_out); 901 packet_out->po_flags &= ~PO_REPACKNO; 902 } 903 904 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 905 --ctl->sc_n_scheduled; 906 return packet_out; 907} 908 909 910void 911lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl, 912 lsquic_packet_out_t *packet_out) 913{ 914 TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next); 915 ++ctl->sc_n_scheduled; 916 packet_out->po_flags &= ~PO_WRITEABLE; 917 LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno); 918#if LSQUIC_SEND_STATS 919 ++ctl->sc_stats.n_delayed; 920#endif 921} 922 923 924int 925lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl) 926{ 927 const lsquic_packet_out_t *packet_out; 928 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 929 if (packet_out->po_frame_types & 930 ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM))) 931 return 1; 932 return 0; 933} 934 935 936int 937lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl) 938{ 939 const lsquic_packet_out_t *packet_out; 940 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 941 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 942 return 1; 943 return 0; 944} 945 946 947lsquic_packet_out_t * 948lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least) 949{ 950 lsquic_packet_out_t *packet_out; 951 lsquic_packno_t packno, smallest_unacked; 952 enum lsquic_packno_bits bits; 953 unsigned n_in_flight; 954 955 packno = send_ctl_next_packno(ctl); 956 smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl); 957 n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic); 958 bits = calc_packno_bits(packno, smallest_unacked, n_in_flight); 959 960 packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm, 961 ctl->sc_conn_pub->packet_out_malo, 962 !(ctl->sc_flags & SC_TCID0), ctl->sc_pack_size, bits, 963 ctl->sc_ver_neg->vn_tag, NULL); 964 if (!packet_out) 965 return NULL; 966 967 if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least) 968 { /* This should never happen, this is why this check is performed at 969 * this level and not lower, before the packet is actually allocated. 970 */ 971 LSQ_ERROR("wanted to allocate packet with at least %u bytes of " 972 "payload, but only got %u bytes (mtu: %u bytes)", need_at_least, 973 lsquic_packet_out_avail(packet_out), ctl->sc_pack_size); 974 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 975 return NULL; 976 } 977 978 packet_out->po_packno = packno; 979 LSQ_DEBUG("created packet (smallest_unacked: %"PRIu64"; n_in_flight " 980 "estimate: %u; bits: %u) %"PRIu64"", smallest_unacked, 981 n_in_flight, bits, packno); 982 EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out); 983 return packet_out; 984} 985 986 987/* If `need_at_least' is set to zero, this means get maximum allowed payload 988 * size (in other words, allocate a new packet). 989 */ 990lsquic_packet_out_t * 991lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl, 992 unsigned need_at_least, int *is_err) 993{ 994 lsquic_packet_out_t *packet_out; 995 unsigned n_out; 996 997 if (need_at_least != 0) 998 { 999 packet_out = lsquic_send_ctl_last_scheduled(ctl); 1000 if (packet_out && 1001 /* Do not append to resubmitted packets to avoid writing more 1002 * than one STREAM or RST_STREAM frame from the same stream to 1003 * the packet. This logic can be optimized: we can pass what 1004 * we want to write to this packet and use it if it's not STREAM 1005 * or RST_STREAM frame. We can go further and query whether the 1006 * packet already contains a frame from this stream. 1007 */ 1008 (packet_out->po_flags & PO_WRITEABLE) && 1009 lsquic_packet_out_avail(packet_out) >= need_at_least) 1010 { 1011 return packet_out; 1012 } 1013 } 1014 1015 if (!lsquic_send_ctl_can_send(ctl)) 1016 { 1017 *is_err = 0; 1018 return NULL; 1019 } 1020 1021 packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least); 1022 if (packet_out) 1023 { 1024 if (ctl->sc_flags & SC_PACE) 1025 { 1026 n_out = ctl->sc_n_in_flight + ctl->sc_n_scheduled; 1027 pacer_packet_scheduled(&ctl->sc_pacer, n_out, 1028 send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl); 1029 } 1030 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1031 } 1032 else 1033 *is_err = 1; 1034 return packet_out; 1035} 1036 1037 1038static void 1039update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out) 1040{ 1041 1042 lsquic_packno_t oldno, packno; 1043 1044 /* When the packet is resent, it uses the same number of bytes to encode 1045 * the packet number as the original packet. This follows the reference 1046 * implementation. 1047 */ 1048 oldno = packet_out->po_packno; 1049 packno = send_ctl_next_packno(ctl); 1050 1051 packet_out->po_frame_types &= ~QFRAME_REGEN_MASK; 1052 assert(packet_out->po_frame_types); 1053 packet_out->po_packno = packno; 1054 1055 if (ctl->sc_ver_neg->vn_tag) 1056 { 1057 assert(packet_out->po_flags & PO_VERSION); /* It can only disappear */ 1058 packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag; 1059 } 1060 1061 assert(packet_out->po_regen_sz < packet_out->po_data_sz); 1062 /* TODO: in Q038 and later, we can simply replace the ACK with NUL bytes 1063 * representing PADDING frame instead of doing memmove and adjusting 1064 * offsets. 1065 */ 1066 if (packet_out->po_regen_sz) 1067 lsquic_packet_out_chop_regen(packet_out); 1068 LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64, 1069 oldno, packno); 1070 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for " 1071 "resending as packet %"PRIu64, oldno, packno); 1072} 1073 1074 1075/* A droppable hello packet is a packet that contains a part of hello message 1076 * after handshake has been completed. 1077 */ 1078static int 1079droppable_hello_packet (const lsquic_send_ctl_t *ctl, 1080 const lsquic_packet_out_t *packet_out) 1081{ 1082 return 0 /* TODO: we cannot not resend HELLO packets if we are server. 1083 * For now, do not discard any HELLO packets. 1084 */ 1085 && (packet_out->po_flags & PO_HELLO) 1086 && (ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE); 1087} 1088 1089 1090unsigned 1091lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl) 1092{ 1093 lsquic_packet_out_t *packet_out; 1094 unsigned n = 0; 1095 1096 while (lsquic_send_ctl_can_send(ctl) && 1097 (packet_out = send_ctl_next_lost(ctl))) 1098 { 1099 if ((packet_out->po_regen_sz < packet_out->po_data_sz) 1100 && !droppable_hello_packet(ctl, packet_out)) 1101 { 1102 ++n; 1103 update_for_resending(ctl, packet_out); 1104 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1105 } 1106 else 1107 { 1108 LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue", 1109 packet_out->po_packno); 1110 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1111 } 1112 } 1113 1114 if (n) 1115 LSQ_DEBUG("rescheduled %u packets", n); 1116 1117 return n; 1118} 1119 1120 1121void 1122lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0) 1123{ 1124 if (tcid0) 1125 { 1126 LSQ_INFO("set TCID flag"); 1127 ctl->sc_flags |= SC_TCID0; 1128 } 1129 else 1130 { 1131 LSQ_INFO("unset TCID flag"); 1132 ctl->sc_flags &= ~SC_TCID0; 1133 } 1134} 1135 1136 1137/* This function is called to inform the send controller that stream 1138 * `stream_id' has been reset. The controller elides this stream's stream 1139 * frames from packets that have already been scheduled. If a packet 1140 * becomes empty as a result, it is dropped. 1141 * 1142 * Packets on other queues do not need to be processed: unacked packets 1143 * have already been sent, and lost packets' reset stream frames will be 1144 * elided in due time. 1145 */ 1146void 1147lsquic_send_ctl_reset_stream (lsquic_send_ctl_t *ctl, uint32_t stream_id) 1148{ 1149 struct lsquic_packet_out *packet_out, *next; 1150 1151 for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out; 1152 packet_out = next) 1153 { 1154 next = TAILQ_NEXT(packet_out, po_next); 1155 1156 if ((packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)) 1157 ) 1158 { 1159 lsquic_packet_out_elide_reset_stream_frames(packet_out, 1160 ctl->sc_conn_pub->lconn->cn_pf, stream_id); 1161 if (0 == packet_out->po_frame_types) 1162 { 1163 LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for " 1164 "stream %"PRIu32, packet_out->po_packno, stream_id); 1165 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 1166 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1167 assert(ctl->sc_n_scheduled); 1168 --ctl->sc_n_scheduled; 1169 } 1170 } 1171 } 1172} 1173 1174 1175/* Count how many packets will remain after the squeezing performed by 1176 * lsquic_send_ctl_squeeze_sched(). This is the number of delayed data 1177 * packets. 1178 */ 1179#ifndef NDEBUG 1180__attribute__((weak)) 1181#endif 1182int 1183lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl) 1184{ 1185 const struct lsquic_packet_out *packet_out; 1186 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1187 if (packet_out->po_regen_sz < packet_out->po_data_sz) 1188 return 1; 1189 return 0; 1190} 1191 1192 1193#ifndef NDEBUG 1194static void 1195send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix, 1196 const struct lsquic_packets_tailq *tailq) 1197{ 1198 const lsquic_packet_out_t *packet_out; 1199 unsigned n_packets; 1200 char *buf; 1201 size_t bufsz; 1202 int off; 1203 1204 n_packets = 0; 1205 TAILQ_FOREACH(packet_out, tailq, po_next) 1206 ++n_packets; 1207 1208 if (n_packets == 0) 1209 { 1210 LSQ_DEBUG("%s: [<empty set>]", prefix); 1211 return; 1212 } 1213 1214 bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */); 1215 buf = malloc(bufsz); 1216 if (!buf) 1217 { 1218 LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno)); 1219 return; 1220 } 1221 1222 off = 0; 1223 TAILQ_FOREACH(packet_out, tailq, po_next) 1224 { 1225 if (off) 1226 buf[off++] = ' '; 1227 off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno); 1228 } 1229 1230 LSQ_DEBUG("%s: [%s]", prefix, buf); 1231 free(buf); 1232} 1233 1234 1235#define LOG_PACKET_Q(prefix, queue) do { \ 1236 if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)) \ 1237 send_ctl_log_packet_q(ctl, queue, prefix); \ 1238} while (0) 1239#else 1240#define LOG_PACKET_Q(p, q) 1241#endif 1242 1243 1244int 1245lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl) 1246{ 1247 struct lsquic_packet_out *packet_out, *next; 1248#ifndef NDEBUG 1249 int pre_squeeze_logged = 0; 1250#endif 1251 1252 for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out; 1253 packet_out = next) 1254 { 1255 next = TAILQ_NEXT(packet_out, po_next); 1256 if (packet_out->po_regen_sz < packet_out->po_data_sz 1257 && !droppable_hello_packet(ctl, packet_out)) 1258 { 1259 packet_out->po_flags &= ~PO_WRITEABLE; 1260 if (packet_out->po_flags & PO_ENCRYPTED) 1261 { 1262 ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx, 1263 packet_out->po_enc_data); 1264 packet_out->po_enc_data = NULL; 1265 packet_out->po_flags &= ~PO_ENCRYPTED; 1266 } 1267 } 1268 else 1269 { 1270#ifndef NDEBUG 1271 /* Log the whole list before we squeeze for the first time */ 1272 if (!pre_squeeze_logged++) 1273 LOG_PACKET_Q(&ctl->sc_scheduled_packets, 1274 "unacked packets before squeezing"); 1275#endif 1276 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 1277 assert(ctl->sc_n_scheduled); 1278 --ctl->sc_n_scheduled; 1279 LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue", 1280 packet_out->po_packno); 1281 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1282 } 1283 } 1284 1285#ifndef NDEBUG 1286 if (pre_squeeze_logged) 1287 LOG_PACKET_Q(&ctl->sc_scheduled_packets, 1288 "unacked packets after squeezing"); 1289 else if (ctl->sc_n_scheduled > 0) 1290 LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets"); 1291#endif 1292 1293 return ctl->sc_n_scheduled > 0; 1294} 1295 1296 1297void 1298lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl) 1299{ 1300 struct lsquic_packet_out *packet_out; 1301 1302 assert(ctl->sc_n_scheduled > 0); /* Otherwise, why is this called? */ 1303 ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist); 1304 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1305 packet_out->po_flags |= PO_REPACKNO; 1306} 1307 1308 1309void 1310lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl) 1311{ 1312 struct lsquic_packet_out *ack_packet; 1313 1314 assert(ctl->sc_n_scheduled > 1); /* Otherwise, why is this called? */ 1315 ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq); 1316 assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK)); 1317 TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next); 1318 TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next); 1319} 1320 1321 1322void 1323lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl) 1324{ 1325 lsquic_packet_out_t *packet_out; 1326 const unsigned n = ctl->sc_n_scheduled; 1327 while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets))) 1328 { 1329 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 1330 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1331 --ctl->sc_n_scheduled; 1332 } 1333 assert(0 == ctl->sc_n_scheduled); 1334 LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 0 ? "s" : ""); 1335} 1336 1337 1338