lsquic_send_ctl.c revision da710add
1/* Copyright (c) 2017 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_send_ctl.c -- Logic for sending and sent packets 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <stdlib.h> 10#include <string.h> 11#include <sys/queue.h> 12 13#include "lsquic_types.h" 14#include "lsquic_int_types.h" 15#include "lsquic.h" 16#include "lsquic_mm.h" 17#include "lsquic_engine_public.h" 18#include "lsquic_alarmset.h" 19#include "lsquic_packet_common.h" 20#include "lsquic_parse.h" 21#include "lsquic_packet_out.h" 22#include "lsquic_senhist.h" 23#include "lsquic_rtt.h" 24#include "lsquic_cubic.h" 25#include "lsquic_pacer.h" 26#include "lsquic_send_ctl.h" 27#include "lsquic_util.h" 28#include "lsquic_sfcw.h" 29#include "lsquic_stream.h" 30#include "lsquic_ver_neg.h" 31#include "lsquic_ev_log.h" 32#include "lsquic_conn.h" 33#include "lsquic_conn_flow.h" 34#include "lsquic_conn_public.h" 35#include "lsquic_hash.h" 36 37#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL 38#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid 39#include "lsquic_logger.h" 40 41#define MAX_RESUBMITTED_ON_RTO 2 42#define MAX_RTO_BACKOFFS 10 43#define DEFAULT_RETX_DELAY 500000 /* Microseconds */ 44#define MAX_RTO_DELAY 60000000 /* Microseconds */ 45#define MIN_RTO_DELAY 1000000 /* Microseconds */ 46#define N_NACKS_BEFORE_RETX 3 47 48 49enum retx_mode { 50 RETX_MODE_HANDSHAKE, 51 RETX_MODE_LOSS, 52 RETX_MODE_TLP, 53 RETX_MODE_RTO, 54}; 55 56 57static const char *const retx2str[] = { 58 [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE", 59 [RETX_MODE_LOSS] = "RETX_MODE_LOSS", 60 [RETX_MODE_TLP] = "RETX_MODE_TLP", 61 [RETX_MODE_RTO] = "RETX_MODE_RTO", 62}; 63 64 65static void 66update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out); 67 68 69enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, }; 70 71 72static void 73send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter); 74 75static void 76set_retx_alarm (lsquic_send_ctl_t *ctl); 77 78static void 79send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time); 80 81static unsigned 82send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl); 83 84 85#ifdef NDEBUG 86static 87#elif __GNUC__ 88__attribute__((weak)) 89#endif 90int 91lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl) 92{ 93 return !(ctl->sc_flags & SC_BUFFER_STREAM); 94} 95 96 97#ifdef NDEBUG 98static 99#elif __GNUC__ 100__attribute__((weak)) 101#endif 102enum lsquic_packno_bits 103lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl) 104{ 105 return PACKNO_LEN_2; 106} 107 108 109int 110lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl) 111{ 112 const lsquic_packet_out_t *packet_out; 113 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 114 if (packet_out->po_frame_types & 115 ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM))) 116 return 1; 117 return 0; 118} 119 120 121static lsquic_packet_out_t * 122send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl) 123{ 124 lsquic_packet_out_t *packet_out; 125 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 126 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 127 return packet_out; 128 return NULL; 129} 130 131 132static lsquic_packet_out_t * 133send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl) 134{ 135 lsquic_packet_out_t *packet_out; 136 TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets, 137 lsquic_packets_tailq, po_next) 138 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 139 return packet_out; 140 return NULL; 141} 142 143 144static int 145have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl) 146{ 147 const lsquic_packet_out_t *packet_out; 148 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 149 if (packet_out->po_flags & PO_HELLO) 150 return 1; 151 return 0; 152} 153 154 155static enum retx_mode 156get_retx_mode (lsquic_send_ctl_t *ctl) 157{ 158 if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE) 159 && have_unacked_handshake_packets(ctl)) 160 return RETX_MODE_HANDSHAKE; 161 if (ctl->sc_loss_to) 162 return RETX_MODE_LOSS; 163 if (ctl->sc_n_tlp < 2) 164 return RETX_MODE_TLP; 165 return RETX_MODE_RTO; 166} 167 168 169static lsquic_time_t 170get_retx_delay (const struct lsquic_rtt_stats *rtt_stats) 171{ 172 lsquic_time_t srtt, delay; 173 174 srtt = lsquic_rtt_stats_get_srtt(rtt_stats); 175 if (srtt) 176 { 177 delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats); 178 if (delay < MIN_RTO_DELAY) 179 delay = MIN_RTO_DELAY; 180 } 181 else 182 delay = DEFAULT_RETX_DELAY; 183 184 return delay; 185} 186 187 188static void 189retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now) 190{ 191 lsquic_send_ctl_t *ctl = ctx; 192 lsquic_packet_out_t *packet_out; 193 enum retx_mode rm; 194 195 /* This is a callback -- before it is called, the alarm is unset */ 196 assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)); 197 198 rm = get_retx_mode(ctl); 199 LSQ_INFO("retx timeout, mode %s", retx2str[rm]); 200 201 switch (rm) 202 { 203 case RETX_MODE_HANDSHAKE: 204 send_ctl_expire(ctl, EXFI_HSK); 205 /* Do not register cubic loss during handshake */ 206 break; 207 case RETX_MODE_LOSS: 208 send_ctl_detect_losses(ctl, lsquic_time_now()); 209 break; 210 case RETX_MODE_TLP: 211 ++ctl->sc_n_tlp; 212 send_ctl_expire(ctl, EXFI_LAST); 213 break; 214 case RETX_MODE_RTO: 215 ++ctl->sc_n_consec_rtos; 216 ctl->sc_next_limit = 2; 217 LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry); 218 send_ctl_expire(ctl, EXFI_ALL); 219 lsquic_cubic_timeout(&ctl->sc_cubic); 220 break; 221 } 222 223 packet_out = send_ctl_first_unacked_retx_packet(ctl); 224 if (packet_out) 225 set_retx_alarm(ctl); 226 lsquic_send_ctl_sanity_check(ctl); 227} 228 229 230void 231lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset, 232 struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg, 233 struct lsquic_conn_public *conn_pub, unsigned short pack_size) 234{ 235 unsigned i; 236 memset(ctl, 0, sizeof(*ctl)); 237 TAILQ_INIT(&ctl->sc_scheduled_packets); 238 TAILQ_INIT(&ctl->sc_unacked_packets); 239 TAILQ_INIT(&ctl->sc_lost_packets); 240 ctl->sc_enpub = enpub; 241 ctl->sc_alset = alset; 242 ctl->sc_ver_neg = ver_neg; 243 ctl->sc_pack_size = pack_size; 244 ctl->sc_conn_pub = conn_pub; 245 if (enpub->enp_settings.es_pace_packets) 246 ctl->sc_flags |= SC_PACE; 247 lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl); 248 lsquic_senhist_init(&ctl->sc_senhist); 249 lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID); 250 if (ctl->sc_flags & SC_PACE) 251 pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000); 252 for (i = 0; i < sizeof(ctl->sc_buffered_packets) / 253 sizeof(ctl->sc_buffered_packets[0]); ++i) 254 TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets); 255} 256 257 258static lsquic_time_t 259calculate_packet_rto (lsquic_send_ctl_t *ctl) 260{ 261 lsquic_time_t delay; 262 263 delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats); 264 265 unsigned exp = ctl->sc_n_consec_rtos; 266 if (exp > MAX_RTO_BACKOFFS) 267 exp = MAX_RTO_BACKOFFS; 268 269 delay = delay * (1 << exp); 270 271 return delay; 272} 273 274 275static lsquic_time_t 276calculate_tlp_delay (lsquic_send_ctl_t *ctl) 277{ 278 lsquic_time_t srtt, delay; 279 280 srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 281 if (ctl->sc_n_in_flight_all > 1) 282 { 283 delay = 10000; /* 10 ms is the minimum tail loss probe delay */ 284 if (delay < 2 * srtt) 285 delay = 2 * srtt; 286 } 287 else 288 { 289 delay = srtt + srtt / 2 + MIN_RTO_DELAY; 290 if (delay < 2 * srtt) 291 delay = 2 * srtt; 292 } 293 294 return delay; 295} 296 297 298static void 299set_retx_alarm (lsquic_send_ctl_t *ctl) 300{ 301 enum retx_mode rm; 302 lsquic_time_t delay, now; 303 304 assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets)); 305 306 now = lsquic_time_now(); 307 308 rm = get_retx_mode(ctl); 309 switch (rm) 310 { 311 case RETX_MODE_HANDSHAKE: 312 /* [draft-iyengar-quic-loss-recovery-01]: 313 * 314 * if (handshake packets are outstanding): 315 * alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count; 316 * handshake_count++; 317 */ 318 delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 319 if (delay) 320 { 321 delay += delay / 2; 322 if (10000 > delay) 323 delay = 10000; 324 } 325 else 326 delay = 150000; 327 delay <<= ctl->sc_n_hsk; 328 ++ctl->sc_n_hsk; 329 break; 330 case RETX_MODE_LOSS: 331 delay = ctl->sc_loss_to; 332 break; 333 case RETX_MODE_TLP: 334 delay = calculate_tlp_delay(ctl); 335 break; 336 case RETX_MODE_RTO: 337 /* Base RTO on the first unacked packet, following reference 338 * implementation. 339 */ 340 delay = calculate_packet_rto(ctl); 341 break; 342#ifdef WIN32 343 default: 344 delay = 0; 345#endif 346 } 347 348 if (delay > MAX_RTO_DELAY) 349 delay = MAX_RTO_DELAY; 350 351 LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64 352 " usec from now, mode %s", now + delay, delay, retx2str[rm]); 353 lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay); 354} 355 356 357static int 358send_ctl_in_recovery (lsquic_send_ctl_t *ctl) 359{ 360 return ctl->sc_largest_acked_packno 361 && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback; 362} 363 364 365static int 366send_ctl_in_slow_start (lsquic_send_ctl_t *ctl) 367{ 368 return lsquic_cubic_in_slow_start(&ctl->sc_cubic); 369} 370 371 372static lsquic_time_t 373send_ctl_transfer_time (void *ctx) 374{ 375 lsquic_send_ctl_t *const ctl = ctx; 376 uint64_t bandwidth, pacing_rate; 377 lsquic_time_t srtt, tx_time; 378 unsigned long cwnd; 379 380 srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats); 381 if (srtt == 0) 382 srtt = 50000; 383 cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic); 384 bandwidth = cwnd * 1000000 / srtt; 385 if (send_ctl_in_slow_start(ctl)) 386 pacing_rate = bandwidth * 2; 387 else if (send_ctl_in_recovery(ctl)) 388 pacing_rate = bandwidth; 389 else 390 pacing_rate = bandwidth + bandwidth / 4; 391 392 tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate; 393 LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: " 394 "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl), 395 send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time); 396 return tx_time; 397} 398 399 400static void 401send_ctl_unacked_append (struct lsquic_send_ctl *ctl, 402 struct lsquic_packet_out *packet_out) 403{ 404 TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next); 405 ctl->sc_bytes_unacked_all += lsquic_packet_out_total_sz(packet_out); 406 ctl->sc_n_in_flight_all += 1; 407 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 408 { 409 ctl->sc_bytes_unacked_retx += lsquic_packet_out_total_sz(packet_out); 410 ++ctl->sc_n_in_flight_retx; 411 } 412} 413 414 415static void 416send_ctl_unacked_remove (struct lsquic_send_ctl *ctl, 417 struct lsquic_packet_out *packet_out, unsigned packet_sz) 418{ 419 TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next); 420 assert(ctl->sc_bytes_unacked_all >= packet_sz); 421 ctl->sc_bytes_unacked_all -= packet_sz; 422 ctl->sc_n_in_flight_all -= 1; 423 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 424 { 425 ctl->sc_bytes_unacked_retx -= packet_sz; 426 --ctl->sc_n_in_flight_retx; 427 } 428} 429 430 431static void 432send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl, 433 struct lsquic_packet_out *packet_out) 434{ 435 packet_out->po_flags |= PO_SCHED; 436 ++ctl->sc_n_scheduled; 437 ctl->sc_bytes_scheduled += lsquic_packet_out_total_sz(packet_out); 438 lsquic_send_ctl_sanity_check(ctl); 439} 440 441 442static void 443send_ctl_sched_append (struct lsquic_send_ctl *ctl, 444 struct lsquic_packet_out *packet_out) 445{ 446 TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next); 447 send_ctl_sched_Xpend_common(ctl, packet_out); 448} 449 450 451static void 452send_ctl_sched_prepend (struct lsquic_send_ctl *ctl, 453 struct lsquic_packet_out *packet_out) 454{ 455 TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next); 456 send_ctl_sched_Xpend_common(ctl, packet_out); 457} 458 459 460static void 461send_ctl_sched_remove (struct lsquic_send_ctl *ctl, 462 struct lsquic_packet_out *packet_out) 463{ 464 TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next); 465 packet_out->po_flags &= ~PO_SCHED; 466 assert(ctl->sc_n_scheduled); 467 --ctl->sc_n_scheduled; 468 ctl->sc_bytes_scheduled -= lsquic_packet_out_total_sz(packet_out); 469 lsquic_send_ctl_sanity_check(ctl); 470} 471 472 473int 474lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl, 475 struct lsquic_packet_out *packet_out, int account) 476{ 477 char frames[lsquic_frame_types_str_sz]; 478 LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)", 479 packet_out->po_packno, lsquic_frame_types_to_str(frames, 480 sizeof(frames), packet_out->po_frame_types)); 481 if (account) 482 ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out); 483 lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno); 484 send_ctl_unacked_append(ctl, packet_out); 485 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 486 { 487 if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)) 488 set_retx_alarm(ctl); 489 if (ctl->sc_n_in_flight_retx == 1) 490 ctl->sc_flags |= SC_WAS_QUIET; 491 } 492 /* TODO: Do we really want to use those for RTT info? Revisit this. */ 493 /* Hold on to packets that are not retransmittable because we need them 494 * to sample RTT information. They are released when ACK is received. 495 */ 496#if LSQUIC_SEND_STATS 497 ++ctl->sc_stats.n_total_sent; 498#endif 499 lsquic_send_ctl_sanity_check(ctl); 500 return 0; 501} 502 503 504static void 505take_rtt_sample (lsquic_send_ctl_t *ctl, 506 lsquic_time_t now, lsquic_time_t lack_delta) 507{ 508 const lsquic_packno_t packno = ctl->sc_largest_acked_packno; 509 const lsquic_time_t sent = ctl->sc_largest_acked_sent_time; 510 const lsquic_time_t measured_rtt = now - sent; 511 if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt) 512 { 513 ctl->sc_max_rtt_packno = packno; 514 lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta); 515 LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; " 516 "new srtt: %"PRIu64, packno, measured_rtt, lack_delta, 517 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats)); 518 } 519} 520 521 522static void 523send_ctl_release_enc_data (struct lsquic_send_ctl *ctl, 524 struct lsquic_packet_out *packet_out) 525{ 526 ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx, 527 packet_out->po_enc_data); 528 packet_out->po_flags &= ~PO_ENCRYPTED; 529 packet_out->po_enc_data = NULL; 530} 531 532 533/* Returns true if packet was rescheduled, false otherwise. In the latter 534 * case, you should not dereference packet_out after the function returns. 535 */ 536static int 537send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl, 538 lsquic_packet_out_t *packet_out) 539{ 540 unsigned packet_sz; 541 542 assert(ctl->sc_n_in_flight_all); 543 packet_sz = lsquic_packet_out_sent_sz(packet_out); 544 send_ctl_unacked_remove(ctl, packet_out, packet_sz); 545 if (packet_out->po_flags & PO_ENCRYPTED) 546 send_ctl_release_enc_data(ctl, packet_out); 547 if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)) 548 { 549 ctl->sc_flags |= SC_LOST_ACK; 550 LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno); 551 } 552 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 553 { 554 LSQ_DEBUG("lost retransmittable packet %"PRIu64, 555 packet_out->po_packno); 556 TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next); 557 return 1; 558 } 559 else 560 { 561 LSQ_DEBUG("lost unretransmittable packet %"PRIu64, 562 packet_out->po_packno); 563 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 564 return 0; 565 } 566} 567 568 569static lsquic_packno_t 570largest_retx_packet_number (const lsquic_send_ctl_t *ctl) 571{ 572 const lsquic_packet_out_t *packet_out; 573 TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets, 574 lsquic_packets_tailq, po_next) 575 { 576 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 577 return packet_out->po_packno; 578 } 579 return 0; 580} 581 582 583static void 584send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time) 585{ 586 lsquic_packet_out_t *packet_out, *next; 587 lsquic_packno_t largest_retx_packno, largest_lost_packno; 588 589 largest_retx_packno = largest_retx_packet_number(ctl); 590 largest_lost_packno = 0; 591 ctl->sc_loss_to = 0; 592 593 for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); 594 packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno; 595 packet_out = next) 596 { 597 next = TAILQ_NEXT(packet_out, po_next); 598 599 if (packet_out->po_packno + N_NACKS_BEFORE_RETX < 600 ctl->sc_largest_acked_packno) 601 { 602 LSQ_DEBUG("loss by FACK detected, packet %"PRIu64, 603 packet_out->po_packno); 604 largest_lost_packno = packet_out->po_packno; 605 (void) send_ctl_handle_lost_packet(ctl, packet_out); 606 continue; 607 } 608 609 if (largest_retx_packno 610 && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 611 && largest_retx_packno <= ctl->sc_largest_acked_packno) 612 { 613 LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64, 614 packet_out->po_packno); 615 largest_lost_packno = packet_out->po_packno; 616 ctl->sc_loss_to = 617 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4; 618 LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64, 619 ctl->sc_loss_to, packet_out->po_packno); 620 (void) send_ctl_handle_lost_packet(ctl, packet_out); 621 continue; 622 } 623 624 if (ctl->sc_largest_acked_sent_time > packet_out->po_sent + 625 lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats)) 626 { 627 LSQ_DEBUG("loss by sent time detected: packet %"PRIu64, 628 packet_out->po_packno); 629 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 630 largest_lost_packno = packet_out->po_packno; 631 else { /* don't count it as a loss */; } 632 (void) send_ctl_handle_lost_packet(ctl, packet_out); 633 continue; 634 } 635 } 636 637 if (largest_lost_packno > ctl->sc_largest_sent_at_cutback) 638 { 639 LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: " 640 "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback); 641 lsquic_cubic_loss(&ctl->sc_cubic); 642 if (ctl->sc_flags & SC_PACE) 643 pacer_loss_event(&ctl->sc_pacer); 644 ctl->sc_largest_sent_at_cutback = 645 lsquic_senhist_largest(&ctl->sc_senhist); 646 } 647 else if (largest_lost_packno) 648 /* Lost packets whose numbers are smaller than the largest packet 649 * number sent at the time of the last loss event indicate the same 650 * loss event. This follows NewReno logic, see RFC 6582. 651 */ 652 LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac " 653 "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback); 654} 655 656 657int 658lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl, 659 const struct ack_info *acki, 660 lsquic_time_t ack_recv_time) 661{ 662 struct lsquic_packets_tailq acked_acks = 663 TAILQ_HEAD_INITIALIZER(acked_acks); 664 const struct lsquic_packno_range *range = 665 &acki->ranges[ acki->n_ranges - 1 ]; 666 lsquic_packet_out_t *packet_out, *next; 667 lsquic_time_t now = 0; 668 lsquic_packno_t smallest_unacked; 669 lsquic_packno_t ack2ed[2]; 670 unsigned packet_sz; 671 int app_limited; 672 signed char do_rtt, skip_checks; 673 674 packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); 675#if __GNUC__ 676 __builtin_prefetch(packet_out); 677#endif 678 679#if __GNUC__ 680# define UNLIKELY(cond) __builtin_expect(cond, 0) 681#else 682# define UNLIKELY(cond) cond 683#endif 684 685#if __GNUC__ 686 if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))) 687#endif 688 LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64, 689 largest_acked(acki), acki->lack_delta); 690 691 /* Validate ACK first: */ 692 if (UNLIKELY(largest_acked(acki) 693 > lsquic_senhist_largest(&ctl->sc_senhist))) 694 { 695 LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] " 696 "was never sent", acki->ranges[0].low, acki->ranges[0].high); 697 return -1; 698 } 699 700 if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET)) 701 { 702 ctl->sc_flags &= ~SC_WAS_QUIET; 703 LSQ_DEBUG("ACK comes after a period of quiescence"); 704 if (!now) 705 now = lsquic_time_now(); 706 lsquic_cubic_was_quiet(&ctl->sc_cubic, now); 707 } 708 709 if (UNLIKELY(!packet_out)) 710 goto no_unacked_packets; 711 712 smallest_unacked = packet_out->po_packno; 713 ack2ed[1] = 0; 714 715 if (packet_out->po_packno > largest_acked(acki)) 716 goto detect_losses; 717 718 do_rtt = 0, skip_checks = 0; 719 app_limited = -1; 720 do 721 { 722 next = TAILQ_NEXT(packet_out, po_next); 723#if __GNUC__ 724 __builtin_prefetch(next); 725#endif 726 if (skip_checks) 727 goto after_checks; 728 /* This is faster than binary search in the normal case when the number 729 * of ranges is not much larger than the number of unacked packets. 730 */ 731 while (UNLIKELY(range->high < packet_out->po_packno)) 732 --range; 733 if (range->low <= packet_out->po_packno) 734 { 735 skip_checks = range == acki->ranges; 736 if (app_limited < 0) 737 app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size /* This 738 is the "maximum burst" parameter */ 739 < lsquic_cubic_get_cwnd(&ctl->sc_cubic); 740 if (!now) 741 now = lsquic_time_now(); 742 after_checks: 743 packet_sz = lsquic_packet_out_sent_sz(packet_out); 744 ctl->sc_largest_acked_packno = packet_out->po_packno; 745 ctl->sc_largest_acked_sent_time = packet_out->po_sent; 746 send_ctl_unacked_remove(ctl, packet_out, packet_sz); 747 ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))] 748 = packet_out->po_ack2ed; 749 do_rtt |= packet_out->po_packno == largest_acked(acki); 750 lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent, 751 app_limited, packet_sz); 752 lsquic_packet_out_ack_streams(packet_out); 753 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 754 } 755 packet_out = next; 756 } 757 while (packet_out && packet_out->po_packno <= largest_acked(acki)); 758 759 if (do_rtt) 760 { 761 take_rtt_sample(ctl, ack_recv_time, acki->lack_delta); 762 ctl->sc_n_consec_rtos = 0; 763 ctl->sc_n_hsk = 0; 764 ctl->sc_n_tlp = 0; 765 } 766 767 detect_losses: 768 send_ctl_detect_losses(ctl, ack_recv_time); 769 if (send_ctl_first_unacked_retx_packet(ctl)) 770 set_retx_alarm(ctl); 771 else 772 { 773 LSQ_DEBUG("No retransmittable packets: clear alarm"); 774 lsquic_alarmset_unset(ctl->sc_alset, AL_RETX); 775 } 776 lsquic_send_ctl_sanity_check(ctl); 777 778 if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed) 779 ctl->sc_largest_ack2ed = ack2ed[1]; 780 781 if (ctl->sc_n_in_flight_retx == 0) 782 ctl->sc_flags |= SC_WAS_QUIET; 783 784 update_n_stop_waiting: 785 if (smallest_unacked > smallest_acked(acki)) 786 /* Peer is acking packets that have been acked already. Schedule ACK 787 * and STOP_WAITING frame to chop the range if we get two of these in 788 * a row. 789 */ 790 ++ctl->sc_n_stop_waiting; 791 else 792 ctl->sc_n_stop_waiting = 0; 793 lsquic_send_ctl_sanity_check(ctl); 794 return 0; 795 796 no_unacked_packets: 797 smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1; 798 ctl->sc_flags |= SC_WAS_QUIET; 799 goto update_n_stop_waiting; 800} 801 802 803lsquic_packno_t 804lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl) 805{ 806 const lsquic_packet_out_t *packet_out; 807 808 /* Packets are always sent out in order (unless we are reordering them 809 * on purpose). Thus, the first packet on the unacked packets list has 810 * the smallest packet number of all packets on that list. 811 */ 812 if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 813 return packet_out->po_packno; 814 else 815 return lsquic_senhist_largest(&ctl->sc_senhist) + 1; 816} 817 818 819static struct lsquic_packet_out * 820send_ctl_next_lost (lsquic_send_ctl_t *ctl) 821{ 822 lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets); 823 if (lost_packet) 824 { 825 TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next); 826 if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM)) 827 { 828 lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0); 829 } 830 return lost_packet; 831 } 832 else 833 return NULL; 834} 835 836 837static lsquic_packno_t 838send_ctl_next_packno (lsquic_send_ctl_t *ctl) 839{ 840 return ++ctl->sc_cur_packno; 841} 842 843 844void 845lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl) 846{ 847 lsquic_packet_out_t *packet_out; 848 lsquic_senhist_cleanup(&ctl->sc_senhist); 849 while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets))) 850 { 851 send_ctl_sched_remove(ctl, packet_out); 852 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 853 } 854 assert(0 == ctl->sc_n_scheduled); 855 assert(0 == ctl->sc_bytes_scheduled); 856 while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 857 { 858 TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next); 859 ctl->sc_bytes_unacked_all -= lsquic_packet_out_total_sz(packet_out); 860 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 861 --ctl->sc_n_in_flight_all; 862 } 863 assert(0 == ctl->sc_n_in_flight_all); 864 assert(0 == ctl->sc_bytes_unacked_all); 865 while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets))) 866 { 867 TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next); 868 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 869 } 870 pacer_cleanup(&ctl->sc_pacer); 871#if LSQUIC_SEND_STATS 872 LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u", 873 ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent, 874 ctl->sc_stats.n_delayed); 875#endif 876} 877 878 879static unsigned 880send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl) 881{ 882 return ctl->sc_bytes_scheduled 883 + ctl->sc_bytes_unacked_retx 884 + ctl->sc_bytes_out; 885} 886 887 888int 889lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl) 890{ 891 return (ctl->sc_flags & SC_PACE) 892 && !pacer_can_schedule(&ctl->sc_pacer, 893 ctl->sc_n_scheduled + ctl->sc_n_in_flight_all); 894} 895 896 897#ifndef NDEBUG 898#if __GNUC__ 899__attribute__((weak)) 900#endif 901#endif 902int 903lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl) 904{ 905 const unsigned n_out = send_ctl_retx_bytes_out(ctl); 906 LSQ_DEBUG("%s: n_out: %u (unacked_retx: %u, out: %u); cwnd: %lu", __func__, 907 n_out, ctl->sc_bytes_unacked_retx, ctl->sc_bytes_out, 908 lsquic_cubic_get_cwnd(&ctl->sc_cubic)); 909 if (ctl->sc_flags & SC_PACE) 910 { 911 if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic)) 912 return 0; 913 if (pacer_can_schedule(&ctl->sc_pacer, 914 ctl->sc_n_scheduled + ctl->sc_n_in_flight_all)) 915 return 1; 916 if (ctl->sc_flags & SC_SCHED_TICK) 917 { 918 ctl->sc_flags &= ~SC_SCHED_TICK; 919 lsquic_engine_add_conn_to_attq(ctl->sc_enpub, 920 ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer)); 921 } 922 return 0; 923 } 924 else 925 return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic); 926} 927 928 929static void 930send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter) 931{ 932 lsquic_packet_out_t *packet_out, *next; 933 int n_resubmitted; 934 static const char *const filter_type2str[] = { 935 [EXFI_ALL] = "all", 936 [EXFI_HSK] = "handshake", 937 [EXFI_LAST] = "last", 938 }; 939 940 switch (filter) 941 { 942 case EXFI_ALL: 943 n_resubmitted = 0; 944 while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets))) 945 n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out); 946 break; 947 case EXFI_HSK: 948 n_resubmitted = 0; 949 for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out; 950 packet_out = next) 951 { 952 next = TAILQ_NEXT(packet_out, po_next); 953 if (packet_out->po_flags & PO_HELLO) 954 n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out); 955 } 956 break; 957 case EXFI_LAST: 958 packet_out = send_ctl_last_unacked_retx_packet(ctl); 959 if (packet_out) 960 n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out); 961 else 962 n_resubmitted = 0; 963 break; 964#ifdef WIN32 965 default: 966 n_resubmitted = 0; 967#endif 968 } 969 970 LSQ_DEBUG("consider %s packets lost: %d resubmitted", 971 filter_type2str[filter], n_resubmitted); 972} 973 974 975void 976lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl) 977{ 978 lsquic_alarmset_unset(ctl->sc_alset, AL_RETX); 979 send_ctl_expire(ctl, EXFI_ALL); 980 lsquic_send_ctl_sanity_check(ctl); 981} 982 983 984#if LSQUIC_EXTRA_CHECKS 985void 986lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl) 987{ 988 const struct lsquic_packet_out *packet_out; 989 unsigned count, bytes; 990 991 assert(!send_ctl_first_unacked_retx_packet(ctl) || 992 lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)); 993 if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX)) 994 { 995 assert(send_ctl_first_unacked_retx_packet(ctl)); 996 assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY); 997 } 998 999 count = 0, bytes = 0; 1000 TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next) 1001 { 1002 bytes += lsquic_packet_out_sent_sz(packet_out); 1003 ++count; 1004 } 1005 assert(count == ctl->sc_n_in_flight_all); 1006 assert(bytes == ctl->sc_bytes_unacked_all); 1007 1008 count = 0, bytes = 0; 1009 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1010 { 1011 assert(packet_out->po_flags & PO_SCHED); 1012 bytes += lsquic_packet_out_total_sz(packet_out); 1013 ++count; 1014 } 1015 assert(count == ctl->sc_n_scheduled); 1016 assert(bytes == ctl->sc_bytes_scheduled); 1017} 1018 1019 1020#endif 1021 1022 1023void 1024lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl, 1025 lsquic_packet_out_t *packet_out) 1026{ 1027#ifndef NDEBUG 1028 const lsquic_packet_out_t *last; 1029 last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq); 1030 if (last) 1031 assert((last->po_flags & PO_REPACKNO) || 1032 last->po_packno < packet_out->po_packno); 1033#endif 1034 if (ctl->sc_flags & SC_PACE) 1035 { 1036 unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled; 1037 pacer_packet_scheduled(&ctl->sc_pacer, n_out, 1038 send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl); 1039 } 1040 send_ctl_sched_append(ctl, packet_out); 1041} 1042 1043 1044lsquic_packet_out_t * 1045lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl) 1046{ 1047 lsquic_packet_out_t *packet_out; 1048 1049 packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); 1050 if (!packet_out) 1051 return NULL; 1052 1053 if (ctl->sc_n_consec_rtos && 1054 !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))) 1055 { 1056 if (ctl->sc_next_limit) 1057 --ctl->sc_next_limit; 1058 else 1059 return NULL; 1060 } 1061 1062 if (packet_out->po_flags & PO_REPACKNO) 1063 { 1064 update_for_resending(ctl, packet_out); 1065 packet_out->po_flags &= ~PO_REPACKNO; 1066 } 1067 1068 send_ctl_sched_remove(ctl, packet_out); 1069 ctl->sc_bytes_out += lsquic_packet_out_total_sz(packet_out); 1070 return packet_out; 1071} 1072 1073 1074void 1075lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl, 1076 lsquic_packet_out_t *packet_out) 1077{ 1078 send_ctl_sched_prepend(ctl, packet_out); 1079 ctl->sc_bytes_out -= lsquic_packet_out_total_sz(packet_out); 1080 LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno); 1081#if LSQUIC_SEND_STATS 1082 ++ctl->sc_stats.n_delayed; 1083#endif 1084} 1085 1086 1087int 1088lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl) 1089{ 1090 const lsquic_packet_out_t *packet_out; 1091 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1092 if (packet_out->po_frame_types & 1093 ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM))) 1094 return 1; 1095 return 0; 1096} 1097 1098 1099int 1100lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl) 1101{ 1102 const lsquic_packet_out_t *packet_out; 1103 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1104 if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK) 1105 return 1; 1106 return 0; 1107} 1108 1109 1110static lsquic_packet_out_t * 1111send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits, 1112 unsigned need_at_least) 1113{ 1114 lsquic_packet_out_t *packet_out; 1115 1116 packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm, 1117 ctl->sc_conn_pub->packet_out_malo, 1118 !(ctl->sc_flags & SC_TCID0), ctl->sc_pack_size, bits, 1119 ctl->sc_ver_neg->vn_tag, NULL); 1120 if (!packet_out) 1121 return NULL; 1122 1123 if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least) 1124 { /* This should never happen, this is why this check is performed at 1125 * this level and not lower, before the packet is actually allocated. 1126 */ 1127 LSQ_ERROR("wanted to allocate packet with at least %u bytes of " 1128 "payload, but only got %u bytes (mtu: %u bytes)", need_at_least, 1129 lsquic_packet_out_avail(packet_out), ctl->sc_pack_size); 1130 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1131 return NULL; 1132 } 1133 1134 return packet_out; 1135} 1136 1137 1138lsquic_packet_out_t * 1139lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least) 1140{ 1141 lsquic_packet_out_t *packet_out; 1142 enum lsquic_packno_bits bits; 1143 1144 bits = lsquic_send_ctl_packno_bits(ctl); 1145 packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least); 1146 if (!packet_out) 1147 return NULL; 1148 1149 packet_out->po_packno = send_ctl_next_packno(ctl); 1150 LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno); 1151 EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out); 1152 return packet_out; 1153} 1154 1155 1156/* Do not use for STREAM frames 1157 */ 1158lsquic_packet_out_t * 1159lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl, 1160 unsigned need_at_least, int *is_err) 1161{ 1162 lsquic_packet_out_t *packet_out; 1163 1164 assert(need_at_least > 0); 1165 1166 packet_out = lsquic_send_ctl_last_scheduled(ctl); 1167 if (packet_out 1168 && !(packet_out->po_flags & PO_STREAM_END) 1169 && lsquic_packet_out_avail(packet_out) >= need_at_least) 1170 { 1171 return packet_out; 1172 } 1173 1174 if (!lsquic_send_ctl_can_send(ctl)) 1175 { 1176 *is_err = 0; 1177 return NULL; 1178 } 1179 1180 packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least); 1181 if (packet_out) 1182 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1183 else 1184 *is_err = 1; 1185 return packet_out; 1186} 1187 1188 1189static lsquic_packet_out_t * 1190send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl, 1191 unsigned need_at_least, const lsquic_stream_t *stream) 1192{ 1193 lsquic_packet_out_t *packet_out; 1194 1195 assert(need_at_least > 0); 1196 1197 packet_out = lsquic_send_ctl_last_scheduled(ctl); 1198 if (packet_out 1199 && !(packet_out->po_flags & PO_STREAM_END) 1200 && lsquic_packet_out_avail(packet_out) >= need_at_least 1201 && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM)) 1202 { 1203 return packet_out; 1204 } 1205 1206 if (!lsquic_send_ctl_can_send(ctl)) 1207 return NULL; 1208 1209 packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least); 1210 if (!packet_out) 1211 return NULL; 1212 1213 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1214 return packet_out; 1215} 1216 1217 1218static void 1219update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out) 1220{ 1221 1222 lsquic_packno_t oldno, packno; 1223 1224 /* When the packet is resent, it uses the same number of bytes to encode 1225 * the packet number as the original packet. This follows the reference 1226 * implementation. 1227 */ 1228 oldno = packet_out->po_packno; 1229 packno = send_ctl_next_packno(ctl); 1230 1231 packet_out->po_flags &= ~PO_SENT_SZ; 1232 packet_out->po_frame_types &= ~QFRAME_REGEN_MASK; 1233 assert(packet_out->po_frame_types); 1234 packet_out->po_packno = packno; 1235 1236 if (ctl->sc_ver_neg->vn_tag) 1237 { 1238 assert(packet_out->po_flags & PO_VERSION); /* It can only disappear */ 1239 packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag; 1240 } 1241 1242 assert(packet_out->po_regen_sz < packet_out->po_data_sz); 1243 if (packet_out->po_regen_sz) 1244 { 1245 assert(!(packet_out->po_flags & PO_SCHED)); 1246 lsquic_packet_out_chop_regen(packet_out); 1247 } 1248 LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64, 1249 oldno, packno); 1250 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for " 1251 "resending as packet %"PRIu64, oldno, packno); 1252} 1253 1254 1255unsigned 1256lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl) 1257{ 1258 lsquic_packet_out_t *packet_out; 1259 unsigned n = 0; 1260 1261 while (lsquic_send_ctl_can_send(ctl) && 1262 (packet_out = send_ctl_next_lost(ctl))) 1263 { 1264 if (packet_out->po_regen_sz < packet_out->po_data_sz) 1265 { 1266 ++n; 1267 update_for_resending(ctl, packet_out); 1268 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1269 } 1270 else 1271 { 1272 LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue", 1273 packet_out->po_packno); 1274 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1275 } 1276 } 1277 1278 if (n) 1279 LSQ_DEBUG("rescheduled %u packets", n); 1280 1281 return n; 1282} 1283 1284 1285void 1286lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0) 1287{ 1288 if (tcid0) 1289 { 1290 LSQ_INFO("set TCID flag"); 1291 ctl->sc_flags |= SC_TCID0; 1292 } 1293 else 1294 { 1295 LSQ_INFO("unset TCID flag"); 1296 ctl->sc_flags &= ~SC_TCID0; 1297 } 1298} 1299 1300 1301/* The controller elides this STREAM frames of stream `stream_id' from 1302 * scheduled and buffered packets. If a packet becomes empty as a result, 1303 * it is dropped. 1304 * 1305 * Packets on other queues do not need to be processed: unacked packets 1306 * have already been sent, and lost packets' reset stream frames will be 1307 * elided in due time. 1308 */ 1309void 1310lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id) 1311{ 1312 struct lsquic_packet_out *packet_out, *next; 1313 struct lsquic_packet_out *pre_dropped; 1314 unsigned n, adj; 1315 1316 pre_dropped = NULL; 1317#ifdef WIN32 1318 next = NULL; 1319#endif 1320 for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out; 1321 packet_out = next) 1322 { 1323 next = TAILQ_NEXT(packet_out, po_next); 1324 1325 if ((packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)) 1326 ) 1327 { 1328 adj = lsquic_packet_out_elide_reset_stream_frames(packet_out, 1329 stream_id); 1330 ctl->sc_bytes_scheduled -= adj; 1331 if (0 == packet_out->po_frame_types) 1332 { 1333 if (!pre_dropped) 1334 pre_dropped = TAILQ_PREV(packet_out, lsquic_packets_tailq, 1335 po_next); 1336 LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for " 1337 "stream %"PRIu32, packet_out->po_packno, stream_id); 1338 send_ctl_sched_remove(ctl, packet_out); 1339 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1340 } 1341 } 1342 } 1343 1344 /* Need to assign new packet numbers to all packets following the first 1345 * dropped packet to eliminate packet number gap. 1346 */ 1347 if (pre_dropped) 1348 { 1349 ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist); 1350 for (packet_out = TAILQ_NEXT(pre_dropped, po_next); packet_out; 1351 packet_out = TAILQ_NEXT(packet_out, po_next)) 1352 { 1353 packet_out->po_flags |= PO_REPACKNO; 1354 if (packet_out->po_flags & PO_ENCRYPTED) 1355 send_ctl_release_enc_data(ctl, packet_out); 1356 } 1357 } 1358 1359 for (n = 0; n < sizeof(ctl->sc_buffered_packets) / 1360 sizeof(ctl->sc_buffered_packets[0]); ++n) 1361 { 1362 for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets); 1363 packet_out; packet_out = next) 1364 { 1365 next = TAILQ_NEXT(packet_out, po_next); 1366 assert(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM)); 1367 lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id); 1368 if (0 == packet_out->po_frame_types) 1369 { 1370 LSQ_DEBUG("cancel buffered packet in queue #%u after eliding " 1371 "frames for stream %"PRIu32, n, stream_id); 1372 TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets, 1373 packet_out, po_next); 1374 --ctl->sc_buffered_packets[n].bpq_count; 1375 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1376 LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u", 1377 n, ctl->sc_buffered_packets[n].bpq_count); 1378 } 1379 } 1380 } 1381} 1382 1383 1384/* Count how many packets will remain after the squeezing performed by 1385 * lsquic_send_ctl_squeeze_sched(). This is the number of delayed data 1386 * packets. 1387 */ 1388#ifndef NDEBUG 1389#if __GNUC__ 1390__attribute__((weak)) 1391#endif 1392#endif 1393int 1394lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl) 1395{ 1396 const struct lsquic_packet_out *packet_out; 1397 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1398 if (packet_out->po_regen_sz < packet_out->po_data_sz) 1399 return 1; 1400 return 0; 1401} 1402 1403 1404#ifndef NDEBUG 1405static void 1406send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix, 1407 const struct lsquic_packets_tailq *tailq) 1408{ 1409 const lsquic_packet_out_t *packet_out; 1410 unsigned n_packets; 1411 char *buf; 1412 size_t bufsz; 1413 int off; 1414 1415 n_packets = 0; 1416 TAILQ_FOREACH(packet_out, tailq, po_next) 1417 ++n_packets; 1418 1419 if (n_packets == 0) 1420 { 1421 LSQ_DEBUG("%s: [<empty set>]", prefix); 1422 return; 1423 } 1424 1425 bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */); 1426 buf = malloc(bufsz); 1427 if (!buf) 1428 { 1429 LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno)); 1430 return; 1431 } 1432 1433 off = 0; 1434 TAILQ_FOREACH(packet_out, tailq, po_next) 1435 { 1436 if (off) 1437 buf[off++] = ' '; 1438 off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno); 1439 } 1440 1441 LSQ_DEBUG("%s: [%s]", prefix, buf); 1442 free(buf); 1443} 1444 1445 1446#define LOG_PACKET_Q(prefix, queue) do { \ 1447 if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)) \ 1448 send_ctl_log_packet_q(ctl, queue, prefix); \ 1449} while (0) 1450#else 1451#define LOG_PACKET_Q(p, q) 1452#endif 1453 1454 1455int 1456lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl) 1457{ 1458 struct lsquic_packet_out *packet_out, *next; 1459#ifndef NDEBUG 1460 int pre_squeeze_logged = 0; 1461#endif 1462 1463 for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out; 1464 packet_out = next) 1465 { 1466 next = TAILQ_NEXT(packet_out, po_next); 1467 if (packet_out->po_regen_sz < packet_out->po_data_sz) 1468 { 1469 if (packet_out->po_flags & PO_ENCRYPTED) 1470 send_ctl_release_enc_data(ctl, packet_out); 1471 } 1472 else 1473 { 1474#ifndef NDEBUG 1475 /* Log the whole list before we squeeze for the first time */ 1476 if (!pre_squeeze_logged++) 1477 LOG_PACKET_Q(&ctl->sc_scheduled_packets, 1478 "unacked packets before squeezing"); 1479#endif 1480 send_ctl_sched_remove(ctl, packet_out); 1481 LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue", 1482 packet_out->po_packno); 1483 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1484 } 1485 } 1486 1487#ifndef NDEBUG 1488 if (pre_squeeze_logged) 1489 LOG_PACKET_Q(&ctl->sc_scheduled_packets, 1490 "unacked packets after squeezing"); 1491 else if (ctl->sc_n_scheduled > 0) 1492 LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets"); 1493#endif 1494 1495 return ctl->sc_n_scheduled > 0; 1496} 1497 1498 1499void 1500lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl) 1501{ 1502 struct lsquic_packet_out *packet_out; 1503 1504 assert(ctl->sc_n_scheduled > 0); /* Otherwise, why is this called? */ 1505 ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist); 1506 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1507 packet_out->po_flags |= PO_REPACKNO; 1508} 1509 1510 1511void 1512lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl) 1513{ 1514 struct lsquic_packet_out *ack_packet; 1515 1516 assert(ctl->sc_n_scheduled > 1); /* Otherwise, why is this called? */ 1517 ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq); 1518 assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK)); 1519 TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next); 1520 TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next); 1521} 1522 1523 1524void 1525lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl) 1526{ 1527 lsquic_packet_out_t *packet_out; 1528 const unsigned n = ctl->sc_n_scheduled; 1529 while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets))) 1530 { 1531 send_ctl_sched_remove(ctl, packet_out); 1532 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1533 } 1534 assert(0 == ctl->sc_n_scheduled); 1535 ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist); 1536 LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 0 ? "s" : ""); 1537} 1538 1539 1540#ifdef NDEBUG 1541static 1542#elif __GNUC__ 1543__attribute__((weak)) 1544#endif 1545enum buf_packet_type 1546lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl, 1547 const lsquic_stream_t *stream) 1548{ 1549 const lsquic_stream_t *other_stream; 1550 struct lsquic_hash_elem *el; 1551 struct lsquic_hash *all_streams; 1552 1553 all_streams = ctl->sc_conn_pub->all_streams; 1554 for (el = lsquic_hash_first(all_streams); el; 1555 el = lsquic_hash_next(all_streams)) 1556 { 1557 other_stream = lsquic_hashelem_getdata(el); 1558 if (other_stream != stream 1559 && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE)) 1560 && !lsquic_stream_is_critical(other_stream) 1561 && other_stream->sm_priority < stream->sm_priority) 1562 return BPT_OTHER_PRIO; 1563 } 1564 return BPT_HIGHEST_PRIO; 1565} 1566 1567 1568static enum buf_packet_type 1569send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl, 1570 const struct lsquic_stream *stream) 1571{ 1572 if (ctl->sc_cached_bpt.stream_id != stream->id) 1573 { 1574 ctl->sc_cached_bpt.stream_id = stream->id; 1575 ctl->sc_cached_bpt.packet_type = 1576 lsquic_send_ctl_determine_bpt(ctl, stream); 1577 } 1578 return ctl->sc_cached_bpt.packet_type; 1579} 1580 1581 1582static unsigned 1583send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl, 1584 enum buf_packet_type packet_type) 1585{ 1586 unsigned count; 1587 1588 switch (packet_type) 1589 { 1590 case BPT_OTHER_PRIO: 1591 return MAX_BPQ_COUNT; 1592 case BPT_HIGHEST_PRIO: 1593 default: /* clang does not complain about absence of `default'... */ 1594 count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx; 1595 if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size) 1596 { 1597 count -= lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size; 1598 if (count > MAX_BPQ_COUNT) 1599 return count; 1600 } 1601 return MAX_BPQ_COUNT; 1602 } 1603} 1604 1605 1606static lsquic_packet_out_t * 1607send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl, 1608 enum buf_packet_type packet_type, unsigned need_at_least, 1609 const struct lsquic_stream *stream) 1610{ 1611 struct buf_packet_q *const packet_q = 1612 &ctl->sc_buffered_packets[packet_type]; 1613 lsquic_packet_out_t *packet_out; 1614 enum lsquic_packno_bits bits; 1615 1616 packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq); 1617 if (packet_out 1618 && !(packet_out->po_flags & PO_STREAM_END) 1619 && lsquic_packet_out_avail(packet_out) >= need_at_least 1620 && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM)) 1621 { 1622 return packet_out; 1623 } 1624 1625 if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type)) 1626 return NULL; 1627 1628 bits = lsquic_send_ctl_guess_packno_bits(ctl); 1629 packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least); 1630 if (!packet_out) 1631 return NULL; 1632 1633 TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next); 1634 ++packet_q->bpq_count; 1635 LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u", 1636 packet_type, packet_q->bpq_count); 1637 return packet_out; 1638} 1639 1640 1641lsquic_packet_out_t * 1642lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl, 1643 unsigned need_at_least, const struct lsquic_stream *stream) 1644{ 1645 enum buf_packet_type packet_type; 1646 1647 if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl)) 1648 return send_ctl_get_packet_for_stream(ctl, need_at_least, stream); 1649 else 1650 { 1651 packet_type = send_ctl_lookup_bpt(ctl, stream); 1652 return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least, 1653 stream); 1654 } 1655} 1656 1657 1658#ifdef NDEBUG 1659static 1660#elif __GNUC__ 1661__attribute__((weak)) 1662#endif 1663enum lsquic_packno_bits 1664lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl) 1665{ 1666 lsquic_packno_t smallest_unacked; 1667 unsigned n_in_flight; 1668 1669 smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl); 1670 n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size; 1671 return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked, 1672 n_in_flight); 1673} 1674 1675 1676enum lsquic_packno_bits 1677lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl) 1678{ 1679 1680 if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl)) 1681 return lsquic_send_ctl_calc_packno_bits(ctl); 1682 else 1683 return lsquic_send_ctl_guess_packno_bits(ctl); 1684} 1685 1686 1687static int 1688split_buffered_packet (lsquic_send_ctl_t *ctl, 1689 enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out, 1690 enum lsquic_packno_bits bits, unsigned excess_bytes) 1691{ 1692 struct buf_packet_q *const packet_q = 1693 &ctl->sc_buffered_packets[packet_type]; 1694 lsquic_packet_out_t *new_packet_out; 1695 1696 assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out); 1697 1698 new_packet_out = send_ctl_allocate_packet(ctl, bits, 0); 1699 if (!packet_out) 1700 return -1; 1701 1702 if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out, 1703 new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes)) 1704 { 1705 lsquic_packet_out_set_packno_bits(packet_out, bits); 1706 TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out, 1707 po_next); 1708 ++packet_q->bpq_count; 1709 LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u", 1710 packet_type, packet_q->bpq_count); 1711 return 0; 1712 } 1713 else 1714 { 1715 lsquic_packet_out_destroy(packet_out, ctl->sc_enpub); 1716 return -1; 1717 } 1718} 1719 1720 1721int 1722lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl, 1723 enum buf_packet_type packet_type) 1724{ 1725 struct buf_packet_q *const packet_q = 1726 &ctl->sc_buffered_packets[packet_type]; 1727 lsquic_packet_out_t *packet_out; 1728 unsigned used, excess; 1729 1730 assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl)); 1731 const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl); 1732 const unsigned need = packno_bits2len(bits); 1733 1734 while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) && 1735 lsquic_send_ctl_can_send(ctl)) 1736 { 1737 if (bits != lsquic_packet_out_packno_bits(packet_out)) 1738 { 1739 used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out)); 1740 if (need > used 1741 && need - used > lsquic_packet_out_avail(packet_out)) 1742 { 1743 excess = need - used - lsquic_packet_out_avail(packet_out); 1744 if (0 != split_buffered_packet(ctl, packet_type, 1745 packet_out, bits, excess)) 1746 { 1747 return -1; 1748 } 1749 } 1750 } 1751 TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next); 1752 --packet_q->bpq_count; 1753 LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u", 1754 packet_type, packet_q->bpq_count); 1755 packet_out->po_packno = send_ctl_next_packno(ctl); 1756 lsquic_send_ctl_scheduled_one(ctl, packet_out); 1757 } 1758 1759 return 0; 1760} 1761 1762 1763int 1764lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl, 1765 const struct lsquic_stream *stream) 1766{ 1767 enum buf_packet_type packet_type; 1768 struct buf_packet_q *packet_q; 1769 lsquic_packet_out_t *packet_out; 1770 const struct parse_funcs *pf; 1771 1772 pf = ctl->sc_conn_pub->lconn->cn_pf; 1773 packet_type = send_ctl_lookup_bpt(ctl, stream); 1774 packet_q = &ctl->sc_buffered_packets[packet_type]; 1775 1776 TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets, 1777 lsquic_packets_tailq, po_next) 1778 if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream)) 1779 return 0; 1780 1781 TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next) 1782 if (0 == packet_out->po_sent 1783 && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream)) 1784 { 1785 return 0; 1786 } 1787 1788 return -1; 1789} 1790 1791 1792size_t 1793lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl) 1794{ 1795 const lsquic_packet_out_t *packet_out; 1796 unsigned n; 1797 size_t size; 1798 const struct lsquic_packets_tailq queues[] = { 1799 ctl->sc_scheduled_packets, 1800 ctl->sc_unacked_packets, 1801 ctl->sc_lost_packets, 1802 ctl->sc_buffered_packets[0].bpq_packets, 1803 ctl->sc_buffered_packets[1].bpq_packets, 1804 }; 1805 1806 size = sizeof(*ctl); 1807 1808 for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n) 1809 TAILQ_FOREACH(packet_out, &queues[n], po_next) 1810 size += lsquic_packet_out_mem_used(packet_out); 1811 1812 return size; 1813} 1814 1815 1816