lsquic_send_ctl.c revision 9c444524
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}


static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID,
                                    enpub->enp_settings.es_clock_granularity);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
    ctl->sc_max_packno_bits = PACKNO_LEN_4; /* Safe value before verneg */
}


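/* Worked example with illustrative numbers: if SRTT is 100 ms and RTTVAR
 * is 5 ms, get_retx_delay() yields 100000 + 4 * 5000 = 120000 usec, which
 * is then raised to MIN_RTO_DELAY (1 second).  calculate_packet_rto()
 * below doubles the delay once per consecutive RTO: after three RTOs in
 * a row, 1 s << 3 = 8 seconds.  The exponent is capped at
 * MAX_RTO_BACKOFFS.
 */
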
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}


static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}


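/* Worked example with illustrative numbers: with SRTT = 100 ms and more
 * than one packet in flight, calculate_tlp_delay() above returns
 * max(10 ms, 2 * SRTT) = 200 ms.  With a single packet in flight it
 * returns max(1.5 * SRTT + MIN_RTO_DELAY, 2 * SRTT) = 1.15 seconds,
 * giving a lone packet a full retransmission timeout's worth of slack.
 */
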
static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        /* [draft-iyengar-quic-loss-recovery-01]:
         *
         *  if (handshake packets are outstanding):
         *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
         *      handshake_count++;
         */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
#ifdef WIN32
    default:
        delay = 0;
#endif
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}


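/* Worked example with illustrative numbers for send_ctl_transfer_time()
 * above: with SRTT = 50 ms, cwnd = 64000 bytes, and a 1370-byte packet
 * size, bandwidth = 64000 * 1000000 / 50000 = 1280000 bytes per second.
 * In congestion avoidance the pacing rate is 1.25 times the bandwidth,
 * or 1600000 bytes per second, so one full-size packet is released
 * roughly every 1370 * 1000000 / 1600000 = 856 usec.
 */
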
static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                             struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                        struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
                                sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}


static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt,
                                                                lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}


static void
send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
                           struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
        ctl->sc_conn_pub->lconn->cn_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                              ctl->sc_conn_pub->lconn->cn_peer_ctx);
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                             lsquic_packet_out_t *packet_out)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);
    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
    if (packet_out->po_flags & PO_ENCRYPTED)
        send_ctl_release_enc_data(ctl, packet_out);
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


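/* Worked example with illustrative numbers: with N_NACKS_BEFORE_RETX = 3
 * and largest acked packet 10, the FACK check below declares unacked
 * packets 1 through 6 lost (packno + 3 < 10), while packet 7 survives.
 * The early-retransmit and sent-time checks that follow catch losses
 * that the FACK threshold is too coarse to detect.
 */
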
static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}


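/* A note on the ACK-processing loop below: ranges in struct ack_info are
 * ordered from highest to lowest, so the loop walks the unacked list in
 * ascending packet number order while stepping the `range' pointer
 * backwards from the lowest range.  ack2ed[1] records the po_ack2ed value
 * of the newest acked packet that itself carried an ACK frame -- roughly,
 * the largest packet number acknowledged in an ACK frame that the peer
 * has confirmed receiving.
 */
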
int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the
         * number of ranges is not much larger than the number of unacked
         * packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size /* This
                    is the "maximum burst" parameter */
                    < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule
         * ACK and STOP_WAITING frame to chop the range if we get two of
         * these in a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *lost_packet;

  get_next_lost:
    lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
            if (lost_packet->po_regen_sz >= lost_packet->po_data_sz)
            {
                LSQ_DEBUG("Dropping packet %"PRIu64" from lost queue",
                                                    lost_packet->po_packno);
                TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
                send_ctl_destroy_packet(ctl, lost_packet);
                goto get_next_lost;
            }
        }

        if (!lsquic_send_ctl_can_send(ctl))
            return NULL;

        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
    }

    return lost_packet;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    unsigned n;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= packet_out_total_sz(packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    if (ctl->sc_flags & SC_PACE)
        pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         + ctl->sc_bytes_out;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}


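/* lsquic_send_ctl_can_send() below compares bytes already out -- scheduled
 * bytes, unacked bytes, and bytes of packets handed to the caller but not
 * yet reported sent -- against the congestion window.  Illustrative
 * example: with cwnd = 64000 and n_out = 63000 the check passes even
 * though the next full-size packet will overshoot the window; the cwnd is
 * a byte-count threshold on what is already out, not a hard cap on the
 * final packet.
 */
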
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
#ifdef WIN32
    default:
        n_resubmitted = 0;
#endif
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now()
                    < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                               lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
 * to check whether the first scheduled packet cannot be sent.
 */
int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out
                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    return ctl->sc_n_consec_rtos
        && 0 == ctl->sc_next_limit
        && packet_out
        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    int dec_limit;

  get_packet:
    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            dec_limit = 1;
        else
            return NULL;
    }
    else
        dec_limit = 0;

    send_ctl_sched_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_REPACKNO)
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            update_for_resending(ctl, packet_out);
            packet_out->po_flags &= ~PO_REPACKNO;
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                                                    packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            goto get_packet;
        }
    }

    ctl->sc_bytes_out += packet_out_total_sz(packet_out);
    if (dec_limit)
    {
        --ctl->sc_next_limit;
        packet_out->po_flags |= PO_LIMITED;
    }
    else
        packet_out->po_flags &= ~PO_LIMITED;
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                             lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    if (packet_out->po_flags & PO_LIMITED)
        ++ctl->sc_next_limit;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


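/* A sketch of the send-loop contract implemented above (the actual caller
 * lives in lsquic_engine.c): a packet obtained from
 * lsquic_send_ctl_next_packet_to_send() is either reported back via
 * lsquic_send_ctl_sent_packet() once it is written to the network, or
 * returned via lsquic_send_ctl_delayed_one() if the socket would block,
 * which undoes the sc_bytes_out accounting and, for PO_LIMITED packets,
 * restores the post-RTO send credit.
 */
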
static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                          unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        send_ctl_destroy_packet(ctl, packet_out);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        if (is_err)
            *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else if (is_err)
        *is_err = 1;
    return packet_out;
}


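/* "Regen" bytes -- po_regen_sz -- count frames at the start of the packet
 * that are regenerated rather than retransmitted, such as ACK frames.
 * update_for_resending() below chops them off and assigns a fresh packet
 * number; a packet whose payload is nothing but regen bytes has nothing
 * worth resending and is dropped by the callers instead.
 */
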
static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        if (packet_out->po_flags & PO_SCHED)
            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while ((packet_out = send_ctl_next_lost(ctl)))
    {
        assert(packet_out->po_regen_sz < packet_out->po_data_sz);
        ++n;
#if LSQUIC_CONN_STATS
        ++ctl->sc_conn_pub->conn_stats->out.retx_packets;
#endif
        update_for_resending(ctl, packet_out);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |= SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n, adj;
    int dropped;

    dropped = 0;
#ifdef WIN32
    next = NULL;
#endif
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                send_ctl_destroy_packet(ctl, packet_out);
                ++dropped;
            }
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
            {
                lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                            stream_id);
                if (0 == packet_out->po_frame_types)
                {
                    LSQ_DEBUG("cancel buffered packet in queue #%u after "
                        "eliding frames for stream %"PRIu32, n, stream_id);
                    TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                                 packet_out, po_next);
                    --ctl->sc_buffered_packets[n].bpq_count;
                    send_ctl_destroy_packet(ctl, packet_out);
                    LSQ_DEBUG("Elide packet from buffered queue #%u; "
                        "count: %u", n, ctl->sc_buffered_packets[n].bpq_count);
                }
            }
        }
    }
}


/* Count how many packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  This is the number of delayed data
 * packets.
 */
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                       const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(q, p)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
    int dropped;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    dropped = 0;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                                                    packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            ++dropped;
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


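/* After lsquic_send_ctl_reset_packnos() above, sc_cur_packno is rewound to
 * the largest packet number actually sent and each scheduled packet is
 * flagged PO_REPACKNO, so lsquic_send_ctl_next_packet_to_send() assigns it
 * a fresh number (via update_for_resending()) just before it goes out and
 * the packet number sequence on the wire stays contiguous.
 */
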
void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                               const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                         el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                     const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


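/* Worked example with illustrative numbers for send_ctl_max_bpq_count()
 * below: if the congestion window is worth 64 packets and 10 packets are
 * scheduled or in flight, up to 64 - 10 = 54 packets may sit in the
 * highest-priority buffered queue -- the cap may exceed MAX_BPQ_COUNT
 * when the congestion window has room.  The low-priority queue is always
 * capped at MAX_BPQ_COUNT.
 */
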
static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default:    /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
        {
            /* Subtract in this order: `count' is the smaller of the two
             * here, so the reverse order would underflow the unsigned
             * value.
             */
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size
                  - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}


static void
send_ctl_move_ack (struct lsquic_send_ctl *ctl, struct lsquic_packet_out *dst,
                   struct lsquic_packet_out *src)
{
    assert(dst->po_data_sz == 0);

    if (lsquic_packet_out_avail(dst) >= src->po_regen_sz)
    {
        memcpy(dst->po_data, src->po_data, src->po_regen_sz);
        dst->po_data_sz = src->po_regen_sz;
        dst->po_regen_sz = src->po_regen_sz;
        dst->po_frame_types |= (QFRAME_REGEN_MASK & src->po_frame_types);
        src->po_frame_types &= ~QFRAME_REGEN_MASK;
        lsquic_packet_out_chop_regen(src);
    }
}


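/* When the first packet is placed on a buffered queue,
 * send_ctl_get_buffered_packet() below tries to make it carry an ACK:
 * either one stolen from the head of the low-priority queue, so that the
 * ACK is not stuck behind buffered data, or a freshly generated one.
 * Such packets are allocated with the largest packet number encoding so
 * that they never need to be split when they are finally scheduled.
 */
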
static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
            enum buf_packet_type packet_type, unsigned need_at_least,
            const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;
    enum { AA_STEAL, AA_GENERATE, AA_NONE, } ack_action;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    if (packet_q->bpq_count == 0)
    {
        /* If ACK was written to the low-priority queue first, steal it */
        if (packet_q == &ctl->sc_buffered_packets[BPT_HIGHEST_PRIO]
            && !TAILQ_EMPTY(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
            && (TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
                                        ->po_frame_types & QUIC_FTBIT_ACK))
        {
            LSQ_DEBUG("steal ACK frame from low-priority buffered queue");
            ack_action = AA_STEAL;
            bits = ctl->sc_max_packno_bits;
        }
        /* If ACK can be generated, write it to the first buffered packet. */
        else if (lconn->cn_if->ci_can_write_ack(lconn))
        {
            LSQ_DEBUG("generate ACK frame for first buffered packet in "
                                                "queue #%u", packet_type);
            ack_action = AA_GENERATE;
            /* Packet length is set to the largest possible size to guarantee
             * that buffered packet with the ACK will not need to be split.
             */
            bits = ctl->sc_max_packno_bits;
        }
        else
            goto no_ack_action;
    }
    else
    {
  no_ack_action:
        ack_action = AA_NONE;
        bits = lsquic_send_ctl_guess_packno_bits(ctl);
    }

    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    switch (ack_action)
    {
    case AA_STEAL:
        send_ctl_move_ack(ctl, packet_out,
            TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets));
        break;
    case AA_GENERATE:
        lconn->cn_if->ci_write_ack(lconn, packet_out);
        break;
    case AA_NONE:
        break;
    }

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_get_writeable_packet(ctl, need_at_least, NULL);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


int
lsquic_send_ctl_buffered_and_same_prio_as_headers (struct lsquic_send_ctl *ctl,
                                        const struct lsquic_stream *stream)
{
    return !lsquic_send_ctl_schedule_stream_packets_immediately(ctl)
        && BPT_HIGHEST_PRIO == send_ctl_lookup_bpt(ctl, stream);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    enum lsquic_packno_bits bits;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    bits = calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
    if (bits <= ctl->sc_max_packno_bits)
        return bits;
    else
        return ctl->sc_max_packno_bits;
}


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


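/* Illustrative example for lsquic_send_ctl_calc_packno_bits() above: if
 * the next packet number is 1000 and the smallest unacked is 900, the
 * encoding must let the peer disambiguate the new number from roughly
 * n_in_flight outstanding packets; calc_packno_bits() (from lsquic_parse)
 * picks the smallest safe encoding, and the result is clamped to
 * sc_max_packno_bits, which depends on the negotiated version.
 */
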
static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        lsquic_packet_out_set_packno_bits(packet_out, bits);
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        /* The original packet stays on the queue; free only the new one */
        send_ctl_destroy_packet(ctl, new_packet_out);
        return -1;
    }
}


int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                   enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        packet_out->po_packno = send_ctl_next_packno(ctl);
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u.  "
            "It becomes packet %"PRIu64, packet_type, packet_q->bpq_count,
            packet_out->po_packno);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}


void
lsquic_send_ctl_verneg_done (struct lsquic_send_ctl *ctl)
{
    if ((1 << ctl->sc_conn_pub->lconn->cn_version) &
                                            LSQUIC_GQUIC_HEADER_VERSIONS)
        ctl->sc_max_packno_bits = PACKNO_LEN_6;
    else
        /* Assuming Q044 */
        ctl->sc_max_packno_bits = PACKNO_LEN_4;
    LSQ_DEBUG("version negotiation done (%s): max packno bits: %u",
        lsquic_ver2str[ ctl->sc_conn_pub->lconn->cn_version ],
        ctl->sc_max_packno_bits);
}