/* Copyright (c) 2017 - 2018 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                            && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}


static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets)
                                / sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}
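

/* The RTO delay doubles with each consecutive RTO, up to MAX_RTO_BACKOFFS
 * doublings.  For example, with the default base delay of 500 ms (used
 * when there is no RTT estimate yet), three consecutive RTOs push the
 * next timeout to 500 * 2^3 = 4000 ms.
 */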
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}


static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}
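

/* Arm the retransmission alarm using the delay appropriate to the current
 * retransmission mode: handshake timer, loss-detection (early retransmit)
 * timer, tail loss probe, or RTO.
 */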
static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        /* [draft-iyengar-quic-loss-recovery-01]:
         *
         *  if (handshake packets are outstanding):
         *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
         *      handshake_count++;
         */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
#ifdef WIN32
    default:
        delay = 0;
#endif
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}
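

/* Time to transfer one packet at the current pacing rate.  For example,
 * with a cwnd of 32 KB and an srtt of 50 ms, bandwidth works out to
 * 655,360 bytes per second; outside of slow start and recovery the pacing
 * rate is bandwidth plus a quarter, or 819,200 bytes per second, so a
 * 1,370-byte packet is clocked out roughly every 1.7 ms.
 */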
static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                    struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                             struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                        struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                           struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}
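

/* Take an RTT sample from the largest newly acked packet.  The sample is
 * used only if this packet is newer than the previously sampled packet
 * and the peer-reported ACK delay is smaller than the measured RTT.
 */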
static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt,
                                                                lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}


static void
send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
                           struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
        ctl->sc_conn_pub->lconn->cn_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                              ctl->sc_conn_pub->lconn->cn_peer_ctx);
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                             lsquic_packet_out_t *packet_out)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);
    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
    if (packet_out->po_flags & PO_ENCRYPTED)
        send_ctl_release_enc_data(ctl, packet_out);
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}
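

/* Process an incoming ACK frame: validate it against the send history,
 * remove newly acked packets from the unacked queue (feeding the RTT
 * estimator and the congestion controller), run loss detection, and
 * re-arm or clear the retransmission alarm.
 */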
int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the
         * number of ranges is not much larger than the number of unacked
         * packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size
                    /* This is the "maximum burst" parameter */
                    < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule
         * ACK and STOP_WAITING frame to chop the range if we get two of
         * these in a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
        }
        return lost_packet;
    }
    else
        return NULL;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    unsigned n;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= packet_out_total_sz(packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets)
                                / sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         + ctl->sc_bytes_out;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                            ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}
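

/* Sending is allowed while the total number of bytes out -- scheduled,
 * unacked, and handed to the engine but not yet accounted for -- is below
 * cwnd.  With pacing on, the pacer must also have room; if it does not,
 * the connection is placed on the advisory tick queue to try again later.
 */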
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                            ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
#ifdef WIN32
    default:
        n_resubmitted = 0;
#endif
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now()
                    < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                               lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
 * to check whether the first scheduled packet cannot be sent.
 */
int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out
                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    return ctl->sc_n_consec_rtos
        && 0 == ctl->sc_next_limit
        && packet_out
        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    int dec_limit;

  get_packet:
    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            dec_limit = 1;
        else
            return NULL;
    }
    else
        dec_limit = 0;

    send_ctl_sched_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_REPACKNO)
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            update_for_resending(ctl, packet_out);
            packet_out->po_flags &= ~PO_REPACKNO;
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            goto get_packet;
        }
    }

    ctl->sc_bytes_out += packet_out_total_sz(packet_out);
    if (dec_limit)
    {
        --ctl->sc_next_limit;
        packet_out->po_flags |= PO_LIMITED;
    }
    else
        packet_out->po_flags &= ~PO_LIMITED;
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                             lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    if (packet_out->po_flags & PO_LIMITED)
        ++ctl->sc_next_limit;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                          unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        send_ctl_destroy_packet(ctl, packet_out);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


/* Do not use for STREAM frames
 */
lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else
        *is_err = 1;
    return packet_out;
}


static lsquic_packet_out_t *
send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                      unsigned need_at_least, const lsquic_stream_t *stream)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
        return NULL;

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (!packet_out)
        return NULL;

    lsquic_send_ctl_scheduled_one(ctl, packet_out);
    return packet_out;
}
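

/* Frames that are regenerated on every send (e.g. ACK and STOP_WAITING)
 * sit at the front of the packet and are chopped off before the packet
 * is resent: fresh versions will be generated when needed.
 */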
static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{
    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        if (packet_out->po_flags & PO_SCHED)
            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while (lsquic_send_ctl_can_send(ctl) &&
                                (packet_out = send_ctl_next_lost(ctl)))
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            ++n;
            update_for_resending(ctl, packet_out);
            lsquic_send_ctl_scheduled_one(ctl, packet_out);
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |= SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n, adj;
    int dropped;

    dropped = 0;
#ifdef WIN32
    next = NULL;
#endif
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                send_ctl_destroy_packet(ctl, packet_out);
                ++dropped;
            }
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

    for (n = 0; n < sizeof(ctl->sc_buffered_packets)
                                / sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            assert(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM));
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                    "frames for stream %"PRIu32, n, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                send_ctl_destroy_packet(ctl, packet_out);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}


/* Count how many packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  This is the number of delayed data
 * packets.
 */
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                       const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


#define LOG_PACKET_Q(prefix, queue) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, queue, prefix);                          \
} while (0)
#else
#define LOG_PACKET_Q(p, q)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
    int dropped;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    dropped = 0;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                    "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            ++dropped;
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                    "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}
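

/* Move the ACK packet, which was appended to the back of the scheduled
 * queue, to the front, so that it goes out ahead of the delayed packets.
 */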
void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                               const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
                && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                     const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}
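

/* Cap the buffered packet queue.  Packets for the highest-priority stream
 * may fill whatever room is left under cwnd (but never fewer than
 * MAX_BPQ_COUNT), while packets queued behind a higher-priority stream
 * are always capped at MAX_BPQ_COUNT.
 */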
static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default:        /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
        {
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic)
                                                / ctl->sc_pack_size - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}


static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least
        && !lsquic_packet_out_has_frame(packet_out, stream, QUIC_FRAME_STREAM))
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    bits = lsquic_send_ctl_guess_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return send_ctl_get_packet_for_stream(ctl, need_at_least, stream);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                            n_in_flight);
}


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}
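

/* A buffered packet is allocated with a guessed packet number size.  When
 * it is finally scheduled, the real encoding may need more bytes than
 * were reserved; if the difference does not fit into the packet's
 * leftover space, the packet is split in two.
 */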
static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        lsquic_packet_out_set_packno_bits(packet_out, bits);
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        send_ctl_destroy_packet(ctl, new_packet_out);
        return -1;
    }
}


int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                   enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        packet_out->po_packno = send_ctl_next_packno(ctl);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}