lsquic_engine.c revision 4051ae3a
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_engine.c - QUIC engine
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <time.h>
#ifndef WIN32
#include <sys/time.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <netdb.h>
#endif

#ifndef NDEBUG
#include <sys/types.h>
#endif

#if defined(WIN32) || defined(NDEBUG)
#define CAN_LOSE_PACKETS 0
#else
#define CAN_LOSE_PACKETS 1
#endif

#if CAN_LOSE_PACKETS
#include <regex.h>      /* For code that loses packets */
#endif

#if LOG_PACKET_CHECKSUM
#include <zlib.h>
#endif

#include <openssl/aead.h>

#include "lsquic.h"
#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_parse_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_bw_sampler.h"
#include "lsquic_minmax.h"
#include "lsquic_bbr.h"
#include "lsquic_send_ctl.h"
#include "lsquic_set.h"
#include "lsquic_conn_flow.h"
#include "lsquic_sfcw.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_full_conn.h"
#include "lsquic_util.h"
#include "lsquic_qtags.h"
#include "lsquic_enc_sess.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_eng_hist.h"
#include "lsquic_ev_log.h"
#include "lsquic_version.h"
#include "lsquic_pr_queue.h"
#include "lsquic_mini_conn.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_stock_shi.h"
#include "lsquic_purga.h"
#include "lsquic_tokgen.h"
#include "lsquic_attq.h"
#include "lsquic_min_heap.h"
#include "lsquic_http1x_if.h"
#include "lsquic_handshake.h"
#include "lsquic_crand.h"
#include "lsquic_ietf.h"

#define LSQUIC_LOGGER_MODULE LSQLM_ENGINE
#include "lsquic_logger.h"

#ifndef LSQUIC_DEBUG_NEXT_ADV_TICK
#define LSQUIC_DEBUG_NEXT_ADV_TICK 1
#endif

#if LSQUIC_DEBUG_NEXT_ADV_TICK
#include "lsquic_alarmset.h"
#endif

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* The batch of outgoing packets grows and shrinks dynamically */
#define MAX_OUT_BATCH_SIZE 1024
#define MIN_OUT_BATCH_SIZE 4
#define INITIAL_OUT_BATCH_SIZE 32

struct out_batch
{
    lsquic_conn_t           *conns   [MAX_OUT_BATCH_SIZE];
    struct lsquic_out_spec   outs    [MAX_OUT_BATCH_SIZE];
    unsigned                 pack_off[MAX_OUT_BATCH_SIZE];
    lsquic_packet_out_t     *packets [MAX_OUT_BATCH_SIZE * 2];
    struct iovec             iov     [MAX_OUT_BATCH_SIZE * 2];
};

typedef struct lsquic_conn * (*conn_iter_f)(struct lsquic_engine *);

static void
process_connections (struct lsquic_engine *engine, conn_iter_f iter,
                     lsquic_time_t now);

static void
engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag);

static lsquic_conn_t *
engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn,
                                        enum lsquic_conn_flags flag);

static void
force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn);

#if LSQUIC_COUNT_ENGINE_CALLS
#define ENGINE_CALLS_INCR(e) do { ++(e)->n_engine_calls; } while (0)
#else
#define ENGINE_CALLS_INCR(e)
#endif

/* Nested calls to some LSQUIC functions are not supported.  Functions that
 * iterate over connections cannot be nested.
 */
#define ENGINE_IN(e) do {                               \
    assert(!((e)->pub.enp_flags & ENPUB_PROC));         \
    (e)->pub.enp_flags |= ENPUB_PROC;                   \
    ENGINE_CALLS_INCR(e);                               \
} while (0)

#define ENGINE_OUT(e) do {                              \
    assert((e)->pub.enp_flags & ENPUB_PROC);            \
    (e)->pub.enp_flags &= ~ENPUB_PROC;                  \
} while (0)

/* A connection can be referenced from one of six places:
 *
 *   1. A hash is used to find connections in order to dispatch an incoming
 *      packet.  Connections can be hashed by CIDs or by address.  In the
 *      former case, each connection has one or more mappings in the hash
 *      table.  IETF QUIC connections have up to eight (in our implementation)
 *      source CIDs and each of those would have a mapping.  In client mode,
 *      depending on QUIC versions and options selected, it may be necessary
 *      to hash connections by address, in which case incoming packets are
 *      delivered to connections based on the address.
 *
 *   2. Outgoing queue.
 *
 *   3. Tickable queue.
 *
 *   4. Advisory Tick Time queue.
 *
 *   5. Closing connections queue.  This is a transient queue -- it only
 *      exists for the duration of process_connections() function call.
 *
 *   6. Ticked connections queue.  Another transient queue, similar to (5).
 *
 * The idea is to destroy the connection when it is no longer referenced.
 * For example, a connection tick may return TICK_SEND|TICK_CLOSE.  In
 * that case, the connection is referenced from two places: (2) and (5).
 * After its packets are sent, it is only referenced in (5), and at the
 * end of the function call, when it is removed from (5), the reference
 * count goes to zero and the connection is destroyed.  If not all packets
 * can be sent, at the end of the function call, the connection is
 * referenced by (2) and will only be removed once all outgoing packets
 * have been sent.
 */
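/* To make the reference counting above concrete, here is a minimal sketch
 * of the TICK_SEND|TICK_CLOSE example using the helpers defined later in
 * this file (queue bookkeeping and the actual tick logic are elided):
 *
 *     // tick result: connection wants to send and then close
 *     lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent);
 *     engine_incref_conn(conn, LSCONN_HAS_OUTGOING);          // ref (2)
 *     STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn);
 *     engine_incref_conn(conn, LSCONN_CLOSING);               // ref (5)
 *     // ... batches are sent ...
 *     engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING);  // drop (2)
 *     // ... closing queue is drained at the end of the call ...
 *     engine_decref_conn(engine, conn, LSCONN_CLOSING);       // drop (5):
 *                                     // no CONN_REF_FLAGS left -> destroyed
 */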
#define CONN_REF_FLAGS  (LSCONN_HASHED          \
                        |LSCONN_HAS_OUTGOING    \
                        |LSCONN_TICKABLE        \
                        |LSCONN_TICKED          \
                        |LSCONN_CLOSING         \
                        |LSCONN_ATTQ)



struct cid_update_batch
{
    lsquic_cids_update_f    cub_update_cids;
    void                   *cub_update_ctx;
    unsigned                cub_count;
    lsquic_cid_t            cub_cids[20];
    void                   *cub_peer_ctxs[20];
};

static void
cub_init (struct cid_update_batch *, lsquic_cids_update_f, void *);


struct lsquic_engine
{
    struct lsquic_engine_public        pub;
    enum {
        ENG_SERVER      = LSENG_SERVER,
        ENG_HTTP        = LSENG_HTTP,
        ENG_COOLDOWN    = (1 <<  7),    /* Cooldown: no new connections */
        ENG_PAST_DEADLINE
                        = (1 <<  8),    /* Previous call to a processing
                                         * function went past time threshold.
                                         */
        ENG_CONNS_BY_ADDR
                        = (1 <<  9),    /* Connections are hashed by address */
#ifndef NDEBUG
        ENG_COALESCE    = (1 << 24),    /* Packet coalescing is enabled */
#endif
#if CAN_LOSE_PACKETS
        ENG_LOSE_PACKETS= (1 << 25),    /* Lose *some* outgoing packets */
#endif
#ifndef NDEBUG
        ENG_DTOR        = (1 << 26),    /* Engine destructor */
#endif
    }                                  flags;
    lsquic_packets_out_f               packets_out;
    void                              *packets_out_ctx;
    lsquic_cids_update_f               report_new_scids;
    lsquic_cids_update_f               report_live_scids;
    lsquic_cids_update_f               report_old_scids;
    void                              *scids_ctx;
    struct lsquic_hash                *conns_hash;
    struct min_heap                    conns_tickable;
    struct min_heap                    conns_out;
    struct eng_hist                    history;
    unsigned                           batch_size;
    struct lsquic_conn                *curr_conn;
    struct pr_queue                   *pr_queue;
    struct attq                       *attq;
    /* Track time last time a packet was sent to give new connections
     * priority lower than that of existing connections.
     */
    lsquic_time_t                      last_sent;
#if CAN_LOSE_PACKETS
    regex_t                            lose_packets_re;
    const char                        *lose_packets_str;
#endif
    unsigned                           n_conns;
    lsquic_time_t                      deadline;
    lsquic_time_t                      resume_sending_at;
    unsigned                           mini_conns_count;
    struct lsquic_purga               *purga;
#if LSQUIC_CONN_STATS
    struct {
        unsigned                conns;
    }                                  stats;
    struct conn_stats                  conn_stats_sum;
    FILE                              *stats_fh;
#endif
    struct cid_update_batch            new_scids;
    struct out_batch                   out_batch;
#if LSQUIC_COUNT_ENGINE_CALLS
    unsigned long                      n_engine_calls;
#endif
#if LSQUIC_DEBUG_NEXT_ADV_TICK
    uintptr_t                          last_logged_conn;
    unsigned                           last_logged_ae_why;
    int                                last_tick_diff;
#endif
    struct crand                       crand;
    EVP_AEAD_CTX                       retry_aead_ctx[N_IETF_RETRY_VERSIONS];
};


void
lsquic_engine_init_settings (struct lsquic_engine_settings *settings,
                             unsigned flags)
{
    memset(settings, 0, sizeof(*settings));
    settings->es_versions        = LSQUIC_DF_VERSIONS;
    if (flags & ENG_SERVER)
    {
        settings->es_cfcw        = LSQUIC_DF_CFCW_SERVER;
        settings->es_sfcw        = LSQUIC_DF_SFCW_SERVER;
        settings->es_init_max_data
                                 = LSQUIC_DF_INIT_MAX_DATA_SERVER;
        settings->es_init_max_stream_data_bidi_remote
                         = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_SERVER;
        settings->es_init_max_stream_data_bidi_local
                         = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_SERVER;
        settings->es_init_max_stream_data_uni
                                 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_SERVER;
        settings->es_init_max_streams_uni
                                 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_SERVER;
        settings->es_ping_period = 0;
        settings->es_noprogress_timeout
                                 = LSQUIC_DF_NOPROGRESS_TIMEOUT_SERVER;
    }
    else
    {
        settings->es_cfcw        = LSQUIC_DF_CFCW_CLIENT;
        settings->es_sfcw        = LSQUIC_DF_SFCW_CLIENT;
        settings->es_init_max_data
                                 = LSQUIC_DF_INIT_MAX_DATA_CLIENT;
        settings->es_init_max_stream_data_bidi_remote
                         = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_CLIENT;
        settings->es_init_max_stream_data_bidi_local
                         = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_CLIENT;
        settings->es_init_max_stream_data_uni
                                 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_CLIENT;
        settings->es_init_max_streams_uni
                                 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_CLIENT;
        settings->es_ping_period = LSQUIC_DF_PING_PERIOD;
        settings->es_noprogress_timeout
                                 = LSQUIC_DF_NOPROGRESS_TIMEOUT_CLIENT;
    }
    settings->es_max_streams_in  = LSQUIC_DF_MAX_STREAMS_IN;
    settings->es_idle_conn_to    = LSQUIC_DF_IDLE_CONN_TO;
    settings->es_idle_timeout    = LSQUIC_DF_IDLE_TIMEOUT;
    settings->es_handshake_to    = LSQUIC_DF_HANDSHAKE_TO;
    settings->es_silent_close    = LSQUIC_DF_SILENT_CLOSE;
    settings->es_max_header_list_size
                                 = LSQUIC_DF_MAX_HEADER_LIST_SIZE;
    settings->es_ua              = LSQUIC_DF_UA;
    settings->es_ecn             = LSQUIC_DF_ECN;

    settings->es_pdmd            = QTAG_X509;
    settings->es_aead            = QTAG_AESG;
    settings->es_kexs            = QTAG_C255;
    settings->es_support_push    = LSQUIC_DF_SUPPORT_PUSH;
    settings->es_support_tcid0   = LSQUIC_DF_SUPPORT_TCID0;
    settings->es_support_nstp    = LSQUIC_DF_SUPPORT_NSTP;
    settings->es_honor_prst      = LSQUIC_DF_HONOR_PRST;
    settings->es_progress_check  = LSQUIC_DF_PROGRESS_CHECK;
    settings->es_rw_once         = LSQUIC_DF_RW_ONCE;
    settings->es_proc_time_thresh= LSQUIC_DF_PROC_TIME_THRESH;
    settings->es_pace_packets    = LSQUIC_DF_PACE_PACKETS;
    settings->es_clock_granularity = LSQUIC_DF_CLOCK_GRANULARITY;
    settings->es_max_inchoate    = LSQUIC_DF_MAX_INCHOATE;
    settings->es_send_prst       = LSQUIC_DF_SEND_PRST;
    settings->es_sttl            = LSQUIC_DF_STTL;
    settings->es_init_max_streams_bidi
                                 = LSQUIC_DF_INIT_MAX_STREAMS_BIDI;
    settings->es_scid_len        = LSQUIC_DF_SCID_LEN;
    settings->es_scid_iss_rate   = LSQUIC_DF_SCID_ISS_RATE;
    settings->es_qpack_dec_max_size = LSQUIC_DF_QPACK_DEC_MAX_SIZE;
    settings->es_qpack_dec_max_blocked = LSQUIC_DF_QPACK_DEC_MAX_BLOCKED;
    settings->es_qpack_enc_max_size = LSQUIC_DF_QPACK_ENC_MAX_SIZE;
    settings->es_qpack_enc_max_blocked = LSQUIC_DF_QPACK_ENC_MAX_BLOCKED;
    settings->es_allow_migration = LSQUIC_DF_ALLOW_MIGRATION;
    settings->es_ql_bits         = LSQUIC_DF_QL_BITS;
    settings->es_spin            = LSQUIC_DF_SPIN;
    settings->es_delayed_acks    = LSQUIC_DF_DELAYED_ACKS;
    settings->es_timestamps      = LSQUIC_DF_TIMESTAMPS;
}
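/* A minimal sketch of how a caller typically combines the function above
 * with lsquic_engine_check_settings() (defined below) and lsquic_engine_new()
 * (further down in this file).  Error handling is abbreviated and only the
 * ea_settings member is shown; the other lsquic_engine_api members used by
 * lsquic_engine_new() (ea_packets_out, ea_stream_if, etc.) must be set too:
 *
 *     struct lsquic_engine_settings settings;
 *     struct lsquic_engine_api api;
 *     char errbuf[100];
 *
 *     lsquic_engine_init_settings(&settings, LSENG_SERVER);
 *     settings.es_idle_timeout = 30;              // override a default
 *     if (0 != lsquic_engine_check_settings(&settings, LSENG_SERVER,
 *                                             errbuf, sizeof(errbuf)))
 *         return -1;                              // errbuf explains why
 *     memset(&api, 0, sizeof(api));
 *     api.ea_settings = &settings;
 *     // ... set ea_packets_out, ea_stream_if, and friends ...
 *     engine = lsquic_engine_new(LSENG_SERVER, &api);
 */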

/* Note: if returning an error, err_buf must be valid if non-NULL */
int
lsquic_engine_check_settings (const struct lsquic_engine_settings *settings,
                              unsigned flags,
                              char *err_buf, size_t err_buf_sz)
{
    if (settings->es_cfcw < LSQUIC_MIN_FCW ||
        settings->es_sfcw < LSQUIC_MIN_FCW)
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "flow control window set too low");
        return -1;
    }
    if (0 == (settings->es_versions & LSQUIC_SUPPORTED_VERSIONS))
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "No supported QUIC versions specified");
        return -1;
    }
    if (settings->es_versions & ~LSQUIC_SUPPORTED_VERSIONS)
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "one or more unsupported QUIC versions are specified");
        return -1;
    }
    if (flags & ENG_SERVER)
    {
        if (settings->es_handshake_to >
                                    MAX_MINI_CONN_LIFESPAN_IN_USEC)
        {
            if (err_buf)
                snprintf(err_buf, err_buf_sz, "handshake timeout %lu"
usec is too large. The maximum for server is %u usec", 398 settings->es_handshake_to, MAX_MINI_CONN_LIFESPAN_IN_USEC); 399 return -1; 400 } 401 } 402 if (settings->es_idle_timeout > 600) 403 { 404 if (err_buf) 405 snprintf(err_buf, err_buf_sz, "%s", 406 "The maximum value of idle timeout is 600 seconds"); 407 return -1; 408 } 409 if (settings->es_scid_len > MAX_CID_LEN) 410 { 411 if (err_buf) 412 snprintf(err_buf, err_buf_sz, "Source connection ID cannot be %u " 413 "bytes long; it must be between 0 and %u.", 414 settings->es_scid_len, MAX_CID_LEN); 415 return -1; 416 } 417 418 if (settings->es_cc_algo > 2) 419 { 420 if (err_buf) 421 snprintf(err_buf, err_buf_sz, "Invalid congestion control " 422 "algorithm value %u", settings->es_cc_algo); 423 return -1; 424 } 425 426 if (!(settings->es_ql_bits >= 0 && settings->es_ql_bits <= 2)) 427 { 428 if (err_buf) 429 snprintf(err_buf, err_buf_sz, "Invalid QL bits value %d ", 430 settings->es_ql_bits); 431 return -1; 432 } 433 434 if (!(settings->es_spin == 0 || settings->es_spin == 1)) 435 { 436 if (err_buf) 437 snprintf(err_buf, err_buf_sz, "Invalid spin value %d", 438 settings->es_spin); 439 return -1; 440 } 441 442 return 0; 443} 444 445 446static void 447free_packet (void *ctx, void *conn_ctx, void *packet_data, char is_ipv6) 448{ 449 free(packet_data); 450} 451 452 453static void * 454malloc_buf (void *ctx, void *conn_ctx, unsigned short size, char is_ipv6) 455{ 456 return malloc(size); 457} 458 459 460static const struct lsquic_packout_mem_if stock_pmi = 461{ 462 malloc_buf, free_packet, free_packet, 463}; 464 465 466static int 467hash_conns_by_addr (const struct lsquic_engine *engine) 468{ 469 if (engine->flags & ENG_SERVER) 470 return 0; 471 if (engine->pub.enp_settings.es_versions & LSQUIC_FORCED_TCID0_VERSIONS) 472 return 1; 473 if ((engine->pub.enp_settings.es_versions & LSQUIC_GQUIC_HEADER_VERSIONS) 474 && engine->pub.enp_settings.es_support_tcid0) 475 return 1; 476 if (engine->pub.enp_settings.es_scid_len == 0) 477 return 1; 478 return 0; 479} 480 481 482lsquic_engine_t * 483lsquic_engine_new (unsigned flags, 484 const struct lsquic_engine_api *api) 485{ 486 lsquic_engine_t *engine; 487 size_t alpn_len; 488 unsigned i; 489 char err_buf[100]; 490 491 if (!api->ea_packets_out) 492 { 493 LSQ_ERROR("packets_out callback is not specified"); 494 return NULL; 495 } 496 497 if (!api->ea_stream_if) 498 { 499 LSQ_ERROR("stream interface is not specified"); 500 return NULL; 501 } 502 503 if (!(flags & LSENG_HTTP) && api->ea_alpn) 504 { 505 alpn_len = strlen(api->ea_alpn); 506 if (alpn_len < 1 || alpn_len > 255) 507 { 508 LSQ_ERROR("ALPN string length invalid: %zd bytes", alpn_len); 509 return NULL; 510 } 511 } 512 else 513 alpn_len = 0; 514 515 if (api->ea_settings && 516 0 != lsquic_engine_check_settings(api->ea_settings, flags, 517 err_buf, sizeof(err_buf))) 518 { 519 LSQ_ERROR("cannot create engine: %s", err_buf); 520 return NULL; 521 } 522 523 engine = calloc(1, sizeof(*engine)); 524 if (!engine) 525 return NULL; 526 if (0 != lsquic_mm_init(&engine->pub.enp_mm)) 527 { 528 free(engine); 529 return NULL; 530 } 531 if (api->ea_settings) 532 engine->pub.enp_settings = *api->ea_settings; 533 else 534 lsquic_engine_init_settings(&engine->pub.enp_settings, flags); 535 int tag_buf_len; 536 tag_buf_len = lsquic_gen_ver_tags(engine->pub.enp_ver_tags_buf, 537 sizeof(engine->pub.enp_ver_tags_buf), 538 engine->pub.enp_settings.es_versions); 539 if (tag_buf_len <= 0) 540 { 541 LSQ_ERROR("cannot generate version tags buffer"); 542 free(engine); 543 return 
NULL; 544 } 545 engine->pub.enp_ver_tags_len = tag_buf_len; 546 engine->pub.enp_flags = ENPUB_CAN_SEND; 547 engine->pub.enp_stream_if = api->ea_stream_if; 548 engine->pub.enp_stream_if_ctx = api->ea_stream_if_ctx; 549 550 engine->flags = flags; 551#ifndef NDEBUG 552 engine->flags |= ENG_COALESCE; 553#endif 554 engine->packets_out = api->ea_packets_out; 555 engine->packets_out_ctx = api->ea_packets_out_ctx; 556 engine->report_new_scids = api->ea_new_scids; 557 engine->report_live_scids = api->ea_live_scids; 558 engine->report_old_scids = api->ea_old_scids; 559 engine->scids_ctx = api->ea_cids_update_ctx; 560 cub_init(&engine->new_scids, engine->report_new_scids, engine->scids_ctx); 561 engine->pub.enp_lookup_cert = api->ea_lookup_cert; 562 engine->pub.enp_cert_lu_ctx = api->ea_cert_lu_ctx; 563 engine->pub.enp_get_ssl_ctx = api->ea_get_ssl_ctx; 564 if (api->ea_shi) 565 { 566 engine->pub.enp_shi = api->ea_shi; 567 engine->pub.enp_shi_ctx = api->ea_shi_ctx; 568 } 569 else 570 { 571 engine->pub.enp_shi = &stock_shi; 572 engine->pub.enp_shi_ctx = lsquic_stock_shared_hash_new(); 573 if (!engine->pub.enp_shi_ctx) 574 { 575 free(engine); 576 return NULL; 577 } 578 } 579 if (api->ea_hsi_if) 580 { 581 engine->pub.enp_hsi_if = api->ea_hsi_if; 582 engine->pub.enp_hsi_ctx = api->ea_hsi_ctx; 583 } 584 else 585 { 586 engine->pub.enp_hsi_if = lsquic_http1x_if; 587 engine->pub.enp_hsi_ctx = NULL; 588 } 589 if (api->ea_pmi) 590 { 591 engine->pub.enp_pmi = api->ea_pmi; 592 engine->pub.enp_pmi_ctx = api->ea_pmi_ctx; 593 } 594 else 595 { 596 engine->pub.enp_pmi = &stock_pmi; 597 engine->pub.enp_pmi_ctx = NULL; 598 } 599 engine->pub.enp_verify_cert = api->ea_verify_cert; 600 engine->pub.enp_verify_ctx = api->ea_verify_ctx; 601 engine->pub.enp_kli = api->ea_keylog_if; 602 engine->pub.enp_kli_ctx = api->ea_keylog_ctx; 603 engine->pub.enp_engine = engine; 604 if (hash_conns_by_addr(engine)) 605 engine->flags |= ENG_CONNS_BY_ADDR; 606 engine->conns_hash = lsquic_hash_create(); 607 engine->pub.enp_tokgen = lsquic_tg_new(&engine->pub); 608 if (!engine->pub.enp_tokgen) 609 return NULL; 610 engine->pub.enp_crand = &engine->crand; 611 if (engine->pub.enp_settings.es_noprogress_timeout) 612 engine->pub.enp_noprog_timeout 613 = engine->pub.enp_settings.es_noprogress_timeout * 1000000; 614 if (flags & ENG_SERVER) 615 { 616 engine->pr_queue = lsquic_prq_create( 617 10000 /* TODO: make configurable */, MAX_OUT_BATCH_SIZE, 618 &engine->pub); 619 if (!engine->pr_queue) 620 { 621 lsquic_tg_destroy(engine->pub.enp_tokgen); 622 return NULL; 623 } 624 engine->purga = lsquic_purga_new(30 * 1000 * 1000, 625 engine->report_old_scids, engine->scids_ctx); 626 if (!engine->purga) 627 { 628 lsquic_tg_destroy(engine->pub.enp_tokgen); 629 lsquic_prq_destroy(engine->pr_queue); 630 return NULL; 631 } 632 } 633 engine->attq = lsquic_attq_create(); 634 eng_hist_init(&engine->history); 635 engine->batch_size = INITIAL_OUT_BATCH_SIZE; 636 if (engine->pub.enp_settings.es_honor_prst) 637 { 638 engine->pub.enp_srst_hash = lsquic_hash_create(); 639 if (!engine->pub.enp_srst_hash) 640 { 641 lsquic_engine_destroy(engine); 642 return NULL; 643 } 644 } 645 646 if (alpn_len) 647 { 648 engine->pub.enp_alpn = malloc(alpn_len + 1); 649 if (!engine->pub.enp_alpn) 650 { 651 lsquic_engine_destroy(engine); 652 return NULL; 653 } 654 engine->pub.enp_alpn[0] = alpn_len; 655 memcpy(engine->pub.enp_alpn + 1, api->ea_alpn, alpn_len); 656 } 657 658 if (flags & LSENG_HTTP) 659 engine->pub.enp_flags |= ENPUB_HTTP; 660 661#ifndef NDEBUG 662 { 663 const char *env; 664 env 
= getenv("LSQUIC_LOSE_PACKETS_RE"); 665#if CAN_LOSE_PACKETS 666 if (env) 667 { 668 if (0 != regcomp(&engine->lose_packets_re, env, 669 REG_EXTENDED|REG_NOSUB)) 670 { 671 LSQ_ERROR("could not compile lost packet regex `%s'", env); 672 return NULL; 673 } 674 engine->flags |= ENG_LOSE_PACKETS; 675 engine->lose_packets_str = env; 676 LSQ_WARN("will lose packets that match the following regex: %s", 677 env); 678 } 679#endif 680 env = getenv("LSQUIC_COALESCE"); 681 if (env) 682 { 683 engine->flags &= ~ENG_COALESCE; 684 if (atoi(env)) 685 { 686 engine->flags |= ENG_COALESCE; 687 LSQ_NOTICE("will coalesce packets"); 688 } 689 else 690 LSQ_NOTICE("will not coalesce packets"); 691 } 692 } 693#endif 694#if LSQUIC_CONN_STATS 695 engine->stats_fh = api->ea_stats_fh; 696#endif 697 for (i = 0; i < sizeof(engine->retry_aead_ctx) 698 / sizeof(engine->retry_aead_ctx[0]); ++i) 699 if (1 != EVP_AEAD_CTX_init(&engine->retry_aead_ctx[i], 700 EVP_aead_aes_128_gcm(), lsquic_retry_key_buf[i], 701 IETF_RETRY_KEY_SZ, 16, NULL)) 702 { 703 LSQ_ERROR("could not initialize retry AEAD ctx #%u", i); 704 lsquic_engine_destroy(engine); 705 return NULL; 706 } 707 engine->pub.enp_retry_aead_ctx = engine->retry_aead_ctx; 708 709 LSQ_INFO("instantiated engine"); 710 return engine; 711} 712 713 714#if LOG_PACKET_CHECKSUM 715static void 716log_packet_checksum (const lsquic_cid_t *cid, const char *direction, 717 const unsigned char *buf, size_t bufsz) 718{ 719 EV_LOG_CONN_EVENT(cid, "packet %s checksum: %08X", direction, 720 (uint32_t) crc32(0, buf, bufsz)); 721} 722 723 724#endif 725 726 727static void 728grow_batch_size (struct lsquic_engine *engine) 729{ 730 engine->batch_size <<= engine->batch_size < MAX_OUT_BATCH_SIZE; 731} 732 733 734static void 735shrink_batch_size (struct lsquic_engine *engine) 736{ 737 engine->batch_size >>= engine->batch_size > MIN_OUT_BATCH_SIZE; 738} 739 740 741struct cce_cid_iter 742{ 743 const struct lsquic_conn *conn; 744 unsigned todo, n; 745}; 746 747 748static struct conn_cid_elem * 749cce_iter_next (struct cce_cid_iter *citer) 750{ 751 struct conn_cid_elem *cce; 752 753 while (citer->todo) 754 if (citer->todo & (1 << citer->n)) 755 { 756 citer->todo &= ~(1 << citer->n); 757 cce = &citer->conn->cn_cces[ citer->n++ ]; 758 if (!(cce->cce_flags & CCE_PORT)) 759 return cce; 760 } 761 else 762 ++citer->n; 763 764 return NULL; 765} 766 767 768static struct conn_cid_elem * 769cce_iter_first (struct cce_cid_iter *citer, const struct lsquic_conn *conn) 770{ 771 citer->conn = conn; 772 citer->todo = conn->cn_cces_mask; 773 citer->n = 0; 774 return cce_iter_next(citer); 775} 776 777 778#if LSQUIC_CONN_STATS 779void 780update_stats_sum (struct lsquic_engine *engine, struct lsquic_conn *conn) 781{ 782 unsigned long *const dst = (unsigned long *) &engine->conn_stats_sum; 783 const unsigned long *src; 784 const struct conn_stats *stats; 785 unsigned i; 786 787 if (conn->cn_if->ci_get_stats && (stats = conn->cn_if->ci_get_stats(conn))) 788 { 789 ++engine->stats.conns; 790 src = (unsigned long *) stats; 791 for (i = 0; i < sizeof(*stats) / sizeof(unsigned long); ++i) 792 dst[i] += src[i]; 793 } 794} 795 796 797#endif 798 799 800/* Wrapper to make sure important things occur before the connection is 801 * really destroyed. 
802 */ 803static void 804destroy_conn (struct lsquic_engine *engine, struct lsquic_conn *conn, 805 lsquic_time_t now) 806{ 807 struct cce_cid_iter citer; 808 const struct conn_cid_elem *cce; 809 lsquic_time_t drain_time; 810 struct purga_el *puel; 811 812 engine->mini_conns_count -= !!(conn->cn_flags & LSCONN_MINI); 813 if (engine->purga 814 /* Blacklist all CIDs except for promoted mini connections */ 815 && (conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) 816 != (LSCONN_MINI|LSCONN_PROMOTED)) 817 { 818 if (!(conn->cn_flags & LSCONN_IMMED_CLOSE) 819 && conn->cn_if->ci_drain_time && 820 (drain_time = conn->cn_if->ci_drain_time(conn), drain_time)) 821 { 822 for (cce = cce_iter_first(&citer, conn); cce; 823 cce = cce_iter_next(&citer)) 824 { 825 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 826 lsquic_conn_get_peer_ctx(conn, NULL), 827 PUTY_CONN_DRAIN, now); 828 if (puel) 829 puel->puel_time = now + drain_time; 830 } 831 } 832 else 833 { 834 for (cce = cce_iter_first(&citer, conn); cce; 835 cce = cce_iter_next(&citer)) 836 { 837 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 838 lsquic_conn_get_peer_ctx(conn, NULL), 839 PUTY_CONN_DELETED, now); 840 if (puel) 841 { 842 puel->puel_time = now; 843 puel->puel_count = 0; 844 } 845 } 846 } 847 } 848#if LSQUIC_CONN_STATS 849 update_stats_sum(engine, conn); 850#endif 851 --engine->n_conns; 852 conn->cn_flags |= LSCONN_NEVER_TICKABLE; 853 conn->cn_if->ci_destroy(conn); 854} 855 856 857static int 858maybe_grow_conn_heaps (struct lsquic_engine *engine) 859{ 860 struct min_heap_elem *els; 861 unsigned count; 862 863 if (engine->n_conns < lsquic_mh_nalloc(&engine->conns_tickable)) 864 return 0; /* Nothing to do */ 865 866 if (lsquic_mh_nalloc(&engine->conns_tickable)) 867 count = lsquic_mh_nalloc(&engine->conns_tickable) * 2 * 2; 868 else 869 count = 8; 870 871 els = malloc(sizeof(els[0]) * count); 872 if (!els) 873 { 874 LSQ_ERROR("%s: malloc failed", __func__); 875 return -1; 876 } 877 878 LSQ_DEBUG("grew heaps to %u elements", count / 2); 879 memcpy(&els[0], engine->conns_tickable.mh_elems, 880 sizeof(els[0]) * lsquic_mh_count(&engine->conns_tickable)); 881 memcpy(&els[count / 2], engine->conns_out.mh_elems, 882 sizeof(els[0]) * lsquic_mh_count(&engine->conns_out)); 883 free(engine->conns_tickable.mh_elems); 884 engine->conns_tickable.mh_elems = els; 885 engine->conns_out.mh_elems = &els[count / 2]; 886 engine->conns_tickable.mh_nalloc = count / 2; 887 engine->conns_out.mh_nalloc = count / 2; 888 return 0; 889} 890 891 892static void 893remove_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn, 894 unsigned todo) 895{ 896 unsigned n; 897 898 for (n = 0; todo; todo &= ~(1 << n++)) 899 if ((todo & (1 << n)) && 900 (conn->cn_cces[n].cce_hash_el.qhe_flags & QHE_HASHED)) 901 lsquic_hash_erase(hash, &conn->cn_cces[n].cce_hash_el); 902} 903 904 905static void 906remove_all_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn) 907{ 908 remove_cces_from_hash(hash, conn, conn->cn_cces_mask); 909} 910 911 912static void 913cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx); 914 915 916static int 917insert_conn_into_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 918 void *peer_ctx) 919{ 920 struct conn_cid_elem *cce; 921 unsigned todo, done, n; 922 923 for (todo = conn->cn_cces_mask, done = 0, n = 0; todo; todo &= ~(1 << n++)) 924 if (todo & (1 << n)) 925 { 926 cce = &conn->cn_cces[n]; 927 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 928 if 
(lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 929 cce->cce_cid.len, conn, &cce->cce_hash_el)) 930 done |= 1 << n; 931 else 932 goto err; 933 if ((engine->flags & ENG_SERVER) && 0 == (cce->cce_flags & CCE_REG)) 934 { 935 cce->cce_flags |= CCE_REG; 936 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 937 } 938 } 939 940 return 0; 941 942 err: 943 remove_cces_from_hash(engine->conns_hash, conn, done); 944 return -1; 945} 946 947 948static lsquic_conn_t * 949new_full_conn_server (lsquic_engine_t *engine, lsquic_conn_t *mini_conn, 950 lsquic_time_t now) 951{ 952 const lsquic_cid_t *cid; 953 server_conn_ctor_f ctor; 954 lsquic_conn_t *conn; 955 unsigned flags; 956 if (0 != maybe_grow_conn_heaps(engine)) 957 return NULL; 958 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 959 960 if (mini_conn->cn_flags & LSCONN_IETF) 961 ctor = lsquic_ietf_full_conn_server_new; 962 else 963 ctor = lsquic_gquic_full_conn_server_new; 964 965 conn = ctor(&engine->pub, flags, mini_conn); 966 if (!conn) 967 { 968 /* Otherwise, full_conn_server_new prints its own warnings */ 969 if (ENOMEM == errno) 970 { 971 cid = lsquic_conn_log_cid(mini_conn); 972 LSQ_WARNC("could not allocate full connection for %"CID_FMT": %s", 973 CID_BITS(cid), strerror(errno)); 974 } 975 return NULL; 976 } 977 ++engine->n_conns; 978 if (0 != insert_conn_into_hash(engine, conn, lsquic_conn_get_peer_ctx(conn, NULL))) 979 { 980 cid = lsquic_conn_log_cid(conn); 981 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 982 CID_BITS(cid)); 983 destroy_conn(engine, conn, now); 984 return NULL; 985 } 986 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 987 conn->cn_flags |= LSCONN_HASHED; 988 return conn; 989} 990 991 992static enum 993{ 994 VER_NOT_SPECIFIED, 995 VER_SUPPORTED, 996 VER_UNSUPPORTED, 997} 998 999 1000version_matches (lsquic_engine_t *engine, const lsquic_packet_in_t *packet_in, 1001 enum lsquic_version *pversion) 1002{ 1003 lsquic_ver_tag_t ver_tag; 1004 enum lsquic_version version; 1005 1006 if (!packet_in->pi_quic_ver) 1007 { 1008 LSQ_DEBUG("packet does not specify version"); 1009 return VER_NOT_SPECIFIED; 1010 } 1011 1012 memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver, sizeof(ver_tag)); 1013 version = lsquic_tag2ver(ver_tag); 1014 if (version < N_LSQVER) 1015 { 1016 if (engine->pub.enp_settings.es_versions & (1 << version)) 1017 { 1018 LSQ_DEBUG("client-supplied version %s is supported", 1019 lsquic_ver2str[version]); 1020 *pversion = version; 1021 return VER_SUPPORTED; 1022 } 1023 else 1024 LSQ_DEBUG("client-supplied version %s is not supported", 1025 lsquic_ver2str[version]); 1026 } 1027 else 1028 LSQ_DEBUG("client-supplied version tag 0x%08X is not recognized", 1029 ver_tag); 1030 1031 return VER_UNSUPPORTED; 1032} 1033 1034 1035static void 1036schedule_req_packet (struct lsquic_engine *engine, enum packet_req_type type, 1037 const struct lsquic_packet_in *packet_in, const struct sockaddr *sa_local, 1038 const struct sockaddr *sa_peer, void *peer_ctx) 1039{ 1040 assert(engine->pr_queue); 1041 if (0 == lsquic_prq_new_req(engine->pr_queue, type, packet_in, peer_ctx, 1042 sa_local, sa_peer)) 1043 LSQ_DEBUGC("scheduled %s packet for cid %"CID_FMT, 1044 lsquic_preqt2str[type], CID_BITS(&packet_in->pi_conn_id)); 1045 else 1046 LSQ_DEBUG("cannot schedule %s packet", lsquic_preqt2str[type]); 1047} 1048 1049 1050static unsigned short 1051sa2port (const struct sockaddr *sa) 1052{ 1053 if (sa->sa_family == AF_INET) 1054 { 1055 struct sockaddr_in *const sa4 = (void *) sa; 1056 return sa4->sin_port; 1057 } 
1058 else 1059 { 1060 struct sockaddr_in6 *const sa6 = (void *) sa; 1061 return sa6->sin6_port; 1062 } 1063} 1064 1065 1066static struct lsquic_hash_elem * 1067find_conn_by_addr (struct lsquic_hash *hash, const struct sockaddr *sa) 1068{ 1069 unsigned short port; 1070 1071 port = sa2port(sa); 1072 return lsquic_hash_find(hash, &port, sizeof(port)); 1073} 1074 1075 1076static lsquic_conn_t * 1077find_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1078 struct packin_parse_state *ppstate, const struct sockaddr *sa_local) 1079{ 1080 struct lsquic_hash_elem *el; 1081 lsquic_conn_t *conn; 1082 1083 if (engine->flags & ENG_CONNS_BY_ADDR) 1084 el = find_conn_by_addr(engine->conns_hash, sa_local); 1085 else if (packet_in->pi_flags & PI_CONN_ID) 1086 el = lsquic_hash_find(engine->conns_hash, 1087 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1088 else 1089 { 1090 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1091 return NULL; 1092 } 1093 1094 if (!el) 1095 return NULL; 1096 1097 conn = lsquic_hashelem_getdata(el); 1098 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1099 if ((engine->flags & ENG_CONNS_BY_ADDR) 1100 && !(conn->cn_flags & LSCONN_IETF) 1101 && (packet_in->pi_flags & PI_CONN_ID) 1102 && !LSQUIC_CIDS_EQ(CN_SCID(conn), &packet_in->pi_conn_id)) 1103 { 1104 LSQ_DEBUG("connection IDs do not match"); 1105 return NULL; 1106 } 1107 1108 return conn; 1109} 1110 1111 1112static lsquic_conn_t * 1113find_or_create_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1114 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1115 const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size) 1116{ 1117 struct lsquic_hash_elem *el; 1118 struct purga_el *puel; 1119 lsquic_conn_t *conn; 1120 1121 if (!(packet_in->pi_flags & PI_CONN_ID)) 1122 { 1123 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1124 return NULL; 1125 } 1126 el = lsquic_hash_find(engine->conns_hash, 1127 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1128 1129 if (el) 1130 { 1131 conn = lsquic_hashelem_getdata(el); 1132 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1133 return conn; 1134 } 1135 1136 if (engine->flags & ENG_COOLDOWN) 1137 { /* Do not create incoming connections during cooldown */ 1138 LSQ_DEBUG("dropping inbound packet for unknown connection (cooldown)"); 1139 return NULL; 1140 } 1141 1142 if (engine->mini_conns_count >= engine->pub.enp_settings.es_max_inchoate) 1143 { 1144 LSQ_DEBUG("reached limit of %u inchoate connections", 1145 engine->pub.enp_settings.es_max_inchoate); 1146 return NULL; 1147 } 1148 1149 1150 if (engine->purga 1151 && (puel = lsquic_purga_contains(engine->purga, 1152 &packet_in->pi_conn_id), puel)) 1153 { 1154 switch (puel->puel_type) 1155 { 1156 case PUTY_CID_RETIRED: 1157 LSQ_DEBUGC("CID %"CID_FMT" was retired, ignore packet", 1158 CID_BITS(&packet_in->pi_conn_id)); 1159 return NULL; 1160 case PUTY_CONN_DRAIN: 1161 LSQ_DEBUG("drain till: %"PRIu64"; now: %"PRIu64, 1162 puel->puel_time, packet_in->pi_received); 1163 if (puel->puel_time > packet_in->pi_received) 1164 { 1165 LSQ_DEBUGC("CID %"CID_FMT" is in drain state, ignore packet", 1166 CID_BITS(&packet_in->pi_conn_id)); 1167 return NULL; 1168 } 1169 LSQ_DEBUGC("CID %"CID_FMT" goes from drain state to deleted", 1170 CID_BITS(&packet_in->pi_conn_id)); 1171 puel->puel_type = PUTY_CONN_DELETED; 1172 puel->puel_count = 0; 1173 puel->puel_time = 0; 1174 /* fall-through */ 1175 case PUTY_CONN_DELETED: 1176 
LSQ_DEBUGC("Connection with CID %"CID_FMT" was deleted", 1177 CID_BITS(&packet_in->pi_conn_id)); 1178 if (puel->puel_time < packet_in->pi_received) 1179 { 1180 puel->puel_time = packet_in->pi_received 1181 /* Exponential back-off */ 1182 + 1000000ull * (1 << MIN(puel->puel_count, 4)); 1183 ++puel->puel_count; 1184 goto maybe_send_prst; 1185 } 1186 return NULL; 1187 default: 1188 assert(0); 1189 return NULL; 1190 } 1191 } 1192 1193 if (engine->pub.enp_settings.es_send_prst 1194 && !(packet_in->pi_flags & PI_GQUIC) 1195 && HETY_NOT_SET == packet_in->pi_header_type) 1196 goto maybe_send_prst; 1197 1198 if (0 != maybe_grow_conn_heaps(engine)) 1199 return NULL; 1200 1201 const struct parse_funcs *pf; 1202 enum lsquic_version version; 1203 switch (version_matches(engine, packet_in, &version)) 1204 { 1205 case VER_UNSUPPORTED: 1206 if (engine->flags & ENG_SERVER) 1207 schedule_req_packet(engine, PACKET_REQ_VERNEG, packet_in, 1208 sa_local, sa_peer, peer_ctx); 1209 return NULL; 1210 case VER_NOT_SPECIFIED: 1211 maybe_send_prst: 1212 if ((engine->flags & ENG_SERVER) && 1213 engine->pub.enp_settings.es_send_prst) 1214 schedule_req_packet(engine, PACKET_REQ_PUBRES, packet_in, 1215 sa_local, sa_peer, peer_ctx); 1216 return NULL; 1217 case VER_SUPPORTED: 1218 pf = select_pf_by_ver(version); 1219 pf->pf_parse_packet_in_finish(packet_in, ppstate); 1220 break; 1221 } 1222 1223 1224 if ((1 << version) & LSQUIC_IETF_VERSIONS) 1225 { 1226 conn = lsquic_mini_conn_ietf_new(&engine->pub, packet_in, version, 1227 sa_peer->sa_family == AF_INET, NULL, packet_in_size); 1228 } 1229 else 1230 { 1231 conn = lsquic_mini_conn_new(&engine->pub, packet_in, version); 1232 } 1233 if (!conn) 1234 return NULL; 1235 ++engine->mini_conns_count; 1236 ++engine->n_conns; 1237 if (0 != insert_conn_into_hash(engine, conn, peer_ctx)) 1238 { 1239 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1240 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1241 CID_BITS(cid)); 1242 destroy_conn(engine, conn, packet_in->pi_received); 1243 return NULL; 1244 } 1245 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 1246 conn->cn_flags |= LSCONN_HASHED; 1247 eng_hist_inc(&engine->history, packet_in->pi_received, sl_new_mini_conns); 1248 conn->cn_last_sent = engine->last_sent; 1249 return conn; 1250} 1251 1252 1253lsquic_conn_t * 1254lsquic_engine_find_conn (const struct lsquic_engine_public *engine, 1255 const lsquic_cid_t *cid) 1256{ 1257 struct lsquic_hash_elem *el; 1258 lsquic_conn_t *conn = NULL; 1259 el = lsquic_hash_find(engine->enp_engine->conns_hash, cid->idbuf, cid->len); 1260 1261 if (el) 1262 conn = lsquic_hashelem_getdata(el); 1263 return conn; 1264} 1265 1266 1267#if !defined(NDEBUG) && __GNUC__ 1268__attribute__((weak)) 1269#endif 1270void 1271lsquic_engine_add_conn_to_tickable (struct lsquic_engine_public *enpub, 1272 lsquic_conn_t *conn) 1273{ 1274 if (0 == (enpub->enp_flags & ENPUB_PROC) && 1275 0 == (conn->cn_flags & (LSCONN_TICKABLE|LSCONN_NEVER_TICKABLE))) 1276 { 1277 lsquic_engine_t *engine = (lsquic_engine_t *) enpub; 1278 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1279 engine_incref_conn(conn, LSCONN_TICKABLE); 1280 } 1281} 1282 1283 1284void 1285lsquic_engine_add_conn_to_attq (struct lsquic_engine_public *enpub, 1286 lsquic_conn_t *conn, lsquic_time_t tick_time, unsigned why) 1287{ 1288 lsquic_engine_t *const engine = (lsquic_engine_t *) enpub; 1289 if (conn->cn_flags & LSCONN_TICKABLE) 1290 { 1291 /* Optimization: no need to add the connection to the Advisory Tick 1292 * Time Queue: 
         * it is about to be ticked, after which its next tick time may
         * be queried again.
         */;
    }
    else if (conn->cn_flags & LSCONN_ATTQ)
    {
        if (lsquic_conn_adv_time(conn) != tick_time)
        {
            lsquic_attq_remove(engine->attq, conn);
            if (0 != lsquic_attq_add(engine->attq, conn, tick_time, why))
                engine_decref_conn(engine, conn, LSCONN_ATTQ);
        }
    }
    else if (0 == lsquic_attq_add(engine->attq, conn, tick_time, why))
        engine_incref_conn(conn, LSCONN_ATTQ);
}


static struct lsquic_conn *
find_conn_by_srst (struct lsquic_engine *engine,
                                    const struct lsquic_packet_in *packet_in)
{
    struct lsquic_hash_elem *el;
    struct lsquic_conn *conn;

    if (packet_in->pi_data_sz < IQUIC_MIN_SRST_SIZE
                        || (packet_in->pi_data[0] & 0xC0) != 0x40)
        return NULL;

    el = lsquic_hash_find(engine->pub.enp_srst_hash,
            packet_in->pi_data + packet_in->pi_data_sz - IQUIC_SRESET_TOKEN_SZ,
            IQUIC_SRESET_TOKEN_SZ);
    if (!el)
        return NULL;

    conn = lsquic_hashelem_getdata(el);
    return conn;
}


/* Return 0 if packet is being processed by a real connection (mini or full),
 * otherwise return 1.
 */
static int
process_packet_in (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in,
       struct packin_parse_state *ppstate, const struct sockaddr *sa_local,
       const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size)
{
    lsquic_conn_t *conn;
    const unsigned char *packet_in_data;

    if (lsquic_packet_in_is_gquic_prst(packet_in)
                                && !engine->pub.enp_settings.es_honor_prst)
    {
        lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in);
        LSQ_DEBUG("public reset packet: discarding");
        return 1;
    }

    if (engine->flags & ENG_SERVER)
    {
        conn = find_or_create_conn(engine, packet_in, ppstate, sa_local,
                                        sa_peer, peer_ctx, packet_in_size);
        if (!engine->curr_conn)
            engine->curr_conn = conn;
    }
    else
        conn = find_conn(engine, packet_in, ppstate, sa_local);

    if (!conn)
    {
        if (engine->pub.enp_settings.es_honor_prst
                && packet_in_size == packet_in->pi_data_sz /* Full UDP packet */
                && !(packet_in->pi_flags & PI_GQUIC)
                && engine->pub.enp_srst_hash
                && (conn = find_conn_by_srst(engine, packet_in)))
        {
            LSQ_DEBUGC("got stateless reset for connection %"CID_FMT,
                CID_BITS(lsquic_conn_log_cid(conn)));
            conn->cn_if->ci_stateless_reset(conn);
            if (!(conn->cn_flags & LSCONN_TICKABLE)
                && conn->cn_if->ci_is_tickable(conn))
            {
                lsquic_mh_insert(&engine->conns_tickable, conn,
                                                    conn->cn_last_ticked);
                engine_incref_conn(conn, LSCONN_TICKABLE);
            }
            /* Even though the connection processes this packet, we return
             * 1 so that the caller does not add reset packet's random
             * bytes to the list of valid CIDs.
             */
        }
        lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in);
        return 1;
    }

    if (0 == (conn->cn_flags & LSCONN_TICKABLE))
    {
        lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked);
        engine_incref_conn(conn, LSCONN_TICKABLE);
    }
    packet_in->pi_path_id = lsquic_conn_record_sockaddr(conn, peer_ctx,
                                                        sa_local, sa_peer);
    lsquic_packet_in_upref(packet_in);
#if LOG_PACKET_CHECKSUM
    log_packet_checksum(lsquic_conn_log_cid(conn), "in", packet_in->pi_data,
                                                    packet_in->pi_data_sz);
#endif
    /* Note on QLog:
     * For the PACKET_RX QLog event, we are interested in logging these things:
     *  - raw packet (however it comes in, encrypted or not)
     *  - frames (list of frame names)
     *  - packet type and number
     *  - packet rx timestamp
     *
     * Since only some of these items are available at this code
     * juncture, we will wait until after the packet has been
     * decrypted (if necessary) and parsed to call the log functions.
     *
     * Once the PACKET_RX event is finally logged, the timestamp
     * will come from packet_in->pi_received.  For correct sequential
     * ordering of QLog events, be sure to process the QLogs downstream.
     * (Hint: Use the qlog_parser.py tool in tools/ for full QLog processing.)
     */
    packet_in_data = packet_in->pi_data;
    packet_in_size = packet_in->pi_data_sz;
    conn->cn_if->ci_packet_in(conn, packet_in);
    QLOG_PACKET_RX(lsquic_conn_log_cid(conn), packet_in, packet_in_data,
                                                            packet_in_size);
    lsquic_packet_in_put(&engine->pub.enp_mm, packet_in);
    return 0;
}
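/* To put process_packet_in() and the tickable/advisory-tick queues in
 * context, this is roughly the I/O loop an application is expected to
 * drive.  This is only a sketch: read_udp_datagram() and arm_timer() stand
 * in for application code, error handling is omitted, and it assumes the
 * public entry points lsquic_engine_packet_in() and
 * lsquic_engine_earliest_adv_tick() as declared in lsquic.h:
 *
 *     unsigned char buf[0xFFFF];
 *     ssize_t nread;
 *     int diff;
 *
 *     for ( ; ; )
 *     {
 *         // feed every received UDP datagram to the engine:
 *         while ((nread = read_udp_datagram(sock, buf, sizeof(buf),
 *                                             &local_sa, &peer_sa)) > 0)
 *             (void) lsquic_engine_packet_in(engine, buf, (size_t) nread,
 *                         (struct sockaddr *) &local_sa,
 *                         (struct sockaddr *) &peer_sa, peer_ctx, 0);
 *
 *         // tick connections that are ready:
 *         lsquic_engine_process_conns(engine);
 *
 *         // ask the engine when it next wants to be called:
 *         if (lsquic_engine_earliest_adv_tick(engine, &diff))
 *             arm_timer(diff);        // microseconds until next tick
 *     }
 */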

void
lsquic_engine_destroy (lsquic_engine_t *engine)
{
    struct lsquic_hash_elem *el;
    lsquic_conn_t *conn;
    unsigned i;

    LSQ_DEBUG("destroying engine");
#ifndef NDEBUG
    engine->flags |= ENG_DTOR;
#endif

    while ((conn = lsquic_mh_pop(&engine->conns_out)))
    {
        assert(conn->cn_flags & LSCONN_HAS_OUTGOING);
        (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING);
    }

    while ((conn = lsquic_mh_pop(&engine->conns_tickable)))
    {
        assert(conn->cn_flags & LSCONN_TICKABLE);
        (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE);
    }

    for (el = lsquic_hash_first(engine->conns_hash); el;
                            el = lsquic_hash_next(engine->conns_hash))
    {
        conn = lsquic_hashelem_getdata(el);
        force_close_conn(engine, conn);
    }
    lsquic_hash_destroy(engine->conns_hash);

    while ((conn = lsquic_attq_pop(engine->attq, UINT64_MAX)))
        (void) engine_decref_conn(engine, conn, LSCONN_ATTQ);

    assert(0 == engine->n_conns);
    assert(0 == engine->mini_conns_count);
    if (engine->pr_queue)
        lsquic_prq_destroy(engine->pr_queue);
    if (engine->purga)
        lsquic_purga_destroy(engine->purga);
    lsquic_attq_destroy(engine->attq);

    assert(0 == lsquic_mh_count(&engine->conns_out));
    assert(0 == lsquic_mh_count(&engine->conns_tickable));
    if (engine->pub.enp_shi == &stock_shi)
        lsquic_stock_shared_hash_destroy(engine->pub.enp_shi_ctx);
    lsquic_mm_cleanup(&engine->pub.enp_mm);
    free(engine->conns_tickable.mh_elems);
#if CAN_LOSE_PACKETS
    if (engine->flags & ENG_LOSE_PACKETS)
        regfree(&engine->lose_packets_re);
#endif
    if (engine->pub.enp_tokgen)
        lsquic_tg_destroy(engine->pub.enp_tokgen);
#if LSQUIC_CONN_STATS
    if (engine->stats_fh)
    {
        const struct conn_stats *const stats =
&engine->conn_stats_sum; 1484 fprintf(engine->stats_fh, "Aggregate connection stats collected by engine:\n"); 1485 fprintf(engine->stats_fh, "Connections: %u\n", engine->stats.conns); 1486 fprintf(engine->stats_fh, "Ticks: %lu\n", stats->n_ticks); 1487 fprintf(engine->stats_fh, "In:\n"); 1488 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->in.bytes); 1489 fprintf(engine->stats_fh, " packets: %lu\n", stats->in.packets); 1490 fprintf(engine->stats_fh, " undecryptable packets: %lu\n", stats->in.undec_packets); 1491 fprintf(engine->stats_fh, " duplicate packets: %lu\n", stats->in.dup_packets); 1492 fprintf(engine->stats_fh, " error packets: %lu\n", stats->in.err_packets); 1493 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->in.stream_frames); 1494 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->in.stream_data_sz); 1495 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1496 stats->in.headers_comp, stats->in.headers_uncomp, 1497 stats->in.headers_uncomp ? 1498 (double) stats->in.headers_comp / (double) stats->in.headers_uncomp 1499 : 0); 1500 fprintf(engine->stats_fh, " ACK frames: %lu\n", stats->in.n_acks); 1501 fprintf(engine->stats_fh, " ACK frames processed: %lu\n", stats->in.n_acks_proc); 1502 fprintf(engine->stats_fh, " ACK frames merged to new: %lu\n", stats->in.n_acks_merged[0]); 1503 fprintf(engine->stats_fh, " ACK frames merged to old: %lu\n", stats->in.n_acks_merged[1]); 1504 fprintf(engine->stats_fh, "Out:\n"); 1505 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->out.bytes); 1506 fprintf(engine->stats_fh, " packets: %lu\n", stats->out.packets); 1507 fprintf(engine->stats_fh, " acked via loss record: %lu\n", stats->out.acked_via_loss); 1508 fprintf(engine->stats_fh, " acks: %lu\n", stats->out.acks); 1509 fprintf(engine->stats_fh, " retx packets: %lu\n", stats->out.retx_packets); 1510 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->out.stream_frames); 1511 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->out.stream_data_sz); 1512 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1513 stats->out.headers_comp, stats->out.headers_uncomp, 1514 stats->out.headers_uncomp ? 
1515 (double) stats->out.headers_comp / (double) stats->out.headers_uncomp 1516 : 0); 1517 fprintf(engine->stats_fh, " ACKs: %lu\n", stats->out.acks); 1518 } 1519#endif 1520 if (engine->pub.enp_srst_hash) 1521 lsquic_hash_destroy(engine->pub.enp_srst_hash); 1522#if LSQUIC_COUNT_ENGINE_CALLS 1523 LSQ_NOTICE("number of calls into the engine: %lu", engine->n_engine_calls); 1524#endif 1525 for (i = 0; i < sizeof(engine->retry_aead_ctx) 1526 / sizeof(engine->retry_aead_ctx[0]); ++i) 1527 EVP_AEAD_CTX_cleanup(&engine->pub.enp_retry_aead_ctx[i]); 1528 free(engine->pub.enp_alpn); 1529 free(engine); 1530} 1531 1532 1533static struct conn_cid_elem * 1534find_free_cce (struct lsquic_conn *conn) 1535{ 1536 struct conn_cid_elem *cce; 1537 1538 for (cce = conn->cn_cces; cce < END_OF_CCES(conn); ++cce) 1539 if (!(conn->cn_cces_mask & (1 << (cce - conn->cn_cces)))) 1540 return cce; 1541 1542 return NULL; 1543} 1544 1545 1546static int 1547add_conn_to_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 1548 const struct sockaddr *local_sa, void *peer_ctx) 1549{ 1550 struct conn_cid_elem *cce; 1551 1552 if (engine->flags & ENG_CONNS_BY_ADDR) 1553 { 1554 cce = find_free_cce(conn); 1555 if (!cce) 1556 { 1557 LSQ_ERROR("cannot find free CCE"); 1558 return -1; 1559 } 1560 cce->cce_port = sa2port(local_sa); 1561 cce->cce_flags = CCE_PORT; 1562 if (lsquic_hash_insert(engine->conns_hash, &cce->cce_port, 1563 sizeof(cce->cce_port), conn, &cce->cce_hash_el)) 1564 { 1565 conn->cn_cces_mask |= 1 << (cce - conn->cn_cces); 1566 return 0; 1567 } 1568 else 1569 return -1; 1570 1571 } 1572 else 1573 return insert_conn_into_hash(engine, conn, peer_ctx); 1574} 1575 1576 1577lsquic_conn_t * 1578lsquic_engine_connect (lsquic_engine_t *engine, enum lsquic_version version, 1579 const struct sockaddr *local_sa, 1580 const struct sockaddr *peer_sa, 1581 void *peer_ctx, lsquic_conn_ctx_t *conn_ctx, 1582 const char *hostname, unsigned short max_packet_size, 1583 const unsigned char *zero_rtt, size_t zero_rtt_len, 1584 const unsigned char *token, size_t token_sz) 1585{ 1586 lsquic_conn_t *conn; 1587 unsigned flags, versions; 1588 int is_ipv4; 1589 1590 ENGINE_CALLS_INCR(engine); 1591 1592 if (engine->flags & ENG_SERVER) 1593 { 1594 LSQ_ERROR("`%s' must only be called in client mode", __func__); 1595 goto err; 1596 } 1597 1598 if (engine->flags & ENG_CONNS_BY_ADDR 1599 && find_conn_by_addr(engine->conns_hash, local_sa)) 1600 { 1601 LSQ_ERROR("cannot have more than one connection on the same port"); 1602 goto err; 1603 } 1604 1605 if (0 != maybe_grow_conn_heaps(engine)) 1606 return NULL; 1607 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 1608 is_ipv4 = peer_sa->sa_family == AF_INET; 1609 if (zero_rtt && zero_rtt_len) 1610 { 1611 version = lsquic_zero_rtt_version(zero_rtt, zero_rtt_len); 1612 if (version >= N_LSQVER) 1613 { 1614 LSQ_INFO("zero-rtt version is bad, won't use"); 1615 zero_rtt = NULL; 1616 zero_rtt_len = 0; 1617 } 1618 } 1619 if (version >= N_LSQVER) 1620 { 1621 if (version > N_LSQVER) 1622 LSQ_WARN("invalid version specified, engine will pick"); 1623 versions = engine->pub.enp_settings.es_versions; 1624 } 1625 else 1626 versions = 1u << version; 1627 if (versions & LSQUIC_IETF_VERSIONS) 1628 conn = lsquic_ietf_full_conn_client_new(&engine->pub, versions, 1629 flags, hostname, max_packet_size, 1630 is_ipv4, zero_rtt, zero_rtt_len, token, token_sz); 1631 else 1632 conn = lsquic_gquic_full_conn_client_new(&engine->pub, versions, 1633 flags, hostname, max_packet_size, is_ipv4, 1634 zero_rtt, zero_rtt_len); 1635 if 
(!conn) 1636 goto err; 1637 EV_LOG_CREATE_CONN(lsquic_conn_log_cid(conn), local_sa, peer_sa); 1638 EV_LOG_VER_NEG(lsquic_conn_log_cid(conn), "proposed", 1639 lsquic_ver2str[conn->cn_version]); 1640 ++engine->n_conns; 1641 lsquic_conn_record_sockaddr(conn, peer_ctx, local_sa, peer_sa); 1642 if (0 != add_conn_to_hash(engine, conn, local_sa, peer_ctx)) 1643 { 1644 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1645 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1646 CID_BITS(cid)); 1647 destroy_conn(engine, conn, lsquic_time_now()); 1648 goto err; 1649 } 1650 assert(!(conn->cn_flags & 1651 (CONN_REF_FLAGS 1652 & ~LSCONN_TICKABLE /* This flag may be set as effect of user 1653 callbacks */ 1654 ))); 1655 conn->cn_flags |= LSCONN_HASHED; 1656 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1657 engine_incref_conn(conn, LSCONN_TICKABLE); 1658 lsquic_conn_set_ctx(conn, conn_ctx); 1659 conn->cn_if->ci_client_call_on_new(conn); 1660 end: 1661 return conn; 1662 err: 1663 conn = NULL; 1664 goto end; 1665} 1666 1667 1668static void 1669remove_conn_from_hash (lsquic_engine_t *engine, lsquic_conn_t *conn) 1670{ 1671 remove_all_cces_from_hash(engine->conns_hash, conn); 1672 (void) engine_decref_conn(engine, conn, LSCONN_HASHED); 1673} 1674 1675 1676static void 1677refflags2str (enum lsquic_conn_flags flags, char s[7]) 1678{ 1679 *s = 'C'; s += !!(flags & LSCONN_CLOSING); 1680 *s = 'H'; s += !!(flags & LSCONN_HASHED); 1681 *s = 'O'; s += !!(flags & LSCONN_HAS_OUTGOING); 1682 *s = 'T'; s += !!(flags & LSCONN_TICKABLE); 1683 *s = 'A'; s += !!(flags & LSCONN_ATTQ); 1684 *s = 'K'; s += !!(flags & LSCONN_TICKED); 1685 *s = '\0'; 1686} 1687 1688 1689static void 1690engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag) 1691{ 1692 char str[2][7]; 1693 assert(flag & CONN_REF_FLAGS); 1694 assert(!(conn->cn_flags & flag)); 1695 conn->cn_flags |= flag; 1696 LSQ_DEBUGC("incref conn %"CID_FMT", '%s' -> '%s'", 1697 CID_BITS(lsquic_conn_log_cid(conn)), 1698 (refflags2str(conn->cn_flags & ~flag, str[0]), str[0]), 1699 (refflags2str(conn->cn_flags, str[1]), str[1])); 1700} 1701 1702 1703static lsquic_conn_t * 1704engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 1705 enum lsquic_conn_flags flags) 1706{ 1707 char str[2][7]; 1708 lsquic_time_t now; 1709 assert(flags & CONN_REF_FLAGS); 1710 assert(conn->cn_flags & flags); 1711#ifndef NDEBUG 1712 if (flags & LSCONN_CLOSING) 1713 assert(0 == (conn->cn_flags & LSCONN_HASHED)); 1714#endif 1715 conn->cn_flags &= ~flags; 1716 LSQ_DEBUGC("decref conn %"CID_FMT", '%s' -> '%s'", 1717 CID_BITS(lsquic_conn_log_cid(conn)), 1718 (refflags2str(conn->cn_flags | flags, str[0]), str[0]), 1719 (refflags2str(conn->cn_flags, str[1]), str[1])); 1720 if (0 == (conn->cn_flags & CONN_REF_FLAGS)) 1721 { 1722 now = lsquic_time_now(); 1723 if (conn->cn_flags & LSCONN_MINI) 1724 eng_hist_inc(&engine->history, now, sl_del_mini_conns); 1725 else 1726 eng_hist_inc(&engine->history, now, sl_del_full_conns); 1727 destroy_conn(engine, conn, now); 1728 return NULL; 1729 } 1730 else 1731 return conn; 1732} 1733 1734 1735/* This is not a general-purpose function. Only call from engine dtor. 
*/ 1736static void 1737force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn) 1738{ 1739 assert(engine->flags & ENG_DTOR); 1740 const enum lsquic_conn_flags flags = conn->cn_flags; 1741 assert(conn->cn_flags & CONN_REF_FLAGS); 1742 assert(!(flags & LSCONN_HAS_OUTGOING)); /* Should be removed already */ 1743 assert(!(flags & LSCONN_TICKABLE)); /* Should be removed already */ 1744 assert(!(flags & LSCONN_CLOSING)); /* It is in transient queue? */ 1745 if (flags & LSCONN_ATTQ) 1746 { 1747 lsquic_attq_remove(engine->attq, conn); 1748 (void) engine_decref_conn(engine, conn, LSCONN_ATTQ); 1749 } 1750 if (flags & LSCONN_HASHED) 1751 remove_conn_from_hash(engine, conn); 1752} 1753 1754 1755/* Iterator for tickable connections (those on the Tickable Queue). Before 1756 * a connection is returned, it is removed from the Advisory Tick Time queue 1757 * if necessary. 1758 */ 1759static lsquic_conn_t * 1760conn_iter_next_tickable (struct lsquic_engine *engine) 1761{ 1762 lsquic_conn_t *conn; 1763 1764 if (engine->flags & ENG_SERVER) 1765 while (1) 1766 { 1767 conn = lsquic_mh_pop(&engine->conns_tickable); 1768 if (conn && (conn->cn_flags & LSCONN_SKIP_ON_PROC)) 1769 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1770 else 1771 break; 1772 } 1773 else 1774 conn = lsquic_mh_pop(&engine->conns_tickable); 1775 1776 if (conn) 1777 conn = engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1778 if (conn && (conn->cn_flags & LSCONN_ATTQ)) 1779 { 1780 lsquic_attq_remove(engine->attq, conn); 1781 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1782 } 1783 1784 return conn; 1785} 1786 1787 1788static void 1789cub_init (struct cid_update_batch *cub, lsquic_cids_update_f update, 1790 void *update_ctx) 1791{ 1792 cub->cub_update_cids = update; 1793 cub->cub_update_ctx = update_ctx; 1794 cub->cub_count = 0; 1795} 1796 1797 1798static void 1799cub_flush (struct cid_update_batch *cub) 1800{ 1801 if (cub->cub_count > 0 && cub->cub_update_cids) 1802 cub->cub_update_cids(cub->cub_update_ctx, cub->cub_peer_ctxs, 1803 cub->cub_cids, cub->cub_count); 1804 cub->cub_count = 0; 1805} 1806 1807 1808static void 1809cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx) 1810{ 1811 cub->cub_cids [ cub->cub_count ] = *cid; 1812 cub->cub_peer_ctxs[ cub->cub_count ] = peer_ctx; 1813 ++cub->cub_count; 1814 if (cub->cub_count == sizeof(cub->cub_cids) / sizeof(cub->cub_cids[0])) 1815 cub_flush(cub); 1816} 1817 1818 1819/* Process registered CIDs */ 1820static void 1821cub_add_cids_from_cces (struct cid_update_batch *cub, struct lsquic_conn *conn) 1822{ 1823 struct cce_cid_iter citer; 1824 struct conn_cid_elem *cce; 1825 void *peer_ctx; 1826 1827 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 1828 for (cce = cce_iter_first(&citer, conn); cce; cce = cce_iter_next(&citer)) 1829 if (cce->cce_flags & CCE_REG) 1830 cub_add(cub, &cce->cce_cid, peer_ctx); 1831} 1832 1833 1834static void 1835drop_all_mini_conns (lsquic_engine_t *engine) 1836{ 1837 struct lsquic_hash_elem *el; 1838 lsquic_conn_t *conn; 1839 struct cid_update_batch cub; 1840 1841 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 1842 1843 for (el = lsquic_hash_first(engine->conns_hash); el; 1844 el = lsquic_hash_next(engine->conns_hash)) 1845 { 1846 conn = lsquic_hashelem_getdata(el); 1847 if (conn->cn_flags & LSCONN_MINI) 1848 { 1849 /* If promoted, why is it still in this hash? 
*/ 1850 assert(!(conn->cn_flags & LSCONN_PROMOTED)); 1851 if (!(conn->cn_flags & LSCONN_PROMOTED)) 1852 cub_add_cids_from_cces(&cub, conn); 1853 remove_conn_from_hash(engine, conn); 1854 } 1855 } 1856 1857 cub_flush(&cub); 1858} 1859 1860 1861void 1862lsquic_engine_process_conns (lsquic_engine_t *engine) 1863{ 1864 lsquic_conn_t *conn; 1865 lsquic_time_t now; 1866 1867 ENGINE_IN(engine); 1868 1869 now = lsquic_time_now(); 1870 while ((conn = lsquic_attq_pop(engine->attq, now))) 1871 { 1872 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1873 if (conn && !(conn->cn_flags & LSCONN_TICKABLE)) 1874 { 1875 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1876 engine_incref_conn(conn, LSCONN_TICKABLE); 1877 } 1878 } 1879 1880 process_connections(engine, conn_iter_next_tickable, now); 1881 ENGINE_OUT(engine); 1882} 1883 1884 1885static void 1886release_or_return_enc_data (struct lsquic_engine *engine, 1887 void (*pmi_rel_or_ret) (void *, void *, void *, char), 1888 struct lsquic_conn *conn, struct lsquic_packet_out *packet_out) 1889{ 1890 pmi_rel_or_ret(engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1891 packet_out->po_enc_data, lsquic_packet_out_ipv6(packet_out)); 1892 packet_out->po_flags &= ~PO_ENCRYPTED; 1893 packet_out->po_enc_data = NULL; 1894} 1895 1896 1897static void 1898release_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1899 struct lsquic_packet_out *packet_out) 1900{ 1901 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_release, 1902 conn, packet_out); 1903} 1904 1905 1906static void 1907return_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1908 struct lsquic_packet_out *packet_out) 1909{ 1910 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_return, 1911 conn, packet_out); 1912} 1913 1914 1915static int 1916copy_packet (struct lsquic_engine *engine, struct lsquic_conn *conn, 1917 struct lsquic_packet_out *packet_out) 1918{ 1919 int ipv6; 1920 1921 ipv6 = NP_IS_IPv6(packet_out->po_path); 1922 if (packet_out->po_flags & PO_ENCRYPTED) 1923 { 1924 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1925 && packet_out->po_data_sz == packet_out->po_enc_data_sz 1926 && 0 == memcmp(packet_out->po_data, packet_out->po_enc_data, 1927 packet_out->po_data_sz)) 1928 return 0; 1929 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1930 && packet_out->po_data_sz <= packet_out->po_enc_data_sz) 1931 goto copy; 1932 return_enc_data(engine, conn, packet_out); 1933 } 1934 1935 packet_out->po_enc_data = engine->pub.enp_pmi->pmi_allocate( 1936 engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1937 packet_out->po_data_sz, ipv6); 1938 if (!packet_out->po_enc_data) 1939 { 1940 LSQ_DEBUG("could not allocate memory for outgoing unencrypted packet " 1941 "of size %hu", packet_out->po_data_sz); 1942 return -1; 1943 } 1944 1945 copy: 1946 memcpy(packet_out->po_enc_data, packet_out->po_data, 1947 packet_out->po_data_sz); 1948 packet_out->po_enc_data_sz = packet_out->po_data_sz; 1949 packet_out->po_sent_sz = packet_out->po_data_sz; 1950 packet_out->po_flags &= ~PO_IPv6; 1951 packet_out->po_flags |= PO_ENCRYPTED|PO_SENT_SZ|(ipv6 << POIPv6_SHIFT); 1952 1953 return 0; 1954} 1955 1956 1957STAILQ_HEAD(conns_stailq, lsquic_conn); 1958TAILQ_HEAD(conns_tailq, lsquic_conn); 1959 1960 1961struct conns_out_iter 1962{ 1963 struct min_heap *coi_heap; 1964 struct pr_queue *coi_prq; 1965 TAILQ_HEAD(, lsquic_conn) coi_active_list, 1966 coi_inactive_list; 1967 lsquic_conn_t *coi_next; 1968#ifndef NDEBUG 1969 lsquic_time_t 
coi_last_sent; 1970#endif 1971}; 1972 1973 1974static void 1975coi_init (struct conns_out_iter *iter, struct lsquic_engine *engine) 1976{ 1977 iter->coi_heap = &engine->conns_out; 1978 iter->coi_prq = engine->pr_queue; 1979 iter->coi_next = NULL; 1980 TAILQ_INIT(&iter->coi_active_list); 1981 TAILQ_INIT(&iter->coi_inactive_list); 1982#ifndef NDEBUG 1983 iter->coi_last_sent = 0; 1984#endif 1985} 1986 1987 1988static lsquic_conn_t * 1989coi_next (struct conns_out_iter *iter) 1990{ 1991 lsquic_conn_t *conn; 1992 1993 if (lsquic_mh_count(iter->coi_heap) > 0) 1994 { 1995 conn = lsquic_mh_pop(iter->coi_heap); 1996 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1997 conn->cn_flags |= LSCONN_COI_ACTIVE; 1998#ifndef NDEBUG 1999 if (iter->coi_last_sent) 2000 assert(iter->coi_last_sent <= conn->cn_last_sent); 2001 iter->coi_last_sent = conn->cn_last_sent; 2002#endif 2003 return conn; 2004 } 2005 else if (iter->coi_prq && (conn = lsquic_prq_next_conn(iter->coi_prq))) 2006 { 2007 return conn; 2008 } 2009 else if (!TAILQ_EMPTY(&iter->coi_active_list)) 2010 { 2011 iter->coi_prq = NULL; /* Save function call in previous conditional */ 2012 conn = iter->coi_next; 2013 if (!conn) 2014 conn = TAILQ_FIRST(&iter->coi_active_list); 2015 if (conn) 2016 iter->coi_next = TAILQ_NEXT(conn, cn_next_out); 2017 return conn; 2018 } 2019 else 2020 return NULL; 2021} 2022 2023 2024static void 2025coi_deactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 2026{ 2027 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 2028 { 2029 assert(!TAILQ_EMPTY(&iter->coi_active_list)); 2030 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 2031 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 2032 TAILQ_INSERT_TAIL(&iter->coi_inactive_list, conn, cn_next_out); 2033 conn->cn_flags |= LSCONN_COI_INACTIVE; 2034 } 2035} 2036 2037 2038static void 2039coi_reactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 2040{ 2041 assert(conn->cn_flags & LSCONN_COI_INACTIVE); 2042 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 2043 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 2044 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 2045 conn->cn_flags |= LSCONN_COI_ACTIVE; 2046} 2047 2048 2049static void 2050coi_reheap (struct conns_out_iter *iter, lsquic_engine_t *engine) 2051{ 2052 lsquic_conn_t *conn; 2053 while ((conn = TAILQ_FIRST(&iter->coi_active_list))) 2054 { 2055 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 2056 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 2057 if ((conn->cn_flags & CONN_REF_FLAGS) != LSCONN_HAS_OUTGOING 2058 && !(conn->cn_flags & LSCONN_IMMED_CLOSE)) 2059 lsquic_mh_insert(iter->coi_heap, conn, conn->cn_last_sent); 2060 else /* Closed connection gets one shot at sending packets */ 2061 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 2062 } 2063 while ((conn = TAILQ_FIRST(&iter->coi_inactive_list))) 2064 { 2065 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 2066 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 2067 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 2068 } 2069} 2070 2071 2072#if CAN_LOSE_PACKETS 2073static void 2074lose_matching_packets (const lsquic_engine_t *engine, struct out_batch *batch, 2075 unsigned n) 2076{ 2077 const lsquic_cid_t *cid; 2078 struct iovec *iov; 2079 unsigned i; 2080 char packno_str[22]; 2081 2082 for (i = 0; i < n; ++i) 2083 { 2084 snprintf(packno_str, sizeof(packno_str), "%"PRIu64, 2085 batch->packets[i]->po_packno); 2086 if (0 == regexec(&engine->lose_packets_re, packno_str, 0, NULL, 0)) 2087 { 2088 for (iov = 
batch->outs[i].iov; iov <
2089                        batch->outs[i].iov + batch->outs[i].iovlen; ++iov)
2090                iov->iov_len -= 1;
2091            cid = lsquic_conn_log_cid(batch->conns[i]);
2092            LSQ_WARNC("losing packet %s for connection %"CID_FMT, packno_str,
2093                CID_BITS(cid));
2094        }
2095    }
2096}
2097
2098
2099#endif
2100
2101
2102#ifdef NDEBUG
2103#define CONST_BATCH const
2104#else
2105#define CONST_BATCH
2106#endif
2107
2108
2109struct send_batch_ctx {
2110    struct conns_stailq     *closed_conns;
2111    struct conns_tailq      *ticked_conns;
2112    struct conns_out_iter   *conns_iter;
2113    CONST_BATCH struct out_batch    *batch;
2114};
2115
2116
2117static void
2118close_conn_immediately (struct lsquic_engine *engine,
2119                const struct send_batch_ctx *sb_ctx, struct lsquic_conn *conn)
2120{
2121    conn->cn_flags |= LSCONN_IMMED_CLOSE;
2122    if (!(conn->cn_flags & LSCONN_CLOSING))
2123    {
2124        STAILQ_INSERT_TAIL(sb_ctx->closed_conns, conn, cn_next_closed_conn);
2125        engine_incref_conn(conn, LSCONN_CLOSING);
2126        if (conn->cn_flags & LSCONN_HASHED)
2127            remove_conn_from_hash(engine, conn);
2128    }
2129    if (conn->cn_flags & LSCONN_TICKED)
2130    {
2131        TAILQ_REMOVE(sb_ctx->ticked_conns, conn, cn_next_ticked);
2132        engine_decref_conn(engine, conn, LSCONN_TICKED);
2133    }
2134}
2135
2136
2137static void
2138close_conn_on_send_error (struct lsquic_engine *engine,
2139                    const struct send_batch_ctx *sb_ctx, int n, int e_val)
2140{
2141    const struct out_batch *batch = sb_ctx->batch;
2142    struct lsquic_conn *const conn = batch->conns[n];
2143    char buf[2][INET6_ADDRSTRLEN + sizeof(":65535")];
2144
2145    LSQ_WARNC("error sending packet for %s connection %"CID_FMT" - close it; "
2146        "src: %s; dst: %s; errno: %d",
2147        conn->cn_flags & LSCONN_EVANESCENT ? "evanescent" :
2148        conn->cn_flags & LSCONN_MINI ? "mini" : "regular",
2149        CID_BITS(lsquic_conn_log_cid(conn)),
2150        SA2STR(batch->outs[n].local_sa, buf[0]),
2151        SA2STR(batch->outs[n].dest_sa, buf[1]),
2152        e_val);
2153    if (conn->cn_flags & LSCONN_EVANESCENT)
2154        lsquic_prq_drop(conn);
2155    else
2156        close_conn_immediately(engine, sb_ctx, conn);
2157}
2158
2159
2160static unsigned
2161send_batch (lsquic_engine_t *engine, const struct send_batch_ctx *sb_ctx,
2162                                                        unsigned n_to_send)
2163{
2164    int n_sent, i, e_val;
2165    lsquic_time_t now;
2166    unsigned off;
2167    size_t count;
2168    CONST_BATCH struct out_batch *const batch = sb_ctx->batch;
2169    struct lsquic_packet_out *CONST_BATCH *packet_out, *CONST_BATCH *end;
2170
2171#if CAN_LOSE_PACKETS
2172    if (engine->flags & ENG_LOSE_PACKETS)
2173        lose_matching_packets(engine, batch, n_to_send);
2174#endif
2175    /* Set sent time before the write to avoid underestimating RTT */
2176    now = lsquic_time_now();
2177    for (i = 0; i < (int) n_to_send; ++i)
2178    {
2179        off = batch->pack_off[i];
2180        count = batch->outs[i].iovlen;
2181        assert(count > 0);
2182        packet_out = &batch->packets[off];
2183        end = packet_out + count;
2184        do
2185            (*packet_out)->po_sent = now;
2186        while (++packet_out < end);
2187    }
2188    n_sent = engine->packets_out(engine->packets_out_ctx, batch->outs,
2189                                    n_to_send);
2190    e_val = errno;
2191    if (n_sent < (int) n_to_send)
2192    {
2193        engine->pub.enp_flags &= ~ENPUB_CAN_SEND;
2194        engine->resume_sending_at = now + 1000000;
2195        LSQ_DEBUG("cannot send packets");
2196        EV_LOG_GENERIC_EVENT("cannot send packets");
2197        if (!(EAGAIN == e_val || EWOULDBLOCK == e_val))
2198            close_conn_on_send_error(engine, sb_ctx,
2199                                            n_sent < 0 ?
0 : n_sent, e_val);
2200    }
2201    if (n_sent >= 0)
2202        LSQ_DEBUG("packets out returned %d (out of %u)", n_sent, n_to_send);
2203    else
2204    {
2205        LSQ_DEBUG("packets out returned an error: %s", strerror(e_val));
2206        n_sent = 0;
2207    }
2208    if (n_sent > 0)
2209        engine->last_sent = now + n_sent;
2210    for (i = 0; i < n_sent; ++i)
2211    {
2212        eng_hist_inc(&engine->history, now, sl_packets_out);
2213        /* `i' is added to maintain relative order */
2214        batch->conns[i]->cn_last_sent = now + i;
2215
2216        off = batch->pack_off[i];
2217        count = batch->outs[i].iovlen;
2218        assert(count > 0);
2219        packet_out = &batch->packets[off];
2220        end = packet_out + count;
2221        do
2222        {
2223#if LOG_PACKET_CHECKSUM
2224            log_packet_checksum(lsquic_conn_log_cid(batch->conns[i]), "out",
2225                batch->outs[i].iov[packet_out - &batch->packets[off]].iov_base,
2226                batch->outs[i].iov[packet_out - &batch->packets[off]].iov_len);
2227#endif
2228            EV_LOG_PACKET_SENT(lsquic_conn_log_cid(batch->conns[i]),
2229                *packet_out);
2230            /* Release packet out buffer as soon as the packet is sent
2231             * successfully.  If not successfully sent, we hold on to
2232             * this buffer until the packet sending is attempted again
2233             * or until it times out and is regenerated.
2234             */
2235            if ((*packet_out)->po_flags & PO_ENCRYPTED)
2236                release_enc_data(engine, batch->conns[i], *packet_out);
2237            batch->conns[i]->cn_if->ci_packet_sent(batch->conns[i],
2238                *packet_out);
2239        }
2240        while (++packet_out < end);
2241    }
2242    if (LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT))
2243        for ( ; i < (int) n_to_send; ++i)
2244        {
2245            off = batch->pack_off[i];
2246            count = batch->outs[i].iovlen;
2247            assert(count > 0);
2248            packet_out = &batch->packets[off];
2249            end = packet_out + count;
2250            do
2251                EV_LOG_PACKET_NOT_SENT(lsquic_conn_log_cid(batch->conns[i]),
2252                    *packet_out);
2253            while (++packet_out < end);
2254        }
2255    /* Return packets to the connection in reverse order so that the packet
2256     * ordering is maintained.
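     * (The unsent tail of the batch is handed back last-to-first via
     * ci_packet_not_sent(), both across connections and within each
     * connection's run of packets, so each connection can restore its
     * original send order.)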
2257 */ 2258 for (i = (int) n_to_send - 1; i >= n_sent; --i) 2259 { 2260 off = batch->pack_off[i]; 2261 count = batch->outs[i].iovlen; 2262 assert(count > 0); 2263 packet_out = &batch->packets[off + count - 1]; 2264 end = &batch->packets[off - 1]; 2265 do 2266 batch->conns[i]->cn_if->ci_packet_not_sent(batch->conns[i], 2267 *packet_out); 2268 while (--packet_out > end); 2269 if (!(batch->conns[i]->cn_flags & (LSCONN_COI_ACTIVE|LSCONN_EVANESCENT))) 2270 coi_reactivate(sb_ctx->conns_iter, batch->conns[i]); 2271 } 2272 return n_sent; 2273} 2274 2275 2276/* Return 1 if went past deadline, 0 otherwise */ 2277static int 2278check_deadline (lsquic_engine_t *engine) 2279{ 2280 if (engine->pub.enp_settings.es_proc_time_thresh && 2281 lsquic_time_now() > engine->deadline) 2282 { 2283 LSQ_INFO("went past threshold of %u usec, stop sending", 2284 engine->pub.enp_settings.es_proc_time_thresh); 2285 engine->flags |= ENG_PAST_DEADLINE; 2286 return 1; 2287 } 2288 else 2289 return 0; 2290} 2291 2292 2293static size_t 2294iov_size (const struct iovec *iov, const struct iovec *const end) 2295{ 2296 size_t size; 2297 2298 assert(iov < end); 2299 2300 size = 0; 2301 do 2302 size += iov->iov_len; 2303 while (++iov < end); 2304 2305 return size; 2306} 2307 2308 2309static void 2310send_packets_out (struct lsquic_engine *engine, 2311 struct conns_tailq *ticked_conns, 2312 struct conns_stailq *closed_conns) 2313{ 2314 unsigned n, w, n_sent, n_batches_sent; 2315 lsquic_packet_out_t *packet_out; 2316 struct lsquic_packet_out **packet; 2317 lsquic_conn_t *conn; 2318 struct out_batch *const batch = &engine->out_batch; 2319 struct iovec *iov, *packet_iov; 2320 struct conns_out_iter conns_iter; 2321 int shrink, deadline_exceeded; 2322 const struct send_batch_ctx sb_ctx = { 2323 closed_conns, 2324 ticked_conns, 2325 &conns_iter, 2326 &engine->out_batch, 2327 }; 2328 2329 coi_init(&conns_iter, engine); 2330 n_batches_sent = 0; 2331 n_sent = 0, n = 0; 2332 shrink = 0; 2333 deadline_exceeded = 0; 2334 iov = batch->iov; 2335 packet = batch->packets; 2336 2337 while ((conn = coi_next(&conns_iter))) 2338 { 2339 packet_out = conn->cn_if->ci_next_packet_to_send(conn, 0); 2340 if (!packet_out) { 2341 /* Evanescent connection always has a packet to send: */ 2342 assert(!(conn->cn_flags & LSCONN_EVANESCENT)); 2343 LSQ_DEBUGC("batched all outgoing packets for %s conn %"CID_FMT, 2344 (conn->cn_flags & LSCONN_MINI ? 
"mini" : "full"), 2345 CID_BITS(lsquic_conn_log_cid(conn))); 2346 coi_deactivate(&conns_iter, conn); 2347 continue; 2348 } 2349 batch->outs[n].iov = packet_iov = iov; 2350 next_coa: 2351 if (!(packet_out->po_flags & (PO_ENCRYPTED|PO_NOENCRYPT))) 2352 { 2353 switch (conn->cn_esf_c->esf_encrypt_packet(conn->cn_enc_session, 2354 &engine->pub, conn, packet_out)) 2355 { 2356 case ENCPA_NOMEM: 2357 /* Send what we have and wait for a more opportune moment */ 2358 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2359 goto end_for; 2360 case ENCPA_BADCRYPT: 2361 /* This is pretty bad: close connection immediately */ 2362 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2363 LSQ_INFOC("conn %"CID_FMT" has unsendable packets", 2364 CID_BITS(lsquic_conn_log_cid(conn))); 2365 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 2366 { 2367 close_conn_immediately(engine, &sb_ctx, conn); 2368 coi_deactivate(&conns_iter, conn); 2369 } 2370 continue; 2371 case ENCPA_OK: 2372 break; 2373 } 2374 } 2375 else if ((packet_out->po_flags & PO_NOENCRYPT) 2376 && engine->pub.enp_pmi != &stock_pmi) 2377 { 2378 if (0 != copy_packet(engine, conn, packet_out)) 2379 { 2380 /* Copy can only fail if packet could not be allocated */ 2381 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2382 goto end_for; 2383 } 2384 } 2385 LSQ_DEBUGC("batched packet %"PRIu64" for connection %"CID_FMT, 2386 packet_out->po_packno, CID_BITS(lsquic_conn_log_cid(conn))); 2387 if (packet_out->po_flags & PO_ENCRYPTED) 2388 { 2389 iov->iov_base = packet_out->po_enc_data; 2390 iov->iov_len = packet_out->po_enc_data_sz; 2391 } 2392 else 2393 { 2394 iov->iov_base = packet_out->po_data; 2395 iov->iov_len = packet_out->po_data_sz; 2396 } 2397 if (packet_iov == iov) 2398 { 2399 batch->pack_off[n] = packet - batch->packets; 2400 batch->outs [n].ecn = lsquic_packet_out_ecn(packet_out); 2401 batch->outs [n].peer_ctx = packet_out->po_path->np_peer_ctx; 2402 batch->outs [n].local_sa = NP_LOCAL_SA(packet_out->po_path); 2403 batch->outs [n].dest_sa = NP_PEER_SA(packet_out->po_path); 2404 batch->conns [n] = conn; 2405 } 2406 *packet = packet_out; 2407 ++packet; 2408 ++iov; 2409 if ((conn->cn_flags & LSCONN_IETF) 2410 && ((1 << packet_out->po_header_type) 2411 & ((1 << HETY_INITIAL)|(1 << HETY_HANDSHAKE)|(1 << HETY_0RTT))) 2412#ifndef NDEBUG 2413 && (engine->flags & ENG_COALESCE) 2414#endif 2415 && iov < batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2416 { 2417 const size_t size = iov_size(packet_iov, iov); 2418 packet_out = conn->cn_if->ci_next_packet_to_send(conn, size); 2419 if (packet_out) 2420 goto next_coa; 2421 } 2422 batch->outs [n].iovlen = iov - packet_iov; 2423 ++n; 2424 if (n == engine->batch_size 2425 || iov >= batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2426 { 2427 w = send_batch(engine, &sb_ctx, n); 2428 n = 0; 2429 iov = batch->iov; 2430 packet = batch->packets; 2431 ++n_batches_sent; 2432 n_sent += w; 2433 if (w < engine->batch_size) 2434 { 2435 shrink = 1; 2436 break; 2437 } 2438 deadline_exceeded = check_deadline(engine); 2439 if (deadline_exceeded) 2440 break; 2441 grow_batch_size(engine); 2442 } 2443 } 2444 end_for: 2445 2446 if (n > 0) { 2447 w = send_batch(engine, &sb_ctx, n); 2448 n_sent += w; 2449 shrink = w < n; 2450 ++n_batches_sent; 2451 } 2452 2453 if (shrink) 2454 shrink_batch_size(engine); 2455 else if (n_batches_sent > 1) 2456 { 2457 deadline_exceeded = check_deadline(engine); 2458 if (!deadline_exceeded) 2459 grow_batch_size(engine); 2460 } 2461 2462 coi_reheap(&conns_iter, engine); 2463 2464 LSQ_DEBUG("%s: 
sent %u packet%.*s", __func__, n_sent, n_sent != 1, "s"); 2465} 2466 2467 2468int 2469lsquic_engine_has_unsent_packets (lsquic_engine_t *engine) 2470{ 2471 return lsquic_mh_count(&engine->conns_out) > 0 2472 || (engine->pr_queue && lsquic_prq_have_pending(engine->pr_queue)) 2473 ; 2474} 2475 2476 2477static void 2478reset_deadline (lsquic_engine_t *engine, lsquic_time_t now) 2479{ 2480 engine->deadline = now + engine->pub.enp_settings.es_proc_time_thresh; 2481 engine->flags &= ~ENG_PAST_DEADLINE; 2482} 2483 2484 2485static void 2486check_tickable_conns_again (struct lsquic_engine *engine) 2487{ 2488 struct lsquic_hash_elem *el; 2489 struct lsquic_conn *conn; 2490 unsigned count; 2491 2492 count = 0; 2493 for (el = lsquic_hash_first(engine->conns_hash); el; 2494 el = lsquic_hash_next(engine->conns_hash)) 2495 { 2496 conn = lsquic_hashelem_getdata(el); 2497 if (!(conn->cn_flags & LSCONN_TICKABLE) 2498 && conn->cn_if->ci_is_tickable(conn)) 2499 { 2500 lsquic_mh_insert(&engine->conns_tickable, conn, 2501 conn->cn_last_ticked); 2502 engine_incref_conn(conn, LSCONN_TICKABLE); 2503 ++count; 2504 } 2505 } 2506 LSQ_DEBUG("%u connection%s tickable again after sending has been " 2507 "re-enabled", count, count == 1 ? " is" : "s are"); 2508} 2509 2510 2511void 2512lsquic_engine_send_unsent_packets (lsquic_engine_t *engine) 2513{ 2514 lsquic_conn_t *conn; 2515 struct conns_stailq closed_conns; 2516 struct conns_tailq ticked_conns = TAILQ_HEAD_INITIALIZER(ticked_conns); 2517 struct cid_update_batch cub; 2518 2519 ENGINE_IN(engine); 2520 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 2521 STAILQ_INIT(&closed_conns); 2522 reset_deadline(engine, lsquic_time_now()); 2523 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND)) 2524 { 2525 LSQ_DEBUG("can send again"); 2526 EV_LOG_GENERIC_EVENT("can send again"); 2527 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2528 check_tickable_conns_again(engine); 2529 } 2530 2531 send_packets_out(engine, &ticked_conns, &closed_conns); 2532 2533 while ((conn = STAILQ_FIRST(&closed_conns))) { 2534 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2535 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2536 cub_add_cids_from_cces(&cub, conn); 2537 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2538 } 2539 2540 cub_flush(&cub); 2541 ENGINE_OUT(engine); 2542} 2543 2544 2545static lsquic_conn_t * 2546next_new_full_conn (struct conns_stailq *new_full_conns) 2547{ 2548 lsquic_conn_t *conn; 2549 2550 conn = STAILQ_FIRST(new_full_conns); 2551 if (conn) 2552 STAILQ_REMOVE_HEAD(new_full_conns, cn_next_new_full); 2553 return conn; 2554} 2555 2556 2557static void 2558process_connections (lsquic_engine_t *engine, conn_iter_f next_conn, 2559 lsquic_time_t now) 2560{ 2561 lsquic_conn_t *conn; 2562 enum tick_st tick_st; 2563 unsigned i, why; 2564 lsquic_time_t next_tick_time; 2565 struct conns_stailq closed_conns; 2566 struct conns_tailq ticked_conns; 2567 struct conns_stailq new_full_conns; 2568 struct cid_update_batch cub_old, cub_live; 2569 cub_init(&cub_old, engine->report_old_scids, engine->scids_ctx); 2570 cub_init(&cub_live, engine->report_live_scids, engine->scids_ctx); 2571 2572 eng_hist_tick(&engine->history, now); 2573 2574 STAILQ_INIT(&closed_conns); 2575 TAILQ_INIT(&ticked_conns); 2576 reset_deadline(engine, now); 2577 STAILQ_INIT(&new_full_conns); 2578 2579 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND) 2580 && now > engine->resume_sending_at) 2581 { 2582 LSQ_NOTICE("failsafe activated: resume sending packets again after " 2583 "timeout"); 
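        /* (send_batch() clears ENPUB_CAN_SEND and sets resume_sending_at about
         * one second into the future when the packets_out callback fails to
         * send the whole batch; this branch re-enables sending in case
         * lsquic_engine_send_unsent_packets() is never called to do it.)
         */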
2584 EV_LOG_GENERIC_EVENT("resume sending packets again after timeout"); 2585 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2586 } 2587 2588 i = 0; 2589 while ((conn = next_conn(engine)) 2590 || (conn = next_new_full_conn(&new_full_conns))) 2591 { 2592 tick_st = conn->cn_if->ci_tick(conn, now); 2593 conn->cn_last_ticked = now + i /* Maintain relative order */ ++; 2594 if (tick_st & TICK_PROMOTE) 2595 { 2596 lsquic_conn_t *new_conn; 2597 EV_LOG_CONN_EVENT(lsquic_conn_log_cid(conn), 2598 "scheduled for promotion"); 2599 assert(conn->cn_flags & LSCONN_MINI); 2600 new_conn = new_full_conn_server(engine, conn, now); 2601 if (new_conn) 2602 { 2603 STAILQ_INSERT_TAIL(&new_full_conns, new_conn, cn_next_new_full); 2604 new_conn->cn_last_sent = engine->last_sent; 2605 eng_hist_inc(&engine->history, now, sl_new_full_conns); 2606 conn->cn_flags |= LSCONN_PROMOTED; 2607 } 2608 tick_st |= TICK_CLOSE; /* Destroy mini connection */ 2609 } 2610 if (tick_st & TICK_SEND) 2611 { 2612 if (!(conn->cn_flags & LSCONN_HAS_OUTGOING)) 2613 { 2614 lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent); 2615 engine_incref_conn(conn, LSCONN_HAS_OUTGOING); 2616 } 2617 } 2618 if (tick_st & TICK_CLOSE) 2619 { 2620 STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn); 2621 engine_incref_conn(conn, LSCONN_CLOSING); 2622 if (conn->cn_flags & LSCONN_HASHED) 2623 remove_conn_from_hash(engine, conn); 2624 } 2625 else 2626 { 2627 TAILQ_INSERT_TAIL(&ticked_conns, conn, cn_next_ticked); 2628 engine_incref_conn(conn, LSCONN_TICKED); 2629 if ((engine->flags & ENG_SERVER) && conn->cn_if->ci_report_live 2630 && conn->cn_if->ci_report_live(conn, now)) 2631 cub_add_cids_from_cces(&cub_live, conn); 2632 } 2633 } 2634 2635 if ((engine->pub.enp_flags & ENPUB_CAN_SEND) 2636 && lsquic_engine_has_unsent_packets(engine)) 2637 send_packets_out(engine, &ticked_conns, &closed_conns); 2638 2639 while ((conn = STAILQ_FIRST(&closed_conns))) { 2640 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2641 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2642 cub_add_cids_from_cces(&cub_old, conn); 2643 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2644 } 2645 2646 while ((conn = TAILQ_FIRST(&ticked_conns))) 2647 { 2648 TAILQ_REMOVE(&ticked_conns, conn, cn_next_ticked); 2649 engine_decref_conn(engine, conn, LSCONN_TICKED); 2650 if (!(conn->cn_flags & LSCONN_TICKABLE) 2651 && conn->cn_if->ci_is_tickable(conn)) 2652 { 2653 /* Floyd heapification is not faster, don't bother. 
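             * Re-inserting each still-tickable connection into the
             * conns_tickable min-heap one at a time, keyed by cn_last_ticked,
             * is good enough here.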
*/ 2654 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 2655 engine_incref_conn(conn, LSCONN_TICKABLE); 2656 } 2657 else if (!(conn->cn_flags & LSCONN_ATTQ)) 2658 { 2659 next_tick_time = conn->cn_if->ci_next_tick_time(conn, &why); 2660 if (next_tick_time) 2661 { 2662 if (0 == lsquic_attq_add(engine->attq, conn, next_tick_time, 2663 why)) 2664 engine_incref_conn(conn, LSCONN_ATTQ); 2665 } 2666 else 2667 /* In all other cases, the idle timeout would make the next 2668 * tick time non-zero: 2669 */ 2670 assert((conn->cn_flags & LSCONN_IETF) 2671 && engine->pub.enp_settings.es_idle_timeout == 0); 2672 } 2673 } 2674 2675 cub_flush(&engine->new_scids); 2676 cub_flush(&cub_live); 2677 cub_flush(&cub_old); 2678} 2679 2680 2681static void 2682maybe_count_garbage (struct lsquic_engine *engine, size_t garbage_sz) 2683{ 2684 /* This is not very pretty (action at a distance via engine->curr_conn), 2685 * but it's the cheapest I can come up with to handle the "count garbage 2686 * toward amplification limit" requirement in 2687 * [draft-ietf-quic-transport-28] Section 8.1. 2688 */ 2689 if (engine->curr_conn && engine->curr_conn->cn_if->ci_count_garbage) 2690 engine->curr_conn->cn_if->ci_count_garbage(engine->curr_conn, 2691 garbage_sz); 2692} 2693 2694 2695/* Return 0 if packet is being processed by a real connection, 1 if the 2696 * packet was processed, but not by a connection, and -1 on error. 2697 */ 2698int 2699lsquic_engine_packet_in (lsquic_engine_t *engine, 2700 const unsigned char *packet_in_data, size_t packet_in_size, 2701 const struct sockaddr *sa_local, const struct sockaddr *sa_peer, 2702 void *peer_ctx, int ecn) 2703{ 2704 const unsigned char *const packet_begin = packet_in_data; 2705 const unsigned char *const packet_end = packet_in_data + packet_in_size; 2706 struct packin_parse_state ppstate; 2707 lsquic_packet_in_t *packet_in; 2708 int (*parse_packet_in_begin) (struct lsquic_packet_in *, size_t length, 2709 int is_server, unsigned cid_len, struct packin_parse_state *); 2710 unsigned n_zeroes; 2711 int s, is_ietf; 2712 lsquic_cid_t cid; 2713 2714 ENGINE_CALLS_INCR(engine); 2715 2716 if (engine->flags & ENG_SERVER) 2717 parse_packet_in_begin = lsquic_parse_packet_in_server_begin; 2718 else if (engine->flags & ENG_CONNS_BY_ADDR) 2719 { 2720 struct lsquic_hash_elem *el; 2721 const struct lsquic_conn *conn; 2722 el = find_conn_by_addr(engine->conns_hash, sa_local); 2723 if (!el) 2724 return -1; 2725 conn = lsquic_hashelem_getdata(el); 2726 if ((1 << conn->cn_version) & LSQUIC_GQUIC_HEADER_VERSIONS) 2727 parse_packet_in_begin = lsquic_gquic_parse_packet_in_begin; 2728 else if ((1 << conn->cn_version) & LSQUIC_IETF_VERSIONS) 2729 parse_packet_in_begin = lsquic_ietf_v1_parse_packet_in_begin; 2730 else if (conn->cn_version == LSQVER_050) 2731 parse_packet_in_begin = lsquic_Q050_parse_packet_in_begin; 2732 else 2733 { 2734#if LSQUIC_USE_Q098 2735 assert(conn->cn_version == LSQVER_046 || conn->cn_version == LSQVER_098); 2736#else 2737 assert(conn->cn_version == LSQVER_046); 2738#endif 2739 parse_packet_in_begin = lsquic_Q046_parse_packet_in_begin; 2740 } 2741 } 2742 else 2743 parse_packet_in_begin = lsquic_parse_packet_in_begin; 2744 2745 engine->curr_conn = NULL; 2746 n_zeroes = 0; 2747 is_ietf = 0; 2748#ifdef _MSC_VER 2749 s = 0; 2750 cid.len = 0; 2751 cid.idbuf[0] = 0; 2752#endif 2753 do 2754 { 2755 packet_in = lsquic_mm_get_packet_in(&engine->pub.enp_mm); 2756 if (!packet_in) 2757 return -1; 2758 /* Library does not modify packet_in_data, it is not referenced after 
2759 * this function returns and subsequent release of pi_data is guarded 2760 * by PI_OWN_DATA flag. 2761 */ 2762 packet_in->pi_data = (unsigned char *) packet_in_data; 2763 if (0 != parse_packet_in_begin(packet_in, packet_end - packet_in_data, 2764 engine->flags & ENG_SERVER, 2765 engine->pub.enp_settings.es_scid_len, &ppstate)) 2766 { 2767 LSQ_DEBUG("Cannot parse incoming packet's header"); 2768 maybe_count_garbage(engine, packet_end - packet_in_data); 2769 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 2770 s = 1; 2771 break; 2772 } 2773 2774 /* [draft-ietf-quic-transport-27] Section 12.2: 2775 * " Receivers SHOULD ignore any subsequent packets with a different 2776 * " Destination Connection ID than the first packet in the datagram. 2777 */ 2778 if (is_ietf && packet_in_data > packet_begin) 2779 { 2780 if (!((packet_in->pi_flags & (PI_GQUIC|PI_CONN_ID)) == PI_CONN_ID 2781 && LSQUIC_CIDS_EQ(&packet_in->pi_dcid, &cid))) 2782 { 2783 packet_in_data += packet_in->pi_data_sz; 2784 maybe_count_garbage(engine, packet_in->pi_data_sz); 2785 continue; 2786 } 2787 } 2788 2789 is_ietf = 0 == (packet_in->pi_flags & PI_GQUIC); 2790 packet_in_data += packet_in->pi_data_sz; 2791 if (is_ietf && packet_in_data < packet_end) 2792 cid = packet_in->pi_dcid; 2793 packet_in->pi_received = lsquic_time_now(); 2794 packet_in->pi_flags |= (3 & ecn) << PIBIT_ECN_SHIFT; 2795 eng_hist_inc(&engine->history, packet_in->pi_received, sl_packets_in); 2796 s = process_packet_in(engine, packet_in, &ppstate, sa_local, sa_peer, 2797 peer_ctx, packet_in_size); 2798 n_zeroes += s == 0; 2799 } 2800 while (0 == s && packet_in_data < packet_end); 2801 2802 return n_zeroes > 0 ? 0 : s; 2803} 2804 2805 2806#if __GNUC__ && !defined(NDEBUG) 2807__attribute__((weak)) 2808#endif 2809unsigned 2810lsquic_engine_quic_versions (const lsquic_engine_t *engine) 2811{ 2812 return engine->pub.enp_settings.es_versions; 2813} 2814 2815 2816void 2817lsquic_engine_cooldown (lsquic_engine_t *engine) 2818{ 2819 struct lsquic_hash_elem *el; 2820 lsquic_conn_t *conn; 2821 2822 if (engine->flags & ENG_COOLDOWN) 2823 /* AFAICT, there is no harm in calling this function more than once, 2824 * but log it just in case, as it may indicate an error in the caller. 
2825 */ 2826 LSQ_INFO("cooldown called again"); 2827 engine->flags |= ENG_COOLDOWN; 2828 LSQ_INFO("entering cooldown mode"); 2829 if (engine->flags & ENG_SERVER) 2830 drop_all_mini_conns(engine); 2831 for (el = lsquic_hash_first(engine->conns_hash); el; 2832 el = lsquic_hash_next(engine->conns_hash)) 2833 { 2834 conn = lsquic_hashelem_getdata(el); 2835 lsquic_conn_going_away(conn); 2836 } 2837} 2838 2839 2840int 2841lsquic_engine_earliest_adv_tick (lsquic_engine_t *engine, int *diff) 2842{ 2843 const struct attq_elem *next_attq; 2844 lsquic_time_t now, next_time; 2845#if LSQUIC_DEBUG_NEXT_ADV_TICK 2846 const struct lsquic_conn *conn; 2847 const enum lsq_log_level L = LSQ_LOG_DEBUG; /* Easy toggle */ 2848#endif 2849 2850 ENGINE_CALLS_INCR(engine); 2851 2852 if ((engine->flags & ENG_PAST_DEADLINE) 2853 && lsquic_mh_count(&engine->conns_out)) 2854 { 2855#if LSQUIC_DEBUG_NEXT_ADV_TICK 2856 conn = lsquic_mh_peek(&engine->conns_out); 2857 engine->last_logged_conn = 0; 2858 LSQ_LOGC(L, "next advisory tick is now: went past deadline last time " 2859 "and have %u outgoing connection%.*s (%"CID_FMT" first)", 2860 lsquic_mh_count(&engine->conns_out), 2861 lsquic_mh_count(&engine->conns_out) != 1, "s", 2862 CID_BITS(lsquic_conn_log_cid(conn))); 2863#endif 2864 *diff = 0; 2865 return 1; 2866 } 2867 2868 if (engine->pr_queue && lsquic_prq_have_pending(engine->pr_queue)) 2869 { 2870#if LSQUIC_DEBUG_NEXT_ADV_TICK 2871 engine->last_logged_conn = 0; 2872 LSQ_LOG(L, "next advisory tick is now: have pending PRQ elements"); 2873#endif 2874 *diff = 0; 2875 return 1; 2876 } 2877 2878 if (lsquic_mh_count(&engine->conns_tickable)) 2879 { 2880#if LSQUIC_DEBUG_NEXT_ADV_TICK 2881 conn = lsquic_mh_peek(&engine->conns_tickable); 2882 engine->last_logged_conn = 0; 2883 LSQ_LOGC(L, "next advisory tick is now: have %u tickable " 2884 "connection%.*s (%"CID_FMT" first)", 2885 lsquic_mh_count(&engine->conns_tickable), 2886 lsquic_mh_count(&engine->conns_tickable) != 1, "s", 2887 CID_BITS(lsquic_conn_log_cid(conn))); 2888#endif 2889 *diff = 0; 2890 return 1; 2891 } 2892 2893 next_attq = lsquic_attq_next(engine->attq); 2894 if (engine->pub.enp_flags & ENPUB_CAN_SEND) 2895 { 2896 if (next_attq) 2897 next_time = next_attq->ae_adv_time; 2898 else 2899 return 0; 2900 } 2901 else 2902 { 2903 if (next_attq) 2904 { 2905 next_time = next_attq->ae_adv_time; 2906 if (engine->resume_sending_at < next_time) 2907 { 2908 next_time = engine->resume_sending_at; 2909 next_attq = NULL; 2910 } 2911 } 2912 else 2913 next_time = engine->resume_sending_at; 2914 } 2915 2916 now = lsquic_time_now(); 2917 *diff = (int) ((int64_t) next_time - (int64_t) now); 2918#if LSQUIC_DEBUG_NEXT_ADV_TICK 2919 if (next_attq) 2920 { 2921 /* Deduplicate consecutive log messages about the same reason for the 2922 * same connection. 2923 * If diff is always zero or diff reset to a higher value, event is 2924 * still logged. 
2925 */ 2926 if (!((unsigned) next_attq->ae_why == engine->last_logged_ae_why 2927 && (uintptr_t) next_attq->ae_conn 2928 == engine->last_logged_conn 2929 && *diff < engine->last_tick_diff)) 2930 { 2931 engine->last_logged_conn = (uintptr_t) next_attq->ae_conn; 2932 engine->last_logged_ae_why = (unsigned) next_attq->ae_why; 2933 engine->last_tick_diff = *diff; 2934 LSQ_LOGC(L, "next advisory tick is %d usec away: conn %"CID_FMT 2935 ": %s", *diff, CID_BITS(lsquic_conn_log_cid(next_attq->ae_conn)), 2936 lsquic_attq_why2str(next_attq->ae_why)); 2937 } 2938 } 2939 else 2940 LSQ_LOG(L, "next advisory tick is %d usec away: resume sending", *diff); 2941#endif 2942 return 1; 2943} 2944 2945 2946unsigned 2947lsquic_engine_count_attq (lsquic_engine_t *engine, int from_now) 2948{ 2949 lsquic_time_t now; 2950 ENGINE_CALLS_INCR(engine); 2951 now = lsquic_time_now(); 2952 if (from_now < 0) 2953 now -= from_now; 2954 else 2955 now += from_now; 2956 return lsquic_attq_count_before(engine->attq, now); 2957} 2958 2959 2960int 2961lsquic_engine_add_cid (struct lsquic_engine_public *enpub, 2962 struct lsquic_conn *conn, unsigned cce_idx) 2963{ 2964 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2965 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2966 void *peer_ctx; 2967 2968 assert(cce_idx < conn->cn_n_cces); 2969 assert(conn->cn_cces_mask & (1 << cce_idx)); 2970 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 2971 2972 if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 2973 cce->cce_cid.len, conn, &cce->cce_hash_el)) 2974 { 2975 LSQ_DEBUGC("add %"CID_FMT" to the list of SCIDs", 2976 CID_BITS(&cce->cce_cid)); 2977 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 2978 cce->cce_flags |= CCE_REG; 2979 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 2980 return 0; 2981 } 2982 else 2983 { 2984 LSQ_WARNC("could not add new cid %"CID_FMT" to the SCID hash", 2985 CID_BITS(&cce->cce_cid)); 2986 return -1; 2987 } 2988} 2989 2990 2991void 2992lsquic_engine_retire_cid (struct lsquic_engine_public *enpub, 2993 struct lsquic_conn *conn, unsigned cce_idx, lsquic_time_t now) 2994{ 2995 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2996 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2997 void *peer_ctx; 2998 2999 assert(cce_idx < conn->cn_n_cces); 3000 3001 if (cce->cce_hash_el.qhe_flags & QHE_HASHED) 3002 lsquic_hash_erase(engine->conns_hash, &cce->cce_hash_el); 3003 3004 if (engine->purga) 3005 { 3006 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 3007 lsquic_purga_add(engine->purga, &cce->cce_cid, peer_ctx, 3008 PUTY_CID_RETIRED, now); 3009 } 3010 conn->cn_cces_mask &= ~(1u << cce_idx); 3011 LSQ_DEBUGC("retire CID %"CID_FMT, CID_BITS(&cce->cce_cid)); 3012} 3013 3014 3015
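/* Illustrative sketch (kept out of the build with #if 0): one way an
 * application might drive the engine entry points defined in this file --
 * lsquic_engine_packet_in(), lsquic_engine_process_conns(),
 * lsquic_engine_has_unsent_packets(), lsquic_engine_send_unsent_packets()
 * and lsquic_engine_earliest_adv_tick().  recv_udp_datagram() and
 * wait_readable_usec() are hypothetical application-side helpers (the
 * latter is assumed to block until the UDP socket is readable or the given
 * number of microseconds passes, with a negative value meaning no timeout);
 * only the lsquic_engine_* calls are taken from this file.
 */
#if 0
static void
example_drive_engine (lsquic_engine_t *engine, void *peer_ctx)
{
    unsigned char buf[0x600];
    struct sockaddr_storage local_sa, peer_sa;
    ssize_t nread;
    int diff, ecn;

    for (;;)
    {
        /* Feed every datagram read from the socket to the engine: */
        while (0 < (nread = recv_udp_datagram(buf, sizeof(buf),
                                                &local_sa, &peer_sa, &ecn)))
            (void) lsquic_engine_packet_in(engine, buf, (size_t) nread,
                        (struct sockaddr *) &local_sa,
                        (struct sockaddr *) &peer_sa, peer_ctx, ecn);

        /* Tick connections that have work to do: */
        lsquic_engine_process_conns(engine);

        /* If an earlier send failed, try to flush the backlog (a real
         * application would normally do this when the socket becomes
         * writable again):
         */
        if (lsquic_engine_has_unsent_packets(engine))
            lsquic_engine_send_unsent_packets(engine);

        /* Sleep until the advisory tick time or until the socket becomes
         * readable:
         */
        if (lsquic_engine_earliest_adv_tick(engine, &diff))
            wait_readable_usec(diff > 0 ? diff : 0);
        else
            wait_readable_usec(-1);     /* No timer pending: wait for packets */
    }
}
#endif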