lsquic_engine.c revision f4841319
1/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_engine.c - QUIC engine 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <limits.h> 10#include <stdint.h> 11#include <stdio.h> 12#include <stdlib.h> 13#include <string.h> 14#include <sys/queue.h> 15#include <time.h> 16#ifndef WIN32 17#include <sys/time.h> 18#include <netinet/in.h> 19#include <sys/types.h> 20#include <sys/stat.h> 21#include <fcntl.h> 22#include <unistd.h> 23#include <netdb.h> 24#endif 25 26#ifndef NDEBUG 27#include <sys/types.h> 28#include <regex.h> /* For code that loses packets */ 29#endif 30 31#if LOG_PACKET_CHECKSUM 32#include <zlib.h> 33#endif 34 35#include <openssl/aead.h> 36 37#include "lsquic.h" 38#include "lsquic_types.h" 39#include "lsquic_int_types.h" 40#include "lsquic_sizes.h" 41#include "lsquic_parse_common.h" 42#include "lsquic_parse.h" 43#include "lsquic_packet_in.h" 44#include "lsquic_packet_out.h" 45#include "lsquic_senhist.h" 46#include "lsquic_rtt.h" 47#include "lsquic_cubic.h" 48#include "lsquic_pacer.h" 49#include "lsquic_bw_sampler.h" 50#include "lsquic_minmax.h" 51#include "lsquic_bbr.h" 52#include "lsquic_send_ctl.h" 53#include "lsquic_set.h" 54#include "lsquic_conn_flow.h" 55#include "lsquic_sfcw.h" 56#include "lsquic_hash.h" 57#include "lsquic_conn.h" 58#include "lsquic_full_conn.h" 59#include "lsquic_util.h" 60#include "lsquic_qtags.h" 61#include "lsquic_enc_sess.h" 62#include "lsquic_mm.h" 63#include "lsquic_engine_public.h" 64#include "lsquic_eng_hist.h" 65#include "lsquic_ev_log.h" 66#include "lsquic_version.h" 67#include "lsquic_pr_queue.h" 68#include "lsquic_mini_conn.h" 69#include "lsquic_mini_conn_ietf.h" 70#include "lsquic_stock_shi.h" 71#include "lsquic_purga.h" 72#include "lsquic_tokgen.h" 73#include "lsquic_attq.h" 74#include "lsquic_min_heap.h" 75#include "lsquic_http1x_if.h" 76#include "lsquic_handshake.h" 77#include "lsquic_crand.h" 78#include "lsquic_ietf.h" 79 80#define LSQUIC_LOGGER_MODULE LSQLM_ENGINE 81#include "lsquic_logger.h" 82 83#ifndef LSQUIC_DEBUG_NEXT_ADV_TICK 84#define LSQUIC_DEBUG_NEXT_ADV_TICK 1 85#endif 86 87#if LSQUIC_DEBUG_NEXT_ADV_TICK 88#include "lsquic_alarmset.h" 89#endif 90 91#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) 92 93/* The batch of outgoing packets grows and shrinks dynamically */ 94#define MAX_OUT_BATCH_SIZE 1024 95#define MIN_OUT_BATCH_SIZE 4 96#define INITIAL_OUT_BATCH_SIZE 32 97 98struct out_batch 99{ 100 lsquic_conn_t *conns [MAX_OUT_BATCH_SIZE]; 101 struct lsquic_out_spec outs [MAX_OUT_BATCH_SIZE]; 102 unsigned pack_off[MAX_OUT_BATCH_SIZE]; 103 lsquic_packet_out_t *packets[MAX_OUT_BATCH_SIZE * 2]; 104 struct iovec iov [MAX_OUT_BATCH_SIZE * 2]; 105}; 106 107typedef struct lsquic_conn * (*conn_iter_f)(struct lsquic_engine *); 108 109static void 110process_connections (struct lsquic_engine *engine, conn_iter_f iter, 111 lsquic_time_t now); 112 113static void 114engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag); 115 116static lsquic_conn_t * 117engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 118 enum lsquic_conn_flags flag); 119 120static void 121force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn); 122 123#if LSQUIC_COUNT_ENGINE_CALLS 124#define ENGINE_CALLS_INCR(e) do { ++(e)->n_engine_calls; } while (0) 125#else 126#define ENGINE_CALLS_INCR(e) 127#endif 128 129/* Nested calls to LSQUIC are not supported */ 130#define ENGINE_IN(e) do { \ 131 assert(!((e)->pub.enp_flags & ENPUB_PROC)); \ 132 (e)->pub.enp_flags |= ENPUB_PROC; \ 133 ENGINE_CALLS_INCR(e); \ 134} while (0) 135 136#define ENGINE_OUT(e) do { \ 137 assert((e)->pub.enp_flags & ENPUB_PROC); \ 138 (e)->pub.enp_flags &= ~ENPUB_PROC; \ 139} while (0) 140 141/* A connection can be referenced from one of six places: 142 * 143 * 1. A hash. The engine maintains two hash tables -- one for full, and 144 * one for mini connections. A connection starts its life in one of 145 * those. 146 * 147 * 2. Outgoing queue. 148 * 149 * 3. Tickable queue 150 * 151 * 4. Advisory Tick Time queue. 152 * 153 * 5. Closing connections queue. This is a transient queue -- it only 154 * exists for the duration of process_connections() function call. 155 * 156 * 6. Ticked connections queue. Another transient queue, similar to (5). 157 * 158 * The idea is to destroy the connection when it is no longer referenced. 159 * For example, a connection tick may return TICK_SEND|TICK_CLOSE. In 160 * that case, the connection is referenced from two places: (2) and (5). 161 * After its packets are sent, it is only referenced in (5), and at the 162 * end of the function call, when it is removed from (5), reference count 163 * goes to zero and the connection is destroyed. If not all packets can 164 * be sent, at the end of the function call, the connection is referenced 165 * by (2) and will only be removed once all outgoing packets have been 166 * sent. 167 */ 168#define CONN_REF_FLAGS (LSCONN_HASHED \ 169 |LSCONN_HAS_OUTGOING \ 170 |LSCONN_TICKABLE \ 171 |LSCONN_TICKED \ 172 |LSCONN_CLOSING \ 173 |LSCONN_ATTQ) 174 175 176 177 178struct cid_update_batch 179{ 180 lsquic_cids_update_f cub_update_cids; 181 void *cub_update_ctx; 182 unsigned cub_count; 183 lsquic_cid_t cub_cids[20]; 184 void *cub_peer_ctxs[20]; 185}; 186 187static void 188cub_init (struct cid_update_batch *, lsquic_cids_update_f, void *); 189 190 191struct lsquic_engine 192{ 193 struct lsquic_engine_public pub; 194 enum { 195 ENG_SERVER = LSENG_SERVER, 196 ENG_HTTP = LSENG_HTTP, 197 ENG_COOLDOWN = (1 << 7), /* Cooldown: no new connections */ 198 ENG_PAST_DEADLINE 199 = (1 << 8), /* Previous call to a processing 200 * function went past time threshold. 
201 */ 202 ENG_CONNS_BY_ADDR 203 = (1 << 9), /* Connections are hashed by address */ 204#ifndef NDEBUG 205 ENG_COALESCE = (1 << 24), /* Packet coalescing is enabled */ 206 ENG_LOSE_PACKETS= (1 << 25), /* Lose *some* outgoing packets */ 207 ENG_DTOR = (1 << 26), /* Engine destructor */ 208#endif 209 } flags; 210 lsquic_packets_out_f packets_out; 211 void *packets_out_ctx; 212 lsquic_cids_update_f report_new_scids; 213 lsquic_cids_update_f report_live_scids; 214 lsquic_cids_update_f report_old_scids; 215 void *scids_ctx; 216 struct lsquic_hash *conns_hash; 217 struct min_heap conns_tickable; 218 struct min_heap conns_out; 219 /* Use a union because only one iterator is being used at any one time */ 220 union { 221 struct { 222 struct cert_susp_head *head; 223 } resumed; 224 struct lsquic_conn *one_conn; 225 } iter_state; 226 struct eng_hist history; 227 unsigned batch_size; 228 struct pr_queue *pr_queue; 229 struct attq *attq; 230 /* Track time last time a packet was sent to give new connections 231 * priority lower than that of existing connections. 232 */ 233 lsquic_time_t last_sent; 234#ifndef NDEBUG 235 regex_t lose_packets_re; 236 const char *lose_packets_str; 237#endif 238 unsigned n_conns; 239 lsquic_time_t deadline; 240 lsquic_time_t resume_sending_at; 241 unsigned mini_conns_count; 242 struct lsquic_purga *purga; 243#if LSQUIC_CONN_STATS 244 struct { 245 unsigned conns; 246 } stats; 247 struct conn_stats conn_stats_sum; 248 FILE *stats_fh; 249#endif 250 struct cid_update_batch new_scids; 251 struct out_batch out_batch; 252#if LSQUIC_COUNT_ENGINE_CALLS 253 unsigned long n_engine_calls; 254#endif 255#if LSQUIC_DEBUG_NEXT_ADV_TICK 256 uintptr_t last_logged_conn; 257 unsigned last_logged_ae_why; 258 int last_tick_diff; 259#endif 260 struct crand crand; 261 EVP_AEAD_CTX retry_aead_ctx; 262}; 263 264 265void 266lsquic_engine_init_settings (struct lsquic_engine_settings *settings, 267 unsigned flags) 268{ 269 memset(settings, 0, sizeof(*settings)); 270 settings->es_versions = LSQUIC_DF_VERSIONS; 271 if (flags & ENG_SERVER) 272 { 273 settings->es_cfcw = LSQUIC_DF_CFCW_SERVER; 274 settings->es_sfcw = LSQUIC_DF_SFCW_SERVER; 275 settings->es_init_max_data 276 = LSQUIC_DF_INIT_MAX_DATA_SERVER; 277 settings->es_init_max_stream_data_bidi_remote 278 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_SERVER; 279 settings->es_init_max_stream_data_bidi_local 280 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_SERVER; 281 settings->es_init_max_stream_data_uni 282 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_SERVER; 283 settings->es_init_max_streams_uni 284 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_SERVER; 285 settings->es_ping_period = 0; 286 } 287 else 288 { 289 settings->es_cfcw = LSQUIC_DF_CFCW_CLIENT; 290 settings->es_sfcw = LSQUIC_DF_SFCW_CLIENT; 291 settings->es_init_max_data 292 = LSQUIC_DF_INIT_MAX_DATA_CLIENT; 293 settings->es_init_max_stream_data_bidi_remote 294 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_CLIENT; 295 settings->es_init_max_stream_data_bidi_local 296 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_CLIENT; 297 settings->es_init_max_stream_data_uni 298 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_CLIENT; 299 settings->es_init_max_streams_uni 300 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_CLIENT; 301 settings->es_ping_period = LSQUIC_DF_PING_PERIOD; 302 } 303 settings->es_max_streams_in = LSQUIC_DF_MAX_STREAMS_IN; 304 settings->es_idle_conn_to = LSQUIC_DF_IDLE_CONN_TO; 305 settings->es_idle_timeout = LSQUIC_DF_IDLE_TIMEOUT; 306 settings->es_handshake_to = LSQUIC_DF_HANDSHAKE_TO; 307 settings->es_silent_close = 
LSQUIC_DF_SILENT_CLOSE; 308 settings->es_max_header_list_size 309 = LSQUIC_DF_MAX_HEADER_LIST_SIZE; 310 settings->es_ua = LSQUIC_DF_UA; 311 settings->es_ecn = LSQUIC_DF_ECN; 312 313 settings->es_pdmd = QTAG_X509; 314 settings->es_aead = QTAG_AESG; 315 settings->es_kexs = QTAG_C255; 316 settings->es_support_push = LSQUIC_DF_SUPPORT_PUSH; 317 settings->es_support_tcid0 = LSQUIC_DF_SUPPORT_TCID0; 318 settings->es_support_nstp = LSQUIC_DF_SUPPORT_NSTP; 319 settings->es_honor_prst = LSQUIC_DF_HONOR_PRST; 320 settings->es_progress_check = LSQUIC_DF_PROGRESS_CHECK; 321 settings->es_rw_once = LSQUIC_DF_RW_ONCE; 322 settings->es_proc_time_thresh= LSQUIC_DF_PROC_TIME_THRESH; 323 settings->es_pace_packets = LSQUIC_DF_PACE_PACKETS; 324 settings->es_clock_granularity = LSQUIC_DF_CLOCK_GRANULARITY; 325 settings->es_max_inchoate = LSQUIC_DF_MAX_INCHOATE; 326 settings->es_send_prst = LSQUIC_DF_SEND_PRST; 327 settings->es_sttl = LSQUIC_DF_STTL; 328 settings->es_init_max_streams_bidi 329 = LSQUIC_DF_INIT_MAX_STREAMS_BIDI; 330 settings->es_scid_len = LSQUIC_DF_SCID_LEN; 331 settings->es_scid_iss_rate = LSQUIC_DF_SCID_ISS_RATE; 332 settings->es_qpack_dec_max_size = LSQUIC_DF_QPACK_DEC_MAX_SIZE; 333 settings->es_qpack_dec_max_blocked = LSQUIC_DF_QPACK_DEC_MAX_BLOCKED; 334 settings->es_qpack_enc_max_size = LSQUIC_DF_QPACK_ENC_MAX_SIZE; 335 settings->es_qpack_enc_max_blocked = LSQUIC_DF_QPACK_ENC_MAX_BLOCKED; 336 settings->es_allow_migration = LSQUIC_DF_ALLOW_MIGRATION; 337 settings->es_ql_bits = LSQUIC_DF_QL_BITS; 338 settings->es_spin = LSQUIC_DF_SPIN; 339} 340 341 342/* Note: if returning an error, err_buf must be valid if non-NULL */ 343int 344lsquic_engine_check_settings (const struct lsquic_engine_settings *settings, 345 unsigned flags, 346 char *err_buf, size_t err_buf_sz) 347{ 348 if (settings->es_cfcw < LSQUIC_MIN_FCW || 349 settings->es_sfcw < LSQUIC_MIN_FCW) 350 { 351 if (err_buf) 352 snprintf(err_buf, err_buf_sz, "%s", 353 "flow control window set too low"); 354 return -1; 355 } 356 if (0 == (settings->es_versions & LSQUIC_SUPPORTED_VERSIONS)) 357 { 358 if (err_buf) 359 snprintf(err_buf, err_buf_sz, "%s", 360 "No supported QUIC versions specified"); 361 return -1; 362 } 363 if (settings->es_versions & ~LSQUIC_SUPPORTED_VERSIONS) 364 { 365 if (err_buf) 366 snprintf(err_buf, err_buf_sz, "%s", 367 "one or more unsupported QUIC version is specified"); 368 return -1; 369 } 370 if (flags & ENG_SERVER) 371 { 372 if (settings->es_handshake_to > 373 MAX_MINI_CONN_LIFESPAN_IN_USEC) 374 { 375 if (err_buf) 376 snprintf(err_buf, err_buf_sz, "handshake timeout %lu" 377 " usec is too large. 
The maximum for server is %u usec", 378 settings->es_handshake_to, MAX_MINI_CONN_LIFESPAN_IN_USEC); 379 return -1; 380 } 381 } 382 if (settings->es_idle_timeout > 600) 383 { 384 if (err_buf) 385 snprintf(err_buf, err_buf_sz, "%s", 386 "The maximum value of idle timeout is 600 seconds"); 387 return -1; 388 } 389 if (settings->es_scid_len > MAX_CID_LEN) 390 { 391 if (err_buf) 392 snprintf(err_buf, err_buf_sz, "Source connection ID cannot be %u " 393 "bytes long; it must be between 0 and %u.", 394 settings->es_scid_len, MAX_CID_LEN); 395 return -1; 396 } 397 398 if (settings->es_cc_algo > 2) 399 { 400 if (err_buf) 401 snprintf(err_buf, err_buf_sz, "Invalid congestion control " 402 "algorithm value %u", settings->es_cc_algo); 403 return -1; 404 } 405 406 if (!(settings->es_ql_bits >= -1 && settings->es_ql_bits <= 2)) 407 { 408 if (err_buf) 409 snprintf(err_buf, err_buf_sz, "Invalid QL bits value %d ", 410 settings->es_ql_bits); 411 return -1; 412 } 413 414 if (!(settings->es_spin == 0 || settings->es_spin == 1)) 415 { 416 if (err_buf) 417 snprintf(err_buf, err_buf_sz, "Invalid spin value %d", 418 settings->es_spin); 419 return -1; 420 } 421 422 return 0; 423} 424 425 426static void 427free_packet (void *ctx, void *conn_ctx, void *packet_data, char is_ipv6) 428{ 429 free(packet_data); 430} 431 432 433static void * 434malloc_buf (void *ctx, void *conn_ctx, unsigned short size, char is_ipv6) 435{ 436 return malloc(size); 437} 438 439 440static const struct lsquic_packout_mem_if stock_pmi = 441{ 442 malloc_buf, free_packet, free_packet, 443}; 444 445 446static int 447hash_conns_by_addr (const struct lsquic_engine *engine) 448{ 449 if (engine->flags & ENG_SERVER) 450 return 0; 451 if (engine->pub.enp_settings.es_versions & LSQUIC_FORCED_TCID0_VERSIONS) 452 return 1; 453 if ((engine->pub.enp_settings.es_versions & LSQUIC_GQUIC_HEADER_VERSIONS) 454 && engine->pub.enp_settings.es_support_tcid0) 455 return 1; 456 if (engine->pub.enp_settings.es_scid_len == 0) 457 return 1; 458 return 0; 459} 460 461 462lsquic_engine_t * 463lsquic_engine_new (unsigned flags, 464 const struct lsquic_engine_api *api) 465{ 466 lsquic_engine_t *engine; 467 char err_buf[100]; 468 469 if (!api->ea_packets_out) 470 { 471 LSQ_ERROR("packets_out callback is not specified"); 472 return NULL; 473 } 474 475 if (api->ea_settings && 476 0 != lsquic_engine_check_settings(api->ea_settings, flags, 477 err_buf, sizeof(err_buf))) 478 { 479 LSQ_ERROR("cannot create engine: %s", err_buf); 480 return NULL; 481 } 482 483 engine = calloc(1, sizeof(*engine)); 484 if (!engine) 485 return NULL; 486 if (0 != lsquic_mm_init(&engine->pub.enp_mm)) 487 { 488 free(engine); 489 return NULL; 490 } 491 if (api->ea_settings) 492 engine->pub.enp_settings = *api->ea_settings; 493 else 494 lsquic_engine_init_settings(&engine->pub.enp_settings, flags); 495 int tag_buf_len; 496 tag_buf_len = lsquic_gen_ver_tags(engine->pub.enp_ver_tags_buf, 497 sizeof(engine->pub.enp_ver_tags_buf), 498 engine->pub.enp_settings.es_versions); 499 if (tag_buf_len <= 0) 500 { 501 LSQ_ERROR("cannot generate version tags buffer"); 502 free(engine); 503 return NULL; 504 } 505 engine->pub.enp_ver_tags_len = tag_buf_len; 506 engine->pub.enp_flags = ENPUB_CAN_SEND; 507 engine->pub.enp_stream_if = api->ea_stream_if; 508 engine->pub.enp_stream_if_ctx = api->ea_stream_if_ctx; 509 510 engine->flags = flags; 511#ifndef NDEBUG 512 engine->flags |= ENG_COALESCE; 513#endif 514 engine->packets_out = api->ea_packets_out; 515 engine->packets_out_ctx = api->ea_packets_out_ctx; 516 
engine->report_new_scids = api->ea_new_scids; 517 engine->report_live_scids = api->ea_live_scids; 518 engine->report_old_scids = api->ea_old_scids; 519 engine->scids_ctx = api->ea_cids_update_ctx; 520 cub_init(&engine->new_scids, engine->report_new_scids, engine->scids_ctx); 521 engine->pub.enp_lookup_cert = api->ea_lookup_cert; 522 engine->pub.enp_cert_lu_ctx = api->ea_cert_lu_ctx; 523 engine->pub.enp_get_ssl_ctx = api->ea_get_ssl_ctx; 524 if (api->ea_shi) 525 { 526 engine->pub.enp_shi = api->ea_shi; 527 engine->pub.enp_shi_ctx = api->ea_shi_ctx; 528 } 529 else 530 { 531 engine->pub.enp_shi = &stock_shi; 532 engine->pub.enp_shi_ctx = stock_shared_hash_new(); 533 if (!engine->pub.enp_shi_ctx) 534 { 535 free(engine); 536 return NULL; 537 } 538 } 539 if (api->ea_hsi_if) 540 { 541 engine->pub.enp_hsi_if = api->ea_hsi_if; 542 engine->pub.enp_hsi_ctx = api->ea_hsi_ctx; 543 } 544 else 545 { 546 engine->pub.enp_hsi_if = lsquic_http1x_if; 547 engine->pub.enp_hsi_ctx = NULL; 548 } 549 if (api->ea_pmi) 550 { 551 engine->pub.enp_pmi = api->ea_pmi; 552 engine->pub.enp_pmi_ctx = api->ea_pmi_ctx; 553 } 554 else 555 { 556 engine->pub.enp_pmi = &stock_pmi; 557 engine->pub.enp_pmi_ctx = NULL; 558 } 559 engine->pub.enp_verify_cert = api->ea_verify_cert; 560 engine->pub.enp_verify_ctx = api->ea_verify_ctx; 561 engine->pub.enp_kli = api->ea_keylog_if; 562 engine->pub.enp_kli_ctx = api->ea_keylog_ctx; 563 engine->pub.enp_engine = engine; 564 if (hash_conns_by_addr(engine)) 565 engine->flags |= ENG_CONNS_BY_ADDR; 566 engine->conns_hash = lsquic_hash_create(); 567 engine->pub.enp_tokgen = lsquic_tg_new(&engine->pub); 568 if (!engine->pub.enp_tokgen) 569 return NULL; 570 engine->pub.enp_crand = &engine->crand; 571 if (flags & ENG_SERVER) 572 { 573 engine->pr_queue = prq_create( 574 10000 /* TODO: make configurable */, MAX_OUT_BATCH_SIZE, 575 &engine->pub); 576 if (!engine->pr_queue) 577 { 578 lsquic_tg_destroy(engine->pub.enp_tokgen); 579 return NULL; 580 } 581 engine->purga = lsquic_purga_new(30 * 1000 * 1000, 582 engine->report_old_scids, engine->scids_ctx); 583 if (!engine->purga) 584 { 585 lsquic_tg_destroy(engine->pub.enp_tokgen); 586 prq_destroy(engine->pr_queue); 587 return NULL; 588 } 589 } 590 engine->attq = attq_create(); 591 eng_hist_init(&engine->history); 592 engine->batch_size = INITIAL_OUT_BATCH_SIZE; 593 if (engine->pub.enp_settings.es_honor_prst) 594 { 595 engine->pub.enp_srst_hash = lsquic_hash_create(); 596 if (!engine->pub.enp_srst_hash) 597 { 598 lsquic_engine_destroy(engine); 599 return NULL; 600 } 601 } 602 603#ifndef NDEBUG 604 { 605 const char *env; 606 env = getenv("LSQUIC_LOSE_PACKETS_RE"); 607 if (env) 608 { 609 if (0 != regcomp(&engine->lose_packets_re, env, 610 REG_EXTENDED|REG_NOSUB)) 611 { 612 LSQ_ERROR("could not compile lost packet regex `%s'", env); 613 return NULL; 614 } 615 engine->flags |= ENG_LOSE_PACKETS; 616 engine->lose_packets_str = env; 617 LSQ_WARN("will lose packets that match the following regex: %s", 618 env); 619 } 620 env = getenv("LSQUIC_COALESCE"); 621 if (env) 622 { 623 engine->flags &= ~ENG_COALESCE; 624 if (atoi(env)) 625 { 626 engine->flags |= ENG_COALESCE; 627 LSQ_NOTICE("will coalesce packets"); 628 } 629 else 630 LSQ_NOTICE("will not coalesce packets"); 631 } 632 } 633#endif 634#if LSQUIC_CONN_STATS 635 engine->stats_fh = api->ea_stats_fh; 636#endif 637 if (1 != EVP_AEAD_CTX_init(&engine->retry_aead_ctx, EVP_aead_aes_128_gcm(), 638 IETF_RETRY_KEY_BUF, IETF_RETRY_KEY_SZ, 16, NULL)) 639 { 640 LSQ_ERROR("could not initialize retry AEAD ctx"); 641 
lsquic_engine_destroy(engine); 642 return NULL; 643 } 644 engine->pub.enp_retry_aead_ctx = &engine->retry_aead_ctx; 645 646 LSQ_INFO("instantiated engine"); 647 return engine; 648} 649 650 651#if LOG_PACKET_CHECKSUM 652static void 653log_packet_checksum (const lsquic_cid_t *cid, const char *direction, 654 const unsigned char *buf, size_t bufsz) 655{ 656 EV_LOG_CONN_EVENT(cid, "packet %s checksum: %08X", direction, 657 (uint32_t) crc32(0, buf, bufsz)); 658} 659 660 661#endif 662 663 664static void 665grow_batch_size (struct lsquic_engine *engine) 666{ 667 engine->batch_size <<= engine->batch_size < MAX_OUT_BATCH_SIZE; 668} 669 670 671static void 672shrink_batch_size (struct lsquic_engine *engine) 673{ 674 engine->batch_size >>= engine->batch_size > MIN_OUT_BATCH_SIZE; 675} 676 677 678struct cce_cid_iter 679{ 680 const struct lsquic_conn *conn; 681 unsigned todo, n; 682}; 683 684 685static struct conn_cid_elem * 686cce_iter_next (struct cce_cid_iter *citer) 687{ 688 struct conn_cid_elem *cce; 689 690 while (citer->todo) 691 if (citer->todo & (1 << citer->n)) 692 { 693 citer->todo &= ~(1 << citer->n); 694 cce = &citer->conn->cn_cces[ citer->n++ ]; 695 if (!(cce->cce_flags & CCE_PORT)) 696 return cce; 697 } 698 else 699 ++citer->n; 700 701 return NULL; 702} 703 704 705static struct conn_cid_elem * 706cce_iter_first (struct cce_cid_iter *citer, const struct lsquic_conn *conn) 707{ 708 citer->conn = conn; 709 citer->todo = conn->cn_cces_mask; 710 citer->n = 0; 711 return cce_iter_next(citer); 712} 713 714 715#if LSQUIC_CONN_STATS 716void 717update_stats_sum (struct lsquic_engine *engine, struct lsquic_conn *conn) 718{ 719 unsigned long *const dst = (unsigned long *) &engine->conn_stats_sum; 720 const unsigned long *src; 721 const struct conn_stats *stats; 722 unsigned i; 723 724 if (conn->cn_if->ci_get_stats && (stats = conn->cn_if->ci_get_stats(conn))) 725 { 726 ++engine->stats.conns; 727 src = (unsigned long *) stats; 728 for (i = 0; i < sizeof(*stats) / sizeof(unsigned long); ++i) 729 dst[i] += src[i]; 730 } 731} 732 733 734#endif 735 736 737/* Wrapper to make sure important things occur before the connection is 738 * really destroyed. 
739 */ 740static void 741destroy_conn (struct lsquic_engine *engine, struct lsquic_conn *conn, 742 lsquic_time_t now) 743{ 744 struct cce_cid_iter citer; 745 const struct conn_cid_elem *cce; 746 lsquic_time_t drain_time; 747 struct purga_el *puel; 748 749 engine->mini_conns_count -= !!(conn->cn_flags & LSCONN_MINI); 750 if (engine->purga 751 /* Blacklist all CIDs except for promoted mini connections */ 752 && (conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) 753 != (LSCONN_MINI|LSCONN_PROMOTED)) 754 { 755 if (!(conn->cn_flags & LSCONN_IMMED_CLOSE) 756 && conn->cn_if->ci_drain_time && 757 (drain_time = conn->cn_if->ci_drain_time(conn), drain_time)) 758 { 759 for (cce = cce_iter_first(&citer, conn); cce; 760 cce = cce_iter_next(&citer)) 761 { 762 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 763 lsquic_conn_get_peer_ctx(conn, NULL), 764 PUTY_CONN_DRAIN, now); 765 if (puel) 766 puel->puel_time = now + drain_time; 767 } 768 } 769 else 770 { 771 for (cce = cce_iter_first(&citer, conn); cce; 772 cce = cce_iter_next(&citer)) 773 { 774 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 775 lsquic_conn_get_peer_ctx(conn, NULL), 776 PUTY_CONN_DELETED, now); 777 if (puel) 778 { 779 puel->puel_time = now; 780 puel->puel_count = 0; 781 } 782 } 783 } 784 } 785#if LSQUIC_CONN_STATS 786 update_stats_sum(engine, conn); 787#endif 788 --engine->n_conns; 789 conn->cn_flags |= LSCONN_NEVER_TICKABLE; 790 conn->cn_if->ci_destroy(conn); 791} 792 793 794static int 795maybe_grow_conn_heaps (struct lsquic_engine *engine) 796{ 797 struct min_heap_elem *els; 798 unsigned count; 799 800 if (engine->n_conns < lsquic_mh_nalloc(&engine->conns_tickable)) 801 return 0; /* Nothing to do */ 802 803 if (lsquic_mh_nalloc(&engine->conns_tickable)) 804 count = lsquic_mh_nalloc(&engine->conns_tickable) * 2 * 2; 805 else 806 count = 8; 807 808 els = malloc(sizeof(els[0]) * count); 809 if (!els) 810 { 811 LSQ_ERROR("%s: malloc failed", __func__); 812 return -1; 813 } 814 815 LSQ_DEBUG("grew heaps to %u elements", count / 2); 816 memcpy(&els[0], engine->conns_tickable.mh_elems, 817 sizeof(els[0]) * lsquic_mh_count(&engine->conns_tickable)); 818 memcpy(&els[count / 2], engine->conns_out.mh_elems, 819 sizeof(els[0]) * lsquic_mh_count(&engine->conns_out)); 820 free(engine->conns_tickable.mh_elems); 821 engine->conns_tickable.mh_elems = els; 822 engine->conns_out.mh_elems = &els[count / 2]; 823 engine->conns_tickable.mh_nalloc = count / 2; 824 engine->conns_out.mh_nalloc = count / 2; 825 return 0; 826} 827 828 829static void 830remove_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn, 831 unsigned todo) 832{ 833 unsigned n; 834 835 for (n = 0; todo; todo &= ~(1 << n++)) 836 if ((todo & (1 << n)) && 837 (conn->cn_cces[n].cce_hash_el.qhe_flags & QHE_HASHED)) 838 lsquic_hash_erase(hash, &conn->cn_cces[n].cce_hash_el); 839} 840 841 842static void 843remove_all_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn) 844{ 845 remove_cces_from_hash(hash, conn, conn->cn_cces_mask); 846} 847 848 849static void 850cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx); 851 852 853static int 854insert_conn_into_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 855 void *peer_ctx) 856{ 857 struct conn_cid_elem *cce; 858 unsigned todo, done, n; 859 860 for (todo = conn->cn_cces_mask, done = 0, n = 0; todo; todo &= ~(1 << n++)) 861 if (todo & (1 << n)) 862 { 863 cce = &conn->cn_cces[n]; 864 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 865 if 
(lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 866 cce->cce_cid.len, conn, &cce->cce_hash_el)) 867 done |= 1 << n; 868 else 869 goto err; 870 if ((engine->flags & ENG_SERVER) && 0 == (cce->cce_flags & CCE_REG)) 871 { 872 cce->cce_flags |= CCE_REG; 873 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 874 } 875 } 876 877 return 0; 878 879 err: 880 remove_cces_from_hash(engine->conns_hash, conn, done); 881 return -1; 882} 883 884 885static lsquic_conn_t * 886new_full_conn_server (lsquic_engine_t *engine, lsquic_conn_t *mini_conn, 887 lsquic_time_t now) 888{ 889 const lsquic_cid_t *cid; 890 server_conn_ctor_f ctor; 891 lsquic_conn_t *conn; 892 unsigned flags; 893 if (0 != maybe_grow_conn_heaps(engine)) 894 return NULL; 895 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 896 897 if (mini_conn->cn_flags & LSCONN_IETF) 898 { 899 if (mini_conn->cn_version == LSQVER_ID24) 900 ctor = lsquic_id24_full_conn_server_new; 901 else 902 ctor = lsquic_ietf_full_conn_server_new; 903 } 904 else 905 ctor = lsquic_gquic_full_conn_server_new; 906 907 conn = ctor(&engine->pub, flags, mini_conn); 908 if (!conn) 909 { 910 /* Otherwise, full_conn_server_new prints its own warnings */ 911 if (ENOMEM == errno) 912 { 913 cid = lsquic_conn_log_cid(mini_conn); 914 LSQ_WARNC("could not allocate full connection for %"CID_FMT": %s", 915 CID_BITS(cid), strerror(errno)); 916 } 917 return NULL; 918 } 919 ++engine->n_conns; 920 if (0 != insert_conn_into_hash(engine, conn, lsquic_conn_get_peer_ctx(conn, NULL))) 921 { 922 cid = lsquic_conn_log_cid(conn); 923 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 924 CID_BITS(cid)); 925 destroy_conn(engine, conn, now); 926 return NULL; 927 } 928 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 929 conn->cn_flags |= LSCONN_HASHED; 930 return conn; 931} 932 933 934static enum 935{ 936 VER_NOT_SPECIFIED, 937 VER_SUPPORTED, 938 VER_UNSUPPORTED, 939} 940 941 942version_matches (lsquic_engine_t *engine, const lsquic_packet_in_t *packet_in, 943 enum lsquic_version *pversion) 944{ 945 lsquic_ver_tag_t ver_tag; 946 enum lsquic_version version; 947 948 if (!packet_in->pi_quic_ver) 949 { 950 LSQ_DEBUG("packet does not specify version"); 951 return VER_NOT_SPECIFIED; 952 } 953 954 memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver, sizeof(ver_tag)); 955 version = lsquic_tag2ver(ver_tag); 956 if (version < N_LSQVER) 957 { 958 if (engine->pub.enp_settings.es_versions & (1 << version)) 959 { 960 LSQ_DEBUG("client-supplied version %s is supported", 961 lsquic_ver2str[version]); 962 *pversion = version; 963 return VER_SUPPORTED; 964 } 965 else 966 LSQ_DEBUG("client-supplied version %s is not supported", 967 lsquic_ver2str[version]); 968 } 969 else 970 LSQ_DEBUG("client-supplied version tag 0x%08X is not recognized", 971 ver_tag); 972 973 return VER_UNSUPPORTED; 974} 975 976 977static void 978schedule_req_packet (struct lsquic_engine *engine, enum packet_req_type type, 979 const struct lsquic_packet_in *packet_in, const struct sockaddr *sa_local, 980 const struct sockaddr *sa_peer, void *peer_ctx) 981{ 982 assert(engine->pr_queue); 983 if (0 == prq_new_req(engine->pr_queue, type, packet_in, peer_ctx, 984 sa_local, sa_peer)) 985 LSQ_DEBUGC("scheduled %s packet for cid %"CID_FMT, 986 lsquic_preqt2str[type], CID_BITS(&packet_in->pi_conn_id)); 987 else 988 LSQ_DEBUG("cannot schedule %s packet", lsquic_preqt2str[type]); 989} 990 991 992static unsigned short 993sa2port (const struct sockaddr *sa) 994{ 995 if (sa->sa_family == AF_INET) 996 { 997 struct sockaddr_in *const sa4 
= (void *) sa; 998 return sa4->sin_port; 999 } 1000 else 1001 { 1002 struct sockaddr_in6 *const sa6 = (void *) sa; 1003 return sa6->sin6_port; 1004 } 1005} 1006 1007 1008static struct lsquic_hash_elem * 1009find_conn_by_addr (struct lsquic_hash *hash, const struct sockaddr *sa) 1010{ 1011 unsigned short port; 1012 1013 port = sa2port(sa); 1014 return lsquic_hash_find(hash, &port, sizeof(port)); 1015} 1016 1017 1018static lsquic_conn_t * 1019find_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1020 struct packin_parse_state *ppstate, const struct sockaddr *sa_local) 1021{ 1022 struct lsquic_hash_elem *el; 1023 lsquic_conn_t *conn; 1024 1025 if (engine->flags & ENG_CONNS_BY_ADDR) 1026 el = find_conn_by_addr(engine->conns_hash, sa_local); 1027 else if (packet_in->pi_flags & PI_CONN_ID) 1028 el = lsquic_hash_find(engine->conns_hash, 1029 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1030 else 1031 { 1032 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1033 return NULL; 1034 } 1035 1036 if (!el) 1037 return NULL; 1038 1039 conn = lsquic_hashelem_getdata(el); 1040 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1041 if ((engine->flags & ENG_CONNS_BY_ADDR) 1042 && !(conn->cn_flags & LSCONN_IETF) 1043 && (packet_in->pi_flags & PI_CONN_ID) 1044 && !LSQUIC_CIDS_EQ(CN_SCID(conn), &packet_in->pi_conn_id)) 1045 { 1046 LSQ_DEBUG("connection IDs do not match"); 1047 return NULL; 1048 } 1049 1050 return conn; 1051} 1052 1053 1054static lsquic_conn_t * 1055find_or_create_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1056 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1057 const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size) 1058{ 1059 struct lsquic_hash_elem *el; 1060 struct purga_el *puel; 1061 lsquic_conn_t *conn; 1062 1063 if (!(packet_in->pi_flags & PI_CONN_ID)) 1064 { 1065 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1066 return NULL; 1067 } 1068 el = lsquic_hash_find(engine->conns_hash, 1069 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1070 1071 if (el) 1072 { 1073 conn = lsquic_hashelem_getdata(el); 1074 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1075 return conn; 1076 } 1077 1078 if (engine->flags & ENG_COOLDOWN) 1079 { /* Do not create incoming connections during cooldown */ 1080 LSQ_DEBUG("dropping inbound packet for unknown connection (cooldown)"); 1081 return NULL; 1082 } 1083 1084 if (engine->mini_conns_count >= engine->pub.enp_settings.es_max_inchoate) 1085 { 1086 LSQ_DEBUG("reached limit of %u inchoate connections", 1087 engine->pub.enp_settings.es_max_inchoate); 1088 return NULL; 1089 } 1090 1091 1092 if (engine->purga 1093 && (puel = lsquic_purga_contains(engine->purga, 1094 &packet_in->pi_conn_id), puel)) 1095 { 1096 switch (puel->puel_type) 1097 { 1098 case PUTY_CID_RETIRED: 1099 LSQ_DEBUGC("CID %"CID_FMT" was retired, ignore packet", 1100 CID_BITS(&packet_in->pi_conn_id)); 1101 return NULL; 1102 case PUTY_CONN_DRAIN: 1103 LSQ_DEBUG("drain till: %"PRIu64"; now: %"PRIu64, 1104 puel->puel_time, packet_in->pi_received); 1105 if (puel->puel_time > packet_in->pi_received) 1106 { 1107 LSQ_DEBUGC("CID %"CID_FMT" is in drain state, ignore packet", 1108 CID_BITS(&packet_in->pi_conn_id)); 1109 return NULL; 1110 } 1111 LSQ_DEBUGC("CID %"CID_FMT" goes from drain state to deleted", 1112 CID_BITS(&packet_in->pi_conn_id)); 1113 puel->puel_type = PUTY_CONN_DELETED; 1114 puel->puel_count = 0; 1115 puel->puel_time = 0; 1116 /* fall-through */ 
1117 case PUTY_CONN_DELETED: 1118 LSQ_DEBUGC("Connection with CID %"CID_FMT" was deleted", 1119 CID_BITS(&packet_in->pi_conn_id)); 1120 if (puel->puel_time < packet_in->pi_received) 1121 { 1122 puel->puel_time = packet_in->pi_received 1123 /* Exponential back-off */ 1124 + 1000000ull * (1 << MIN(puel->puel_count, 4)); 1125 ++puel->puel_count; 1126 goto maybe_send_prst; 1127 } 1128 return NULL; 1129 default: 1130 assert(0); 1131 return NULL; 1132 } 1133 } 1134 1135 if (engine->pub.enp_settings.es_send_prst 1136 && !(packet_in->pi_flags & PI_GQUIC) 1137 && HETY_NOT_SET == packet_in->pi_header_type) 1138 goto maybe_send_prst; 1139 1140 if (0 != maybe_grow_conn_heaps(engine)) 1141 return NULL; 1142 1143 const struct parse_funcs *pf; 1144 enum lsquic_version version; 1145 switch (version_matches(engine, packet_in, &version)) 1146 { 1147 case VER_UNSUPPORTED: 1148 if (engine->flags & ENG_SERVER) 1149 schedule_req_packet(engine, PACKET_REQ_VERNEG, packet_in, 1150 sa_local, sa_peer, peer_ctx); 1151 return NULL; 1152 case VER_NOT_SPECIFIED: 1153 maybe_send_prst: 1154 if ((engine->flags & ENG_SERVER) && 1155 engine->pub.enp_settings.es_send_prst) 1156 schedule_req_packet(engine, PACKET_REQ_PUBRES, packet_in, 1157 sa_local, sa_peer, peer_ctx); 1158 return NULL; 1159 case VER_SUPPORTED: 1160 pf = select_pf_by_ver(version); 1161 pf->pf_parse_packet_in_finish(packet_in, ppstate); 1162 break; 1163 } 1164 1165 1166 if ((1 << version) & LSQUIC_IETF_VERSIONS) 1167 { 1168 conn = lsquic_mini_conn_ietf_new(&engine->pub, packet_in, version, 1169 sa_peer->sa_family == AF_INET, NULL, packet_in_size); 1170 } 1171 else 1172 { 1173 conn = mini_conn_new(&engine->pub, packet_in, version); 1174 } 1175 if (!conn) 1176 return NULL; 1177 ++engine->mini_conns_count; 1178 ++engine->n_conns; 1179 if (0 != insert_conn_into_hash(engine, conn, peer_ctx)) 1180 { 1181 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1182 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1183 CID_BITS(cid)); 1184 destroy_conn(engine, conn, packet_in->pi_received); 1185 return NULL; 1186 } 1187 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 1188 conn->cn_flags |= LSCONN_HASHED; 1189 eng_hist_inc(&engine->history, packet_in->pi_received, sl_new_mini_conns); 1190 conn->cn_last_sent = engine->last_sent; 1191 return conn; 1192} 1193 1194 1195lsquic_conn_t * 1196lsquic_engine_find_conn (const struct lsquic_engine_public *engine, 1197 const lsquic_cid_t *cid) 1198{ 1199 struct lsquic_hash_elem *el; 1200 lsquic_conn_t *conn = NULL; 1201 el = lsquic_hash_find(engine->enp_engine->conns_hash, cid->idbuf, cid->len); 1202 1203 if (el) 1204 conn = lsquic_hashelem_getdata(el); 1205 return conn; 1206} 1207 1208 1209#if !defined(NDEBUG) && __GNUC__ 1210__attribute__((weak)) 1211#endif 1212void 1213lsquic_engine_add_conn_to_tickable (struct lsquic_engine_public *enpub, 1214 lsquic_conn_t *conn) 1215{ 1216 if (0 == (enpub->enp_flags & ENPUB_PROC) && 1217 0 == (conn->cn_flags & (LSCONN_TICKABLE|LSCONN_NEVER_TICKABLE))) 1218 { 1219 lsquic_engine_t *engine = (lsquic_engine_t *) enpub; 1220 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1221 engine_incref_conn(conn, LSCONN_TICKABLE); 1222 } 1223} 1224 1225 1226void 1227lsquic_engine_add_conn_to_attq (struct lsquic_engine_public *enpub, 1228 lsquic_conn_t *conn, lsquic_time_t tick_time, unsigned why) 1229{ 1230 lsquic_engine_t *const engine = (lsquic_engine_t *) enpub; 1231 if (conn->cn_flags & LSCONN_TICKABLE) 1232 { 1233 /* Optimization: no need to add the connection to the 
Advisory Tick 1234 * Time Queue: it is about to be ticked, after which it its next tick 1235 * time may be queried again. 1236 */; 1237 } 1238 else if (conn->cn_flags & LSCONN_ATTQ) 1239 { 1240 if (lsquic_conn_adv_time(conn) != tick_time) 1241 { 1242 attq_remove(engine->attq, conn); 1243 if (0 != attq_add(engine->attq, conn, tick_time, why)) 1244 engine_decref_conn(engine, conn, LSCONN_ATTQ); 1245 } 1246 } 1247 else if (0 == attq_add(engine->attq, conn, tick_time, why)) 1248 engine_incref_conn(conn, LSCONN_ATTQ); 1249} 1250 1251 1252static struct lsquic_conn * 1253find_conn_by_srst (struct lsquic_engine *engine, 1254 const struct lsquic_packet_in *packet_in) 1255{ 1256 struct lsquic_hash_elem *el; 1257 struct lsquic_conn *conn; 1258 1259 if (packet_in->pi_data_sz < IQUIC_MIN_SRST_SIZE 1260 || (packet_in->pi_data[0] & 0xC0) != 0x40) 1261 return NULL; 1262 1263 el = lsquic_hash_find(engine->pub.enp_srst_hash, 1264 packet_in->pi_data + packet_in->pi_data_sz - IQUIC_SRESET_TOKEN_SZ, 1265 IQUIC_SRESET_TOKEN_SZ); 1266 if (!el) 1267 return NULL; 1268 1269 conn = lsquic_hashelem_getdata(el); 1270 return conn; 1271} 1272 1273 1274/* Return 0 if packet is being processed by a real connection (mini or full), 1275 * otherwise return 1. 1276 */ 1277static int 1278process_packet_in (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1279 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1280 const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size) 1281{ 1282 lsquic_conn_t *conn; 1283 const unsigned char *packet_in_data; 1284 1285 if (lsquic_packet_in_is_gquic_prst(packet_in) 1286 && !engine->pub.enp_settings.es_honor_prst) 1287 { 1288 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1289 LSQ_DEBUG("public reset packet: discarding"); 1290 return 1; 1291 } 1292 1293 if (engine->flags & ENG_SERVER) 1294 conn = find_or_create_conn(engine, packet_in, ppstate, sa_local, 1295 sa_peer, peer_ctx, packet_in_size); 1296 else 1297 conn = find_conn(engine, packet_in, ppstate, sa_local); 1298 1299 if (!conn) 1300 { 1301 if (engine->pub.enp_settings.es_honor_prst 1302 && packet_in_size == packet_in->pi_data_sz /* Full UDP packet */ 1303 && !(packet_in->pi_flags & PI_GQUIC) 1304 && engine->pub.enp_srst_hash 1305 && (conn = find_conn_by_srst(engine, packet_in))) 1306 { 1307 LSQ_DEBUGC("got stateless reset for connection %"CID_FMT, 1308 CID_BITS(lsquic_conn_log_cid(conn))); 1309 conn->cn_if->ci_stateless_reset(conn); 1310 if (!(conn->cn_flags & LSCONN_TICKABLE) 1311 && conn->cn_if->ci_is_tickable(conn)) 1312 { 1313 lsquic_mh_insert(&engine->conns_tickable, conn, 1314 conn->cn_last_ticked); 1315 engine_incref_conn(conn, LSCONN_TICKABLE); 1316 } 1317 /* Even though the connection processes this packet, we return 1318 * 1 so that the caller does not add reset packet's random 1319 * bytes to the list of valid CIDs. 
1320 */ 1321 } 1322 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1323 return 1; 1324 } 1325 1326 if (0 == (conn->cn_flags & LSCONN_TICKABLE)) 1327 { 1328 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1329 engine_incref_conn(conn, LSCONN_TICKABLE); 1330 } 1331 packet_in->pi_path_id = lsquic_conn_record_sockaddr(conn, peer_ctx, 1332 sa_local, sa_peer); 1333 lsquic_packet_in_upref(packet_in); 1334#if LOG_PACKET_CHECKSUM 1335 log_packet_checksum(lsquic_conn_log_cid(conn), "in", packet_in->pi_data, 1336 packet_in->pi_data_sz); 1337#endif 1338 /* Note on QLog: 1339 * For the PACKET_RX QLog event, we are interested in logging these things: 1340 * - raw packet (however it comes in, encrypted or not) 1341 * - frames (list of frame names) 1342 * - packet type and number 1343 * - packet rx timestamp 1344 * 1345 * Since only some of these items are available at this code 1346 * juncture, we will wait until after the packet has been 1347 * decrypted (if necessary) and parsed to call the log functions. 1348 * 1349 * Once the PACKET_RX event is finally logged, the timestamp 1350 * will come from packet_in->pi_received. For correct sequential 1351 * ordering of QLog events, be sure to process the QLogs downstream. 1352 * (Hint: Use the qlog_parser.py tool in tools/ for full QLog processing.) 1353 */ 1354 packet_in_data = packet_in->pi_data; 1355 packet_in_size = packet_in->pi_data_sz; 1356 conn->cn_if->ci_packet_in(conn, packet_in); 1357 QLOG_PACKET_RX(lsquic_conn_log_cid(conn), packet_in, packet_in_data, packet_in_size); 1358 lsquic_packet_in_put(&engine->pub.enp_mm, packet_in); 1359 return 0; 1360} 1361 1362 1363void 1364lsquic_engine_destroy (lsquic_engine_t *engine) 1365{ 1366 struct lsquic_hash_elem *el; 1367 lsquic_conn_t *conn; 1368 1369 LSQ_DEBUG("destroying engine"); 1370#ifndef NDEBUG 1371 engine->flags |= ENG_DTOR; 1372#endif 1373 1374 while ((conn = lsquic_mh_pop(&engine->conns_out))) 1375 { 1376 assert(conn->cn_flags & LSCONN_HAS_OUTGOING); 1377 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1378 } 1379 1380 while ((conn = lsquic_mh_pop(&engine->conns_tickable))) 1381 { 1382 assert(conn->cn_flags & LSCONN_TICKABLE); 1383 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1384 } 1385 1386 for (el = lsquic_hash_first(engine->conns_hash); el; 1387 el = lsquic_hash_next(engine->conns_hash)) 1388 { 1389 conn = lsquic_hashelem_getdata(el); 1390 force_close_conn(engine, conn); 1391 } 1392 lsquic_hash_destroy(engine->conns_hash); 1393 1394 assert(0 == engine->n_conns); 1395 assert(0 == engine->mini_conns_count); 1396 if (engine->pr_queue) 1397 prq_destroy(engine->pr_queue); 1398 if (engine->purga) 1399 lsquic_purga_destroy(engine->purga); 1400 attq_destroy(engine->attq); 1401 1402 assert(0 == lsquic_mh_count(&engine->conns_out)); 1403 assert(0 == lsquic_mh_count(&engine->conns_tickable)); 1404 if (engine->pub.enp_shi == &stock_shi) 1405 stock_shared_hash_destroy(engine->pub.enp_shi_ctx); 1406 lsquic_mm_cleanup(&engine->pub.enp_mm); 1407 free(engine->conns_tickable.mh_elems); 1408#ifndef NDEBUG 1409 if (engine->flags & ENG_LOSE_PACKETS) 1410 regfree(&engine->lose_packets_re); 1411#endif 1412 if (engine->pub.enp_tokgen) 1413 lsquic_tg_destroy(engine->pub.enp_tokgen); 1414#if LSQUIC_CONN_STATS 1415 if (engine->stats_fh) 1416 { 1417 const struct conn_stats *const stats = &engine->conn_stats_sum; 1418 fprintf(engine->stats_fh, "Aggregate connection stats collected by engine:\n"); 1419 fprintf(engine->stats_fh, "Connections: %u\n", 
engine->stats.conns); 1420 fprintf(engine->stats_fh, "Ticks: %lu\n", stats->n_ticks); 1421 fprintf(engine->stats_fh, "In:\n"); 1422 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->in.bytes); 1423 fprintf(engine->stats_fh, " packets: %lu\n", stats->in.packets); 1424 fprintf(engine->stats_fh, " undecryptable packets: %lu\n", stats->in.undec_packets); 1425 fprintf(engine->stats_fh, " duplicate packets: %lu\n", stats->in.dup_packets); 1426 fprintf(engine->stats_fh, " error packets: %lu\n", stats->in.err_packets); 1427 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->in.stream_frames); 1428 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->in.stream_data_sz); 1429 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1430 stats->in.headers_comp, stats->in.headers_uncomp, 1431 stats->in.headers_uncomp ? 1432 (double) stats->in.headers_comp / (double) stats->in.headers_uncomp 1433 : 0); 1434 fprintf(engine->stats_fh, " ACK frames: %lu\n", stats->in.n_acks); 1435 fprintf(engine->stats_fh, " ACK frames processed: %lu\n", stats->in.n_acks_proc); 1436 fprintf(engine->stats_fh, " ACK frames merged to new: %lu\n", stats->in.n_acks_merged[0]); 1437 fprintf(engine->stats_fh, " ACK frames merged to old: %lu\n", stats->in.n_acks_merged[1]); 1438 fprintf(engine->stats_fh, "Out:\n"); 1439 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->out.bytes); 1440 fprintf(engine->stats_fh, " packets: %lu\n", stats->out.packets); 1441 fprintf(engine->stats_fh, " acked via loss record: %lu\n", stats->out.acked_via_loss); 1442 fprintf(engine->stats_fh, " acks: %lu\n", stats->out.acks); 1443 fprintf(engine->stats_fh, " retx packets: %lu\n", stats->out.retx_packets); 1444 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->out.stream_frames); 1445 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->out.stream_data_sz); 1446 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1447 stats->out.headers_comp, stats->out.headers_uncomp, 1448 stats->out.headers_uncomp ? 
1449 (double) stats->out.headers_comp / (double) stats->out.headers_uncomp 1450 : 0); 1451 fprintf(engine->stats_fh, " ACKs: %lu\n", stats->out.acks); 1452 } 1453#endif 1454 if (engine->pub.enp_srst_hash) 1455 lsquic_hash_destroy(engine->pub.enp_srst_hash); 1456#if LSQUIC_COUNT_ENGINE_CALLS 1457 LSQ_NOTICE("number of calls into the engine: %lu", engine->n_engine_calls); 1458#endif 1459 if (engine->pub.enp_retry_aead_ctx) 1460 EVP_AEAD_CTX_cleanup(engine->pub.enp_retry_aead_ctx); 1461 free(engine); 1462} 1463 1464 1465static struct conn_cid_elem * 1466find_free_cce (struct lsquic_conn *conn) 1467{ 1468 struct conn_cid_elem *cce; 1469 1470 for (cce = conn->cn_cces; cce < END_OF_CCES(conn); ++cce) 1471 if (!(conn->cn_cces_mask & (1 << (cce - conn->cn_cces)))) 1472 return cce; 1473 1474 return NULL; 1475} 1476 1477 1478static int 1479add_conn_to_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 1480 const struct sockaddr *local_sa, void *peer_ctx) 1481{ 1482 struct conn_cid_elem *cce; 1483 1484 if (engine->flags & ENG_CONNS_BY_ADDR) 1485 { 1486 cce = find_free_cce(conn); 1487 if (!cce) 1488 { 1489 LSQ_ERROR("cannot find free CCE"); 1490 return -1; 1491 } 1492 cce->cce_port = sa2port(local_sa); 1493 cce->cce_flags = CCE_PORT; 1494 if (lsquic_hash_insert(engine->conns_hash, &cce->cce_port, 1495 sizeof(cce->cce_port), conn, &cce->cce_hash_el)) 1496 { 1497 conn->cn_cces_mask |= 1 << (cce - conn->cn_cces); 1498 return 0; 1499 } 1500 else 1501 return -1; 1502 1503 } 1504 else 1505 return insert_conn_into_hash(engine, conn, peer_ctx); 1506} 1507 1508 1509lsquic_conn_t * 1510lsquic_engine_connect (lsquic_engine_t *engine, enum lsquic_version version, 1511 const struct sockaddr *local_sa, 1512 const struct sockaddr *peer_sa, 1513 void *peer_ctx, lsquic_conn_ctx_t *conn_ctx, 1514 const char *hostname, unsigned short max_packet_size, 1515 const unsigned char *zero_rtt, size_t zero_rtt_len, 1516 const unsigned char *token, size_t token_sz) 1517{ 1518 lsquic_conn_t *conn; 1519 unsigned flags, versions; 1520 int is_ipv4; 1521 1522 ENGINE_IN(engine); 1523 1524 if (engine->flags & ENG_SERVER) 1525 { 1526 LSQ_ERROR("`%s' must only be called in client mode", __func__); 1527 goto err; 1528 } 1529 1530 if (engine->flags & ENG_CONNS_BY_ADDR 1531 && find_conn_by_addr(engine->conns_hash, local_sa)) 1532 { 1533 LSQ_ERROR("cannot have more than one connection on the same port"); 1534 goto err; 1535 } 1536 1537 if (0 != maybe_grow_conn_heaps(engine)) 1538 return NULL; 1539 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 1540 is_ipv4 = peer_sa->sa_family == AF_INET; 1541 if (zero_rtt && zero_rtt_len) 1542 { 1543 version = lsquic_zero_rtt_version(zero_rtt, zero_rtt_len); 1544 if (version >= N_LSQVER) 1545 { 1546 LSQ_INFO("zero-rtt version is bad, won't use"); 1547 zero_rtt = NULL; 1548 zero_rtt_len = 0; 1549 } 1550 } 1551 if (version >= N_LSQVER) 1552 { 1553 if (version > N_LSQVER) 1554 LSQ_WARN("invalid version specified, engine will pick"); 1555 versions = engine->pub.enp_settings.es_versions; 1556 } 1557 else 1558 versions = 1u << version; 1559 if (versions & LSQUIC_IETF_VERSIONS) 1560 { 1561 if (version == LSQVER_ID24) 1562 conn = lsquic_id24_full_conn_client_new(&engine->pub, versions, 1563 flags, hostname, max_packet_size, 1564 is_ipv4, zero_rtt, zero_rtt_len, token, token_sz); 1565 else 1566 conn = lsquic_ietf_full_conn_client_new(&engine->pub, versions, 1567 flags, hostname, max_packet_size, 1568 is_ipv4, zero_rtt, zero_rtt_len, token, token_sz); 1569 } 1570 else 1571 conn = 
lsquic_gquic_full_conn_client_new(&engine->pub, versions, 1572 flags, hostname, max_packet_size, is_ipv4, 1573 zero_rtt, zero_rtt_len); 1574 if (!conn) 1575 goto err; 1576 EV_LOG_CREATE_CONN(lsquic_conn_log_cid(conn), local_sa, peer_sa); 1577 EV_LOG_VER_NEG(lsquic_conn_log_cid(conn), "proposed", 1578 lsquic_ver2str[conn->cn_version]); 1579 ++engine->n_conns; 1580 lsquic_conn_record_sockaddr(conn, peer_ctx, local_sa, peer_sa); 1581 if (0 != add_conn_to_hash(engine, conn, local_sa, peer_ctx)) 1582 { 1583 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1584 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1585 CID_BITS(cid)); 1586 destroy_conn(engine, conn, lsquic_time_now()); 1587 goto err; 1588 } 1589 assert(!(conn->cn_flags & 1590 (CONN_REF_FLAGS 1591 & ~LSCONN_TICKABLE /* This flag may be set as effect of user 1592 callbacks */ 1593 ))); 1594 conn->cn_flags |= LSCONN_HASHED; 1595 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1596 engine_incref_conn(conn, LSCONN_TICKABLE); 1597 lsquic_conn_set_ctx(conn, conn_ctx); 1598 conn->cn_if->ci_client_call_on_new(conn); 1599 end: 1600 ENGINE_OUT(engine); 1601 return conn; 1602 err: 1603 conn = NULL; 1604 goto end; 1605} 1606 1607 1608static void 1609remove_conn_from_hash (lsquic_engine_t *engine, lsquic_conn_t *conn) 1610{ 1611 remove_all_cces_from_hash(engine->conns_hash, conn); 1612 (void) engine_decref_conn(engine, conn, LSCONN_HASHED); 1613} 1614 1615 1616static void 1617refflags2str (enum lsquic_conn_flags flags, char s[6]) 1618{ 1619 *s = 'C'; s += !!(flags & LSCONN_CLOSING); 1620 *s = 'H'; s += !!(flags & LSCONN_HASHED); 1621 *s = 'O'; s += !!(flags & LSCONN_HAS_OUTGOING); 1622 *s = 'T'; s += !!(flags & LSCONN_TICKABLE); 1623 *s = 'A'; s += !!(flags & LSCONN_ATTQ); 1624 *s = 'K'; s += !!(flags & LSCONN_TICKED); 1625 *s = '\0'; 1626} 1627 1628 1629static void 1630engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag) 1631{ 1632 char str[2][7]; 1633 assert(flag & CONN_REF_FLAGS); 1634 assert(!(conn->cn_flags & flag)); 1635 conn->cn_flags |= flag; 1636 LSQ_DEBUGC("incref conn %"CID_FMT", '%s' -> '%s'", 1637 CID_BITS(lsquic_conn_log_cid(conn)), 1638 (refflags2str(conn->cn_flags & ~flag, str[0]), str[0]), 1639 (refflags2str(conn->cn_flags, str[1]), str[1])); 1640} 1641 1642 1643static lsquic_conn_t * 1644engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 1645 enum lsquic_conn_flags flags) 1646{ 1647 char str[2][7]; 1648 lsquic_time_t now; 1649 assert(flags & CONN_REF_FLAGS); 1650 assert(conn->cn_flags & flags); 1651#ifndef NDEBUG 1652 if (flags & LSCONN_CLOSING) 1653 assert(0 == (conn->cn_flags & LSCONN_HASHED)); 1654#endif 1655 conn->cn_flags &= ~flags; 1656 LSQ_DEBUGC("decref conn %"CID_FMT", '%s' -> '%s'", 1657 CID_BITS(lsquic_conn_log_cid(conn)), 1658 (refflags2str(conn->cn_flags | flags, str[0]), str[0]), 1659 (refflags2str(conn->cn_flags, str[1]), str[1])); 1660 if (0 == (conn->cn_flags & CONN_REF_FLAGS)) 1661 { 1662 now = lsquic_time_now(); 1663 if (conn->cn_flags & LSCONN_MINI) 1664 eng_hist_inc(&engine->history, now, sl_del_mini_conns); 1665 else 1666 eng_hist_inc(&engine->history, now, sl_del_full_conns); 1667 destroy_conn(engine, conn, now); 1668 return NULL; 1669 } 1670 else 1671 return conn; 1672} 1673 1674 1675/* This is not a general-purpose function. Only call from engine dtor. 
*/ 1676static void 1677force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn) 1678{ 1679 assert(engine->flags & ENG_DTOR); 1680 const enum lsquic_conn_flags flags = conn->cn_flags; 1681 assert(conn->cn_flags & CONN_REF_FLAGS); 1682 assert(!(flags & LSCONN_HAS_OUTGOING)); /* Should be removed already */ 1683 assert(!(flags & LSCONN_TICKABLE)); /* Should be removed already */ 1684 assert(!(flags & LSCONN_CLOSING)); /* It is in transient queue? */ 1685 if (flags & LSCONN_ATTQ) 1686 { 1687 attq_remove(engine->attq, conn); 1688 (void) engine_decref_conn(engine, conn, LSCONN_ATTQ); 1689 } 1690 if (flags & LSCONN_HASHED) 1691 remove_conn_from_hash(engine, conn); 1692} 1693 1694 1695/* Iterator for tickable connections (those on the Tickable Queue). Before 1696 * a connection is returned, it is removed from the Advisory Tick Time queue 1697 * if necessary. 1698 */ 1699static lsquic_conn_t * 1700conn_iter_next_tickable (struct lsquic_engine *engine) 1701{ 1702 lsquic_conn_t *conn; 1703 1704 if (engine->flags & ENG_SERVER) 1705 while (1) 1706 { 1707 conn = lsquic_mh_pop(&engine->conns_tickable); 1708 if (conn && (conn->cn_flags & LSCONN_SKIP_ON_PROC)) 1709 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1710 else 1711 break; 1712 } 1713 else 1714 conn = lsquic_mh_pop(&engine->conns_tickable); 1715 1716 if (conn) 1717 conn = engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1718 if (conn && (conn->cn_flags & LSCONN_ATTQ)) 1719 { 1720 attq_remove(engine->attq, conn); 1721 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1722 } 1723 1724 return conn; 1725} 1726 1727 1728static void 1729cub_init (struct cid_update_batch *cub, lsquic_cids_update_f update, 1730 void *update_ctx) 1731{ 1732 cub->cub_update_cids = update; 1733 cub->cub_update_ctx = update_ctx; 1734 cub->cub_count = 0; 1735} 1736 1737 1738static void 1739cub_flush (struct cid_update_batch *cub) 1740{ 1741 if (cub->cub_count > 0 && cub->cub_update_cids) 1742 cub->cub_update_cids(cub->cub_update_ctx, cub->cub_peer_ctxs, 1743 cub->cub_cids, cub->cub_count); 1744 cub->cub_count = 0; 1745} 1746 1747 1748static void 1749cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx) 1750{ 1751 cub->cub_cids [ cub->cub_count ] = *cid; 1752 cub->cub_peer_ctxs[ cub->cub_count ] = peer_ctx; 1753 ++cub->cub_count; 1754 if (cub->cub_count == sizeof(cub->cub_cids) / sizeof(cub->cub_cids[0])) 1755 cub_flush(cub); 1756} 1757 1758 1759/* Process registered CIDs */ 1760static void 1761cub_add_cids_from_cces (struct cid_update_batch *cub, struct lsquic_conn *conn) 1762{ 1763 struct cce_cid_iter citer; 1764 struct conn_cid_elem *cce; 1765 void *peer_ctx; 1766 1767 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 1768 for (cce = cce_iter_first(&citer, conn); cce; cce = cce_iter_next(&citer)) 1769 if (cce->cce_flags & CCE_REG) 1770 cub_add(cub, &cce->cce_cid, peer_ctx); 1771} 1772 1773 1774static void 1775drop_all_mini_conns (lsquic_engine_t *engine) 1776{ 1777 struct lsquic_hash_elem *el; 1778 lsquic_conn_t *conn; 1779 struct cid_update_batch cub; 1780 1781 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 1782 1783 for (el = lsquic_hash_first(engine->conns_hash); el; 1784 el = lsquic_hash_next(engine->conns_hash)) 1785 { 1786 conn = lsquic_hashelem_getdata(el); 1787 if (conn->cn_flags & LSCONN_MINI) 1788 { 1789 /* If promoted, why is it still in this hash? 
*/ 1790 assert(!(conn->cn_flags & LSCONN_PROMOTED)); 1791 if (!(conn->cn_flags & LSCONN_PROMOTED)) 1792 cub_add_cids_from_cces(&cub, conn); 1793 remove_conn_from_hash(engine, conn); 1794 } 1795 } 1796 1797 cub_flush(&cub); 1798} 1799 1800 1801void 1802lsquic_engine_process_conns (lsquic_engine_t *engine) 1803{ 1804 lsquic_conn_t *conn; 1805 lsquic_time_t now; 1806 1807 ENGINE_IN(engine); 1808 1809 now = lsquic_time_now(); 1810 while ((conn = attq_pop(engine->attq, now))) 1811 { 1812 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1813 if (conn && !(conn->cn_flags & LSCONN_TICKABLE)) 1814 { 1815 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1816 engine_incref_conn(conn, LSCONN_TICKABLE); 1817 } 1818 } 1819 1820 process_connections(engine, conn_iter_next_tickable, now); 1821 ENGINE_OUT(engine); 1822} 1823 1824 1825static void 1826release_or_return_enc_data (struct lsquic_engine *engine, 1827 void (*pmi_rel_or_ret) (void *, void *, void *, char), 1828 struct lsquic_conn *conn, struct lsquic_packet_out *packet_out) 1829{ 1830 pmi_rel_or_ret(engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1831 packet_out->po_enc_data, lsquic_packet_out_ipv6(packet_out)); 1832 packet_out->po_flags &= ~PO_ENCRYPTED; 1833 packet_out->po_enc_data = NULL; 1834} 1835 1836 1837static void 1838release_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1839 struct lsquic_packet_out *packet_out) 1840{ 1841 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_release, 1842 conn, packet_out); 1843} 1844 1845 1846static void 1847return_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1848 struct lsquic_packet_out *packet_out) 1849{ 1850 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_return, 1851 conn, packet_out); 1852} 1853 1854 1855static int 1856copy_packet (struct lsquic_engine *engine, struct lsquic_conn *conn, 1857 struct lsquic_packet_out *packet_out) 1858{ 1859 int ipv6; 1860 1861 ipv6 = NP_IS_IPv6(packet_out->po_path); 1862 if (packet_out->po_flags & PO_ENCRYPTED) 1863 { 1864 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1865 && packet_out->po_data_sz == packet_out->po_enc_data_sz 1866 && 0 == memcmp(packet_out->po_data, packet_out->po_enc_data, 1867 packet_out->po_data_sz)) 1868 return 0; 1869 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1870 && packet_out->po_data_sz <= packet_out->po_enc_data_sz) 1871 goto copy; 1872 return_enc_data(engine, conn, packet_out); 1873 } 1874 1875 packet_out->po_enc_data = engine->pub.enp_pmi->pmi_allocate( 1876 engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1877 packet_out->po_data_sz, ipv6); 1878 if (!packet_out->po_enc_data) 1879 { 1880 LSQ_DEBUG("could not allocate memory for outgoing unencrypted packet " 1881 "of size %hu", packet_out->po_data_sz); 1882 return -1; 1883 } 1884 1885 copy: 1886 memcpy(packet_out->po_enc_data, packet_out->po_data, 1887 packet_out->po_data_sz); 1888 packet_out->po_enc_data_sz = packet_out->po_data_sz; 1889 packet_out->po_sent_sz = packet_out->po_data_sz; 1890 packet_out->po_flags &= ~PO_IPv6; 1891 packet_out->po_flags |= PO_ENCRYPTED|PO_SENT_SZ|(ipv6 << POIPv6_SHIFT); 1892 1893 return 0; 1894} 1895 1896 1897STAILQ_HEAD(conns_stailq, lsquic_conn); 1898TAILQ_HEAD(conns_tailq, lsquic_conn); 1899 1900 1901struct conns_out_iter 1902{ 1903 struct min_heap *coi_heap; 1904 struct pr_queue *coi_prq; 1905 TAILQ_HEAD(, lsquic_conn) coi_active_list, 1906 coi_inactive_list; 1907 lsquic_conn_t *coi_next; 1908#ifndef NDEBUG 1909 lsquic_time_t coi_last_sent; 
1910#endif 1911}; 1912 1913 1914static void 1915coi_init (struct conns_out_iter *iter, struct lsquic_engine *engine) 1916{ 1917 iter->coi_heap = &engine->conns_out; 1918 iter->coi_prq = engine->pr_queue; 1919 iter->coi_next = NULL; 1920 TAILQ_INIT(&iter->coi_active_list); 1921 TAILQ_INIT(&iter->coi_inactive_list); 1922#ifndef NDEBUG 1923 iter->coi_last_sent = 0; 1924#endif 1925} 1926 1927 1928static lsquic_conn_t * 1929coi_next (struct conns_out_iter *iter) 1930{ 1931 lsquic_conn_t *conn; 1932 1933 if (lsquic_mh_count(iter->coi_heap) > 0) 1934 { 1935 conn = lsquic_mh_pop(iter->coi_heap); 1936 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1937 conn->cn_flags |= LSCONN_COI_ACTIVE; 1938#ifndef NDEBUG 1939 if (iter->coi_last_sent) 1940 assert(iter->coi_last_sent <= conn->cn_last_sent); 1941 iter->coi_last_sent = conn->cn_last_sent; 1942#endif 1943 return conn; 1944 } 1945 else if (iter->coi_prq && (conn = prq_next_conn(iter->coi_prq))) 1946 { 1947 return conn; 1948 } 1949 else if (!TAILQ_EMPTY(&iter->coi_active_list)) 1950 { 1951 iter->coi_prq = NULL; /* Save function call in previous conditional */ 1952 conn = iter->coi_next; 1953 if (!conn) 1954 conn = TAILQ_FIRST(&iter->coi_active_list); 1955 if (conn) 1956 iter->coi_next = TAILQ_NEXT(conn, cn_next_out); 1957 return conn; 1958 } 1959 else 1960 return NULL; 1961} 1962 1963 1964static void 1965coi_deactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1966{ 1967 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 1968 { 1969 assert(!TAILQ_EMPTY(&iter->coi_active_list)); 1970 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1971 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1972 TAILQ_INSERT_TAIL(&iter->coi_inactive_list, conn, cn_next_out); 1973 conn->cn_flags |= LSCONN_COI_INACTIVE; 1974 } 1975} 1976 1977 1978static void 1979coi_reactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1980{ 1981 assert(conn->cn_flags & LSCONN_COI_INACTIVE); 1982 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 1983 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 1984 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1985 conn->cn_flags |= LSCONN_COI_ACTIVE; 1986} 1987 1988 1989static void 1990coi_reheap (struct conns_out_iter *iter, lsquic_engine_t *engine) 1991{ 1992 lsquic_conn_t *conn; 1993 while ((conn = TAILQ_FIRST(&iter->coi_active_list))) 1994 { 1995 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1996 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1997 if ((conn->cn_flags & CONN_REF_FLAGS) != LSCONN_HAS_OUTGOING 1998 && !(conn->cn_flags & LSCONN_IMMED_CLOSE)) 1999 lsquic_mh_insert(iter->coi_heap, conn, conn->cn_last_sent); 2000 else /* Closed connection gets one shot at sending packets */ 2001 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 2002 } 2003 while ((conn = TAILQ_FIRST(&iter->coi_inactive_list))) 2004 { 2005 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 2006 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 2007 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 2008 } 2009} 2010 2011 2012#ifndef NDEBUG 2013static void 2014lose_matching_packets (const lsquic_engine_t *engine, struct out_batch *batch, 2015 unsigned n) 2016{ 2017 const lsquic_cid_t *cid; 2018 struct iovec *iov; 2019 unsigned i; 2020 char packno_str[22]; 2021 2022 for (i = 0; i < n; ++i) 2023 { 2024 snprintf(packno_str, sizeof(packno_str), "%"PRIu64, 2025 batch->packets[i]->po_packno); 2026 if (0 == regexec(&engine->lose_packets_re, packno_str, 0, NULL, 0)) 2027 { 2028 for (iov = batch->outs[i].iov; iov < 2029 
batch->outs[i].iov + batch->outs[i].iovlen; ++iov) 2030 iov->iov_len -= 1; 2031 cid = lsquic_conn_log_cid(batch->conns[i]); 2032 LSQ_WARNC("losing packet %s for connection %"CID_FMT, packno_str, 2033 CID_BITS(cid)); 2034 } 2035 } 2036} 2037 2038 2039#endif 2040 2041 2042#ifdef NDEBUG 2043#define CONST_BATCH const 2044#else 2045#define CONST_BATCH 2046#endif 2047 2048 2049struct send_batch_ctx { 2050 struct conns_stailq *closed_conns; 2051 struct conns_tailq *ticked_conns; 2052 struct conns_out_iter *conns_iter; 2053 CONST_BATCH struct out_batch *batch; 2054}; 2055 2056 2057static void 2058close_conn_immediately (struct lsquic_engine *engine, 2059 const struct send_batch_ctx *sb_ctx, struct lsquic_conn *conn) 2060{ 2061 conn->cn_flags |= LSCONN_IMMED_CLOSE; 2062 if (!(conn->cn_flags & LSCONN_CLOSING)) 2063 { 2064 STAILQ_INSERT_TAIL(sb_ctx->closed_conns, conn, cn_next_closed_conn); 2065 engine_incref_conn(conn, LSCONN_CLOSING); 2066 if (conn->cn_flags & LSCONN_HASHED) 2067 remove_conn_from_hash(engine, conn); 2068 } 2069 if (conn->cn_flags & LSCONN_TICKED) 2070 { 2071 TAILQ_REMOVE(sb_ctx->ticked_conns, conn, cn_next_ticked); 2072 engine_decref_conn(engine, conn, LSCONN_TICKED); 2073 } 2074} 2075 2076 2077static void 2078close_conn_on_send_error (struct lsquic_engine *engine, 2079 const struct send_batch_ctx *sb_ctx, int n, int e_val) 2080{ 2081 const struct out_batch *batch = sb_ctx->batch; 2082 struct lsquic_conn *const conn = batch->conns[n]; 2083 char buf[2][INET6_ADDRSTRLEN + sizeof(":65535")]; 2084 2085 LSQ_WARNC("error sending packet for %s connection %"CID_FMT" - close it; " 2086 "src: %s; dst: %s; errno: %d", 2087 conn->cn_flags & LSCONN_EVANESCENT ? "evanescent" : 2088 conn->cn_flags & LSCONN_MINI ? "mini" : "regular", 2089 CID_BITS(lsquic_conn_log_cid(conn)), 2090 SA2STR(batch->outs[n].local_sa, buf[0]), 2091 SA2STR(batch->outs[n].dest_sa, buf[1]), 2092 e_val); 2093 if (conn->cn_flags & LSCONN_EVANESCENT) 2094 lsquic_prq_drop(conn); 2095 else 2096 close_conn_immediately(engine, sb_ctx, conn); 2097} 2098 2099 2100static unsigned 2101send_batch (lsquic_engine_t *engine, const struct send_batch_ctx *sb_ctx, 2102 unsigned n_to_send) 2103{ 2104 int n_sent, i, e_val; 2105 lsquic_time_t now; 2106 unsigned off; 2107 size_t count; 2108 CONST_BATCH struct out_batch *const batch = sb_ctx->batch; 2109 struct lsquic_packet_out *CONST_BATCH *packet_out, *CONST_BATCH *end; 2110 2111#ifndef NDEBUG 2112 if (engine->flags & ENG_LOSE_PACKETS) 2113 lose_matching_packets(engine, batch, n_to_send); 2114#endif 2115 /* Set sent time before the write to avoid underestimating RTT */ 2116 now = lsquic_time_now(); 2117 for (i = 0; i < (int) n_to_send; ++i) 2118 { 2119 off = batch->pack_off[i]; 2120 count = batch->outs[i].iovlen; 2121 assert(count > 0); 2122 packet_out = &batch->packets[off]; 2123 end = packet_out + count; 2124 do 2125 (*packet_out)->po_sent = now; 2126 while (++packet_out < end); 2127 } 2128 n_sent = engine->packets_out(engine->packets_out_ctx, batch->outs, 2129 n_to_send); 2130 e_val = errno; 2131 if (n_sent < (int) n_to_send) 2132 { 2133 engine->pub.enp_flags &= ~ENPUB_CAN_SEND; 2134 engine->resume_sending_at = now + 1000000; 2135 LSQ_DEBUG("cannot send packets"); 2136 EV_LOG_GENERIC_EVENT("cannot send packets"); 2137 if (!(EAGAIN == e_val || EWOULDBLOCK == e_val)) 2138 close_conn_on_send_error(engine, sb_ctx, 2139 n_sent < 0 ? 
0 : n_sent, e_val); 2140 } 2141 if (n_sent >= 0) 2142 LSQ_DEBUG("packets out returned %d (out of %u)", n_sent, n_to_send); 2143 else 2144 { 2145 LSQ_DEBUG("packets out returned an error: %s", strerror(e_val)); 2146 n_sent = 0; 2147 } 2148 if (n_sent > 0) 2149 engine->last_sent = now + n_sent; 2150 for (i = 0; i < n_sent; ++i) 2151 { 2152 eng_hist_inc(&engine->history, now, sl_packets_out); 2153 /* `i' is added to maintain relative order */ 2154 batch->conns[i]->cn_last_sent = now + i; 2155 2156 off = batch->pack_off[i]; 2157 count = batch->outs[i].iovlen; 2158 assert(count > 0); 2159 packet_out = &batch->packets[off]; 2160 end = packet_out + count; 2161 do 2162 { 2163#if LOG_PACKET_CHECKSUM 2164 log_packet_checksum(lsquic_conn_log_cid(batch->conns[i]), "out", 2165 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_base, 2166 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_len); 2167#endif 2168 EV_LOG_PACKET_SENT(lsquic_conn_log_cid(batch->conns[i]), 2169 *packet_out); 2170 /* Release packet out buffer as soon as the packet is sent 2171 * successfully. If not successfully sent, we hold on to 2172 * this buffer until the packet sending is attempted again 2173 * or until it times out and is regenerated. 2174 */ 2175 if ((*packet_out)->po_flags & PO_ENCRYPTED) 2176 release_enc_data(engine, batch->conns[i], *packet_out); 2177 batch->conns[i]->cn_if->ci_packet_sent(batch->conns[i], 2178 *packet_out); 2179 } 2180 while (++packet_out < end); 2181 } 2182 if (LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT)) 2183 for ( ; i < (int) n_to_send; ++i) 2184 { 2185 off = batch->pack_off[i]; 2186 count = batch->outs[i].iovlen; 2187 assert(count > 0); 2188 packet_out = &batch->packets[off]; 2189 end = packet_out + count; 2190 do 2191 EV_LOG_PACKET_NOT_SENT(lsquic_conn_log_cid(batch->conns[i]), 2192 *packet_out); 2193 while (++packet_out < end); 2194 } 2195 /* Return packets to the connection in reverse order so that the packet 2196 * ordering is maintained. 
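 * (The connection is expected to put each packet returned via ci_packet_not_sent() back at the front of its send queue, which is why iterating the batch from the end preserves the original packet order.)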
2197 */ 2198 for (i = (int) n_to_send - 1; i >= n_sent; --i) 2199 { 2200 off = batch->pack_off[i]; 2201 count = batch->outs[i].iovlen; 2202 assert(count > 0); 2203 packet_out = &batch->packets[off + count - 1]; 2204 end = &batch->packets[off - 1]; 2205 do 2206 batch->conns[i]->cn_if->ci_packet_not_sent(batch->conns[i], 2207 *packet_out); 2208 while (--packet_out > end); 2209 if (!(batch->conns[i]->cn_flags & (LSCONN_COI_ACTIVE|LSCONN_EVANESCENT))) 2210 coi_reactivate(sb_ctx->conns_iter, batch->conns[i]); 2211 } 2212 return n_sent; 2213} 2214 2215 2216/* Return 1 if went past deadline, 0 otherwise */ 2217static int 2218check_deadline (lsquic_engine_t *engine) 2219{ 2220 if (engine->pub.enp_settings.es_proc_time_thresh && 2221 lsquic_time_now() > engine->deadline) 2222 { 2223 LSQ_INFO("went past threshold of %u usec, stop sending", 2224 engine->pub.enp_settings.es_proc_time_thresh); 2225 engine->flags |= ENG_PAST_DEADLINE; 2226 return 1; 2227 } 2228 else 2229 return 0; 2230} 2231 2232 2233static size_t 2234iov_size (const struct iovec *iov, const struct iovec *const end) 2235{ 2236 size_t size; 2237 2238 assert(iov < end); 2239 2240 size = 0; 2241 do 2242 size += iov->iov_len; 2243 while (++iov < end); 2244 2245 return size; 2246} 2247 2248 2249static void 2250send_packets_out (struct lsquic_engine *engine, 2251 struct conns_tailq *ticked_conns, 2252 struct conns_stailq *closed_conns) 2253{ 2254 unsigned n, w, n_sent, n_batches_sent; 2255 lsquic_packet_out_t *packet_out; 2256 struct lsquic_packet_out **packet; 2257 lsquic_conn_t *conn; 2258 struct out_batch *const batch = &engine->out_batch; 2259 struct iovec *iov, *packet_iov; 2260 struct conns_out_iter conns_iter; 2261 int shrink, deadline_exceeded; 2262 const struct send_batch_ctx sb_ctx = { 2263 closed_conns, 2264 ticked_conns, 2265 &conns_iter, 2266 &engine->out_batch, 2267 }; 2268 2269 coi_init(&conns_iter, engine); 2270 n_batches_sent = 0; 2271 n_sent = 0, n = 0; 2272 shrink = 0; 2273 deadline_exceeded = 0; 2274 iov = batch->iov; 2275 packet = batch->packets; 2276 2277 while ((conn = coi_next(&conns_iter))) 2278 { 2279 packet_out = conn->cn_if->ci_next_packet_to_send(conn, 0); 2280 if (!packet_out) { 2281 /* Evanescent connection always has a packet to send: */ 2282 assert(!(conn->cn_flags & LSCONN_EVANESCENT)); 2283 LSQ_DEBUGC("batched all outgoing packets for %s conn %"CID_FMT, 2284 (conn->cn_flags & LSCONN_MINI ? 
"mini" : "full"), 2285 CID_BITS(lsquic_conn_log_cid(conn))); 2286 coi_deactivate(&conns_iter, conn); 2287 continue; 2288 } 2289 batch->outs[n].iov = packet_iov = iov; 2290 next_coa: 2291 if (!(packet_out->po_flags & (PO_ENCRYPTED|PO_NOENCRYPT))) 2292 { 2293 switch (conn->cn_esf_c->esf_encrypt_packet(conn->cn_enc_session, 2294 &engine->pub, conn, packet_out)) 2295 { 2296 case ENCPA_NOMEM: 2297 /* Send what we have and wait for a more opportune moment */ 2298 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2299 goto end_for; 2300 case ENCPA_BADCRYPT: 2301 /* This is pretty bad: close connection immediately */ 2302 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2303 LSQ_INFOC("conn %"CID_FMT" has unsendable packets", 2304 CID_BITS(lsquic_conn_log_cid(conn))); 2305 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 2306 { 2307 close_conn_immediately(engine, &sb_ctx, conn); 2308 coi_deactivate(&conns_iter, conn); 2309 } 2310 continue; 2311 case ENCPA_OK: 2312 break; 2313 } 2314 } 2315 else if ((packet_out->po_flags & PO_NOENCRYPT) 2316 && engine->pub.enp_pmi != &stock_pmi) 2317 { 2318 if (0 != copy_packet(engine, conn, packet_out)) 2319 { 2320 /* Copy can only fail if packet could not be allocated */ 2321 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2322 goto end_for; 2323 } 2324 } 2325 LSQ_DEBUGC("batched packet %"PRIu64" for connection %"CID_FMT, 2326 packet_out->po_packno, CID_BITS(lsquic_conn_log_cid(conn))); 2327 if (packet_out->po_flags & PO_ENCRYPTED) 2328 { 2329 iov->iov_base = packet_out->po_enc_data; 2330 iov->iov_len = packet_out->po_enc_data_sz; 2331 } 2332 else 2333 { 2334 iov->iov_base = packet_out->po_data; 2335 iov->iov_len = packet_out->po_data_sz; 2336 } 2337 if (packet_iov == iov) 2338 { 2339 batch->pack_off[n] = packet - batch->packets; 2340 batch->outs [n].ecn = lsquic_packet_out_ecn(packet_out); 2341 batch->outs [n].peer_ctx = packet_out->po_path->np_peer_ctx; 2342 batch->outs [n].local_sa = NP_LOCAL_SA(packet_out->po_path); 2343 batch->outs [n].dest_sa = NP_PEER_SA(packet_out->po_path); 2344 batch->conns [n] = conn; 2345 } 2346 *packet = packet_out; 2347 ++packet; 2348 ++iov; 2349 if ((conn->cn_flags & LSCONN_IETF) 2350 && ((1 << packet_out->po_header_type) 2351 & ((1 << HETY_INITIAL)|(1 << HETY_HANDSHAKE)|(1 << HETY_0RTT))) 2352#ifndef NDEBUG 2353 && (engine->flags & ENG_COALESCE) 2354#endif 2355 && iov < batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2356 { 2357 const size_t size = iov_size(packet_iov, iov); 2358 packet_out = conn->cn_if->ci_next_packet_to_send(conn, size); 2359 if (packet_out) 2360 goto next_coa; 2361 } 2362 batch->outs [n].iovlen = iov - packet_iov; 2363 ++n; 2364 if (n == engine->batch_size 2365 || iov >= batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2366 { 2367 w = send_batch(engine, &sb_ctx, n); 2368 n = 0; 2369 iov = batch->iov; 2370 packet = batch->packets; 2371 ++n_batches_sent; 2372 n_sent += w; 2373 if (w < engine->batch_size) 2374 { 2375 shrink = 1; 2376 break; 2377 } 2378 deadline_exceeded = check_deadline(engine); 2379 if (deadline_exceeded) 2380 break; 2381 grow_batch_size(engine); 2382 } 2383 } 2384 end_for: 2385 2386 if (n > 0) { 2387 w = send_batch(engine, &sb_ctx, n); 2388 n_sent += w; 2389 shrink = w < n; 2390 ++n_batches_sent; 2391 } 2392 2393 if (shrink) 2394 shrink_batch_size(engine); 2395 else if (n_batches_sent > 1) 2396 { 2397 deadline_exceeded = check_deadline(engine); 2398 if (!deadline_exceeded) 2399 grow_batch_size(engine); 2400 } 2401 2402 coi_reheap(&conns_iter, engine); 2403 2404 LSQ_DEBUG("%s: 
sent %u packet%.*s", __func__, n_sent, n_sent != 1, "s"); 2405} 2406 2407 2408int 2409lsquic_engine_has_unsent_packets (lsquic_engine_t *engine) 2410{ 2411 return lsquic_mh_count(&engine->conns_out) > 0 2412 || (engine->pr_queue && prq_have_pending(engine->pr_queue)) 2413 ; 2414} 2415 2416 2417static void 2418reset_deadline (lsquic_engine_t *engine, lsquic_time_t now) 2419{ 2420 engine->deadline = now + engine->pub.enp_settings.es_proc_time_thresh; 2421 engine->flags &= ~ENG_PAST_DEADLINE; 2422} 2423 2424 2425void 2426lsquic_engine_send_unsent_packets (lsquic_engine_t *engine) 2427{ 2428 lsquic_conn_t *conn; 2429 struct conns_stailq closed_conns; 2430 struct conns_tailq ticked_conns = TAILQ_HEAD_INITIALIZER(ticked_conns); 2431 struct cid_update_batch cub; 2432 2433 ENGINE_IN(engine); 2434 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 2435 STAILQ_INIT(&closed_conns); 2436 reset_deadline(engine, lsquic_time_now()); 2437 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND)) 2438 { 2439 LSQ_DEBUG("can send again"); 2440 EV_LOG_GENERIC_EVENT("can send again"); 2441 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2442 } 2443 2444 send_packets_out(engine, &ticked_conns, &closed_conns); 2445 2446 while ((conn = STAILQ_FIRST(&closed_conns))) { 2447 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2448 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2449 cub_add_cids_from_cces(&cub, conn); 2450 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2451 } 2452 2453 cub_flush(&cub); 2454 ENGINE_OUT(engine); 2455} 2456 2457 2458static lsquic_conn_t * 2459next_new_full_conn (struct conns_stailq *new_full_conns) 2460{ 2461 lsquic_conn_t *conn; 2462 2463 conn = STAILQ_FIRST(new_full_conns); 2464 if (conn) 2465 STAILQ_REMOVE_HEAD(new_full_conns, cn_next_new_full); 2466 return conn; 2467} 2468 2469 2470static void 2471process_connections (lsquic_engine_t *engine, conn_iter_f next_conn, 2472 lsquic_time_t now) 2473{ 2474 lsquic_conn_t *conn; 2475 enum tick_st tick_st; 2476 unsigned i, why; 2477 lsquic_time_t next_tick_time; 2478 struct conns_stailq closed_conns; 2479 struct conns_tailq ticked_conns; 2480 struct conns_stailq new_full_conns; 2481 struct cid_update_batch cub_old, cub_live; 2482 cub_init(&cub_old, engine->report_old_scids, engine->scids_ctx); 2483 cub_init(&cub_live, engine->report_live_scids, engine->scids_ctx); 2484 2485 eng_hist_tick(&engine->history, now); 2486 2487 STAILQ_INIT(&closed_conns); 2488 TAILQ_INIT(&ticked_conns); 2489 reset_deadline(engine, now); 2490 STAILQ_INIT(&new_full_conns); 2491 2492 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND) 2493 && now > engine->resume_sending_at) 2494 { 2495 LSQ_NOTICE("failsafe activated: resume sending packets again after " 2496 "timeout"); 2497 EV_LOG_GENERIC_EVENT("resume sending packets again after timeout"); 2498 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2499 } 2500 2501 i = 0; 2502 while ((conn = next_conn(engine)) 2503 || (conn = next_new_full_conn(&new_full_conns))) 2504 { 2505 tick_st = conn->cn_if->ci_tick(conn, now); 2506 conn->cn_last_ticked = now + i /* Maintain relative order */ ++; 2507 if (tick_st & TICK_PROMOTE) 2508 { 2509 lsquic_conn_t *new_conn; 2510 EV_LOG_CONN_EVENT(lsquic_conn_log_cid(conn), 2511 "scheduled for promotion"); 2512 assert(conn->cn_flags & LSCONN_MINI); 2513 new_conn = new_full_conn_server(engine, conn, now); 2514 if (new_conn) 2515 { 2516 STAILQ_INSERT_TAIL(&new_full_conns, new_conn, cn_next_new_full); 2517 new_conn->cn_last_sent = engine->last_sent; 2518 
eng_hist_inc(&engine->history, now, sl_new_full_conns); 2519 conn->cn_flags |= LSCONN_PROMOTED; 2520 } 2521 tick_st |= TICK_CLOSE; /* Destroy mini connection */ 2522 } 2523 if (tick_st & TICK_SEND) 2524 { 2525 if (!(conn->cn_flags & LSCONN_HAS_OUTGOING)) 2526 { 2527 lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent); 2528 engine_incref_conn(conn, LSCONN_HAS_OUTGOING); 2529 } 2530 } 2531 if (tick_st & TICK_CLOSE) 2532 { 2533 STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn); 2534 engine_incref_conn(conn, LSCONN_CLOSING); 2535 if (conn->cn_flags & LSCONN_HASHED) 2536 remove_conn_from_hash(engine, conn); 2537 } 2538 else 2539 { 2540 TAILQ_INSERT_TAIL(&ticked_conns, conn, cn_next_ticked); 2541 engine_incref_conn(conn, LSCONN_TICKED); 2542 if ((engine->flags & ENG_SERVER) && conn->cn_if->ci_report_live 2543 && conn->cn_if->ci_report_live(conn, now)) 2544 cub_add_cids_from_cces(&cub_live, conn); 2545 } 2546 } 2547 2548 if ((engine->pub.enp_flags & ENPUB_CAN_SEND) 2549 && lsquic_engine_has_unsent_packets(engine)) 2550 send_packets_out(engine, &ticked_conns, &closed_conns); 2551 2552 while ((conn = STAILQ_FIRST(&closed_conns))) { 2553 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2554 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2555 cub_add_cids_from_cces(&cub_old, conn); 2556 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2557 } 2558 2559 while ((conn = TAILQ_FIRST(&ticked_conns))) 2560 { 2561 TAILQ_REMOVE(&ticked_conns, conn, cn_next_ticked); 2562 engine_decref_conn(engine, conn, LSCONN_TICKED); 2563 if (!(conn->cn_flags & LSCONN_TICKABLE) 2564 && conn->cn_if->ci_is_tickable(conn)) 2565 { 2566 /* Floyd heapification is not faster, don't bother. */ 2567 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 2568 engine_incref_conn(conn, LSCONN_TICKABLE); 2569 } 2570 else if (!(conn->cn_flags & LSCONN_ATTQ)) 2571 { 2572 next_tick_time = conn->cn_if->ci_next_tick_time(conn, &why); 2573 if (next_tick_time) 2574 { 2575 if (0 == attq_add(engine->attq, conn, next_tick_time, why)) 2576 engine_incref_conn(conn, LSCONN_ATTQ); 2577 } 2578 else 2579 /* In all other cases, the idle timeout would make the next 2580 * tick time non-zero: 2581 */ 2582 assert((conn->cn_flags & LSCONN_IETF) 2583 && engine->pub.enp_settings.es_idle_timeout == 0); 2584 } 2585 } 2586 2587 cub_flush(&engine->new_scids); 2588 cub_flush(&cub_live); 2589 cub_flush(&cub_old); 2590} 2591 2592 2593/* Return 0 if packet is being processed by a real connection, 1 if the 2594 * packet was processed, but not by a connection, and -1 on error. 
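 * A single UDP datagram may carry more than one QUIC packet (coalesced IETF QUIC packets); the loop below parses and dispatches them one at a time until the datagram is consumed or an error occurs.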
2595 */ 2596int 2597lsquic_engine_packet_in (lsquic_engine_t *engine, 2598 const unsigned char *packet_in_data, size_t packet_in_size, 2599 const struct sockaddr *sa_local, const struct sockaddr *sa_peer, 2600 void *peer_ctx, int ecn) 2601{ 2602 const unsigned char *const packet_end = packet_in_data + packet_in_size; 2603 struct packin_parse_state ppstate; 2604 lsquic_packet_in_t *packet_in; 2605 int (*parse_packet_in_begin) (struct lsquic_packet_in *, size_t length, 2606 int is_server, unsigned cid_len, struct packin_parse_state *); 2607 unsigned n_zeroes; 2608 int s; 2609 2610 ENGINE_CALLS_INCR(engine); 2611 2612 if (engine->flags & ENG_SERVER) 2613 parse_packet_in_begin = lsquic_parse_packet_in_server_begin; 2614 else 2615 if (engine->flags & ENG_CONNS_BY_ADDR) 2616 { 2617 struct lsquic_hash_elem *el; 2618 const struct lsquic_conn *conn; 2619 el = find_conn_by_addr(engine->conns_hash, sa_local); 2620 if (!el) 2621 return -1; 2622 conn = lsquic_hashelem_getdata(el); 2623 if ((1 << conn->cn_version) & LSQUIC_GQUIC_HEADER_VERSIONS) 2624 parse_packet_in_begin = lsquic_gquic_parse_packet_in_begin; 2625 else if ((1 << conn->cn_version) & LSQUIC_IETF_VERSIONS) 2626 parse_packet_in_begin = lsquic_ietf_v1_parse_packet_in_begin; 2627 else if (conn->cn_version == LSQVER_050) 2628 parse_packet_in_begin = lsquic_Q050_parse_packet_in_begin; 2629 else 2630 { 2631 assert(conn->cn_version == LSQVER_046 2632#if LSQUIC_USE_Q098 2633 || conn->cn_version == LSQVER_098 2634#endif 2635 2636 ); 2637 parse_packet_in_begin = lsquic_Q046_parse_packet_in_begin; 2638 } 2639 } 2640 else 2641 parse_packet_in_begin = lsquic_parse_packet_in_begin; 2642 2643 n_zeroes = 0; 2644 do 2645 { 2646 packet_in = lsquic_mm_get_packet_in(&engine->pub.enp_mm); 2647 if (!packet_in) 2648 return -1; 2649 /* Library does not modify packet_in_data, it is not referenced after 2650 * this function returns and subsequent release of pi_data is guarded 2651 * by PI_OWN_DATA flag. 2652 */ 2653 packet_in->pi_data = (unsigned char *) packet_in_data; 2654 if (0 != parse_packet_in_begin(packet_in, packet_end - packet_in_data, 2655 engine->flags & ENG_SERVER, 2656 engine->pub.enp_settings.es_scid_len, &ppstate)) 2657 { 2658 LSQ_DEBUG("Cannot parse incoming packet's header"); 2659 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 2660 errno = EINVAL; 2661 return -1; 2662 } 2663 2664 packet_in_data += packet_in->pi_data_sz; 2665 packet_in->pi_received = lsquic_time_now(); 2666 packet_in->pi_flags |= (3 & ecn) << PIBIT_ECN_SHIFT; 2667 eng_hist_inc(&engine->history, packet_in->pi_received, sl_packets_in); 2668 s = process_packet_in(engine, packet_in, &ppstate, sa_local, sa_peer, 2669 peer_ctx, packet_in_size); 2670 n_zeroes += s == 0; 2671 } 2672 while (0 == s && packet_in_data < packet_end); 2673 2674 return n_zeroes > 0 ? 0 : s; 2675} 2676 2677 2678#if __GNUC__ && !defined(NDEBUG) 2679__attribute__((weak)) 2680#endif 2681unsigned 2682lsquic_engine_quic_versions (const lsquic_engine_t *engine) 2683{ 2684 return engine->pub.enp_settings.es_versions; 2685} 2686 2687 2688void 2689lsquic_engine_cooldown (lsquic_engine_t *engine) 2690{ 2691 struct lsquic_hash_elem *el; 2692 lsquic_conn_t *conn; 2693 2694 if (engine->flags & ENG_COOLDOWN) 2695 /* AFAICT, there is no harm in calling this function more than once, 2696 * but log it just in case, as it may indicate an error in the caller. 
2697 */ 2698 LSQ_INFO("cooldown called again"); 2699 engine->flags |= ENG_COOLDOWN; 2700 LSQ_INFO("entering cooldown mode"); 2701 if (engine->flags & ENG_SERVER) 2702 drop_all_mini_conns(engine); 2703 for (el = lsquic_hash_first(engine->conns_hash); el; 2704 el = lsquic_hash_next(engine->conns_hash)) 2705 { 2706 conn = lsquic_hashelem_getdata(el); 2707 lsquic_conn_going_away(conn); 2708 } 2709} 2710 2711 2712int 2713lsquic_engine_earliest_adv_tick (lsquic_engine_t *engine, int *diff) 2714{ 2715 const struct attq_elem *next_attq; 2716 lsquic_time_t now, next_time; 2717#if LSQUIC_DEBUG_NEXT_ADV_TICK 2718 const struct lsquic_conn *conn; 2719 const enum lsq_log_level L = LSQ_LOG_DEBUG; /* Easy toggle */ 2720#endif 2721 2722 ENGINE_CALLS_INCR(engine); 2723 2724 if ((engine->flags & ENG_PAST_DEADLINE) 2725 && lsquic_mh_count(&engine->conns_out)) 2726 { 2727#if LSQUIC_DEBUG_NEXT_ADV_TICK 2728 conn = lsquic_mh_peek(&engine->conns_out); 2729 engine->last_logged_conn = 0; 2730 LSQ_LOGC(L, "next advisory tick is now: went past deadline last time " 2731 "and have %u outgoing connection%.*s (%"CID_FMT" first)", 2732 lsquic_mh_count(&engine->conns_out), 2733 lsquic_mh_count(&engine->conns_out) != 1, "s", 2734 CID_BITS(lsquic_conn_log_cid(conn))); 2735#endif 2736 *diff = 0; 2737 return 1; 2738 } 2739 2740 if (engine->pr_queue && prq_have_pending(engine->pr_queue)) 2741 { 2742#if LSQUIC_DEBUG_NEXT_ADV_TICK 2743 engine->last_logged_conn = 0; 2744 LSQ_LOG(L, "next advisory tick is now: have pending PRQ elements"); 2745#endif 2746 *diff = 0; 2747 return 1; 2748 } 2749 2750 if (lsquic_mh_count(&engine->conns_tickable)) 2751 { 2752#if LSQUIC_DEBUG_NEXT_ADV_TICK 2753 conn = lsquic_mh_peek(&engine->conns_tickable); 2754 engine->last_logged_conn = 0; 2755 LSQ_LOGC(L, "next advisory tick is now: have %u tickable " 2756 "connection%.*s (%"CID_FMT" first)", 2757 lsquic_mh_count(&engine->conns_tickable), 2758 lsquic_mh_count(&engine->conns_tickable) != 1, "s", 2759 CID_BITS(lsquic_conn_log_cid(conn))); 2760#endif 2761 *diff = 0; 2762 return 1; 2763 } 2764 2765 next_attq = attq_next(engine->attq); 2766 if (engine->pub.enp_flags & ENPUB_CAN_SEND) 2767 { 2768 if (next_attq) 2769 next_time = next_attq->ae_adv_time; 2770 else 2771 return 0; 2772 } 2773 else 2774 { 2775 if (next_attq) 2776 { 2777 next_time = next_attq->ae_adv_time; 2778 if (engine->resume_sending_at < next_time) 2779 { 2780 next_time = engine->resume_sending_at; 2781 next_attq = NULL; 2782 } 2783 } 2784 else 2785 next_time = engine->resume_sending_at; 2786 } 2787 2788 now = lsquic_time_now(); 2789 *diff = (int) ((int64_t) next_time - (int64_t) now); 2790#if LSQUIC_DEBUG_NEXT_ADV_TICK 2791 if (next_attq) 2792 { 2793 /* Deduplicate consecutive log messages about the same reason for the 2794 * same connection. 2795 * If diff is always zero or diff reset to a higher value, event is 2796 * still logged. 
2797 */ 2798 if (!((unsigned) next_attq->ae_why == engine->last_logged_ae_why 2799 && (uintptr_t) next_attq->ae_conn 2800 == engine->last_logged_conn 2801 && *diff < engine->last_tick_diff)) 2802 { 2803 engine->last_logged_conn = (uintptr_t) next_attq->ae_conn; 2804 engine->last_logged_ae_why = (unsigned) next_attq->ae_why; 2805 engine->last_tick_diff = *diff; 2806 LSQ_LOGC(L, "next advisory tick is %d usec away: conn %"CID_FMT 2807 ": %s", *diff, CID_BITS(lsquic_conn_log_cid(next_attq->ae_conn)), 2808 lsquic_attq_why2str(next_attq->ae_why)); 2809 } 2810 } 2811 else 2812 LSQ_LOG(L, "next advisory tick is %d usec away: resume sending", *diff); 2813#endif 2814 return 1; 2815} 2816 2817 2818unsigned 2819lsquic_engine_count_attq (lsquic_engine_t *engine, int from_now) 2820{ 2821 lsquic_time_t now; 2822 ENGINE_CALLS_INCR(engine); 2823 now = lsquic_time_now(); 2824 if (from_now < 0) 2825 now -= from_now; 2826 else 2827 now += from_now; 2828 return attq_count_before(engine->attq, now); 2829} 2830 2831 2832int 2833lsquic_engine_add_cid (struct lsquic_engine_public *enpub, 2834 struct lsquic_conn *conn, unsigned cce_idx) 2835{ 2836 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2837 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2838 void *peer_ctx; 2839 2840 assert(cce_idx < conn->cn_n_cces); 2841 assert(conn->cn_cces_mask & (1 << cce_idx)); 2842 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 2843 2844 if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 2845 cce->cce_cid.len, conn, &cce->cce_hash_el)) 2846 { 2847 LSQ_DEBUGC("add %"CID_FMT" to the list of SCIDs", 2848 CID_BITS(&cce->cce_cid)); 2849 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 2850 cce->cce_flags |= CCE_REG; 2851 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 2852 return 0; 2853 } 2854 else 2855 { 2856 LSQ_WARNC("could not add new cid %"CID_FMT" to the SCID hash", 2857 CID_BITS(&cce->cce_cid)); 2858 return -1; 2859 } 2860} 2861 2862 2863void 2864lsquic_engine_retire_cid (struct lsquic_engine_public *enpub, 2865 struct lsquic_conn *conn, unsigned cce_idx, lsquic_time_t now) 2866{ 2867 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2868 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2869 void *peer_ctx; 2870 2871 assert(cce_idx < conn->cn_n_cces); 2872 2873 if (cce->cce_hash_el.qhe_flags & QHE_HASHED) 2874 lsquic_hash_erase(engine->conns_hash, &cce->cce_hash_el); 2875 2876 if (engine->purga) 2877 { 2878 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 2879 lsquic_purga_add(engine->purga, &cce->cce_cid, peer_ctx, 2880 PUTY_CID_RETIRED, now); 2881 } 2882 conn->cn_cces_mask &= ~(1u << cce_idx); 2883 LSQ_DEBUGC("retire CID %"CID_FMT, CID_BITS(&cce->cce_cid)); 2884} 2885 2886 2887
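/* Illustrative sketch (kept out of the build with #if 0): one way an
 * application might drive the engine functions defined above from its event
 * loop.  Only the lsquic_engine_* calls are real; service_engine() and
 * reschedule_timer() are hypothetical names used for illustration.
 */
#if 0
static void
service_engine (lsquic_engine_t *engine)
{
    int diff;

    /* Tick connections whose advisory tick time has arrived and process
     * data previously fed in via lsquic_engine_packet_in():
     */
    lsquic_engine_process_conns(engine);

    /* If some packets could not be sent earlier (e.g. EAGAIN), try again: */
    if (lsquic_engine_has_unsent_packets(engine))
        lsquic_engine_send_unsent_packets(engine);

    /* Ask the engine when it next wants to run and arm a timer; a
     * non-positive `diff' means "call again as soon as possible":
     */
    if (lsquic_engine_earliest_adv_tick(engine, &diff))
        reschedule_timer(diff > 0 ? diff : 0);
}
#endif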