lsquic_engine.c revision 10c41073
1/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_engine.c - QUIC engine 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <limits.h> 10#include <stdint.h> 11#include <stdio.h> 12#include <stdlib.h> 13#include <string.h> 14#include <sys/queue.h> 15#include <time.h> 16#include <arpa/inet.h> 17#ifndef WIN32 18#include <sys/time.h> 19#include <netinet/in.h> 20#include <sys/types.h> 21#include <sys/stat.h> 22#include <fcntl.h> 23#include <unistd.h> 24#include <netdb.h> 25#endif 26 27#ifndef NDEBUG 28#include <sys/types.h> 29#include <regex.h> /* For code that loses packets */ 30#endif 31 32#if LOG_PACKET_CHECKSUM 33#include <zlib.h> 34#endif 35 36#include <openssl/aead.h> 37 38#include "lsquic.h" 39#include "lsquic_types.h" 40#include "lsquic_int_types.h" 41#include "lsquic_sizes.h" 42#include "lsquic_parse_common.h" 43#include "lsquic_parse.h" 44#include "lsquic_packet_in.h" 45#include "lsquic_packet_out.h" 46#include "lsquic_senhist.h" 47#include "lsquic_rtt.h" 48#include "lsquic_cubic.h" 49#include "lsquic_pacer.h" 50#include "lsquic_bw_sampler.h" 51#include "lsquic_minmax.h" 52#include "lsquic_bbr.h" 53#include "lsquic_send_ctl.h" 54#include "lsquic_set.h" 55#include "lsquic_conn_flow.h" 56#include "lsquic_sfcw.h" 57#include "lsquic_hash.h" 58#include "lsquic_conn.h" 59#include "lsquic_full_conn.h" 60#include "lsquic_util.h" 61#include "lsquic_qtags.h" 62#include "lsquic_enc_sess.h" 63#include "lsquic_mm.h" 64#include "lsquic_engine_public.h" 65#include "lsquic_eng_hist.h" 66#include "lsquic_ev_log.h" 67#include "lsquic_version.h" 68#include "lsquic_pr_queue.h" 69#include "lsquic_mini_conn.h" 70#include "lsquic_mini_conn_ietf.h" 71#include "lsquic_stock_shi.h" 72#include "lsquic_purga.h" 73#include "lsquic_tokgen.h" 74#include "lsquic_attq.h" 75#include "lsquic_min_heap.h" 76#include "lsquic_http1x_if.h" 77#include "lsquic_parse_common.h" 78#include "lsquic_handshake.h" 79#include "lsquic_crand.h" 80 81#define LSQUIC_LOGGER_MODULE LSQLM_ENGINE 82#include "lsquic_logger.h" 83 84#ifndef LSQUIC_DEBUG_NEXT_ADV_TICK 85#define LSQUIC_DEBUG_NEXT_ADV_TICK 1 86#endif 87 88#if LSQUIC_DEBUG_NEXT_ADV_TICK 89#include "lsquic_alarmset.h" 90#endif 91 92#define MIN(a, b) ((a) < (b) ? (a) : (b)) 93 94/* The batch of outgoing packets grows and shrinks dynamically */ 95#define MAX_OUT_BATCH_SIZE 1024 96#define MIN_OUT_BATCH_SIZE 4 97#define INITIAL_OUT_BATCH_SIZE 32 98 99struct out_batch 100{ 101 lsquic_conn_t *conns [MAX_OUT_BATCH_SIZE]; 102 struct lsquic_out_spec outs [MAX_OUT_BATCH_SIZE]; 103 unsigned pack_off[MAX_OUT_BATCH_SIZE]; 104 lsquic_packet_out_t *packets[MAX_OUT_BATCH_SIZE * 2]; 105 struct iovec iov [MAX_OUT_BATCH_SIZE * 2]; 106}; 107 108typedef struct lsquic_conn * (*conn_iter_f)(struct lsquic_engine *); 109 110static void 111process_connections (struct lsquic_engine *engine, conn_iter_f iter, 112 lsquic_time_t now); 113 114static void 115engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag); 116 117static lsquic_conn_t * 118engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 119 enum lsquic_conn_flags flag); 120 121static void 122force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn); 123 124#if LSQUIC_COUNT_ENGINE_CALLS 125#define ENGINE_CALLS_INCR(e) do { ++(e)->n_engine_calls; } while (0) 126#else 127#define ENGINE_CALLS_INCR(e) 128#endif 129 130/* Nested calls to LSQUIC are not supported */ 131#define ENGINE_IN(e) do { \ 132 assert(!((e)->pub.enp_flags & ENPUB_PROC)); \ 133 (e)->pub.enp_flags |= ENPUB_PROC; \ 134 ENGINE_CALLS_INCR(e); \ 135} while (0) 136 137#define ENGINE_OUT(e) do { \ 138 assert((e)->pub.enp_flags & ENPUB_PROC); \ 139 (e)->pub.enp_flags &= ~ENPUB_PROC; \ 140} while (0) 141 142/* A connection can be referenced from one of six places: 143 * 144 * 1. A hash. The engine maintains two hash tables -- one for full, and 145 * one for mini connections. A connection starts its life in one of 146 * those. 147 * 148 * 2. Outgoing queue. 149 * 150 * 3. Tickable queue 151 * 152 * 4. Advisory Tick Time queue. 153 * 154 * 5. Closing connections queue. This is a transient queue -- it only 155 * exists for the duration of process_connections() function call. 156 * 157 * 6. Ticked connections queue. Another transient queue, similar to (5). 158 * 159 * The idea is to destroy the connection when it is no longer referenced. 160 * For example, a connection tick may return TICK_SEND|TICK_CLOSE. In 161 * that case, the connection is referenced from two places: (2) and (5). 162 * After its packets are sent, it is only referenced in (5), and at the 163 * end of the function call, when it is removed from (5), reference count 164 * goes to zero and the connection is destroyed. If not all packets can 165 * be sent, at the end of the function call, the connection is referenced 166 * by (2) and will only be removed once all outgoing packets have been 167 * sent. 168 */ 169#define CONN_REF_FLAGS (LSCONN_HASHED \ 170 |LSCONN_HAS_OUTGOING \ 171 |LSCONN_TICKABLE \ 172 |LSCONN_TICKED \ 173 |LSCONN_CLOSING \ 174 |LSCONN_ATTQ) 175 176 177 178 179struct cid_update_batch 180{ 181 lsquic_cids_update_f cub_update_cids; 182 void *cub_update_ctx; 183 unsigned cub_count; 184 lsquic_cid_t cub_cids[20]; 185 void *cub_peer_ctxs[20]; 186}; 187 188static void 189cub_init (struct cid_update_batch *, lsquic_cids_update_f, void *); 190 191 192struct lsquic_engine 193{ 194 struct lsquic_engine_public pub; 195 enum { 196 ENG_SERVER = LSENG_SERVER, 197 ENG_HTTP = LSENG_HTTP, 198 ENG_COOLDOWN = (1 << 7), /* Cooldown: no new connections */ 199 ENG_PAST_DEADLINE 200 = (1 << 8), /* Previous call to a processing 201 * function went past time threshold. 202 */ 203 ENG_CONNS_BY_ADDR 204 = (1 << 9), /* Connections are hashed by address */ 205#ifndef NDEBUG 206 ENG_COALESCE = (1 << 24), /* Packet coalescing is enabled */ 207 ENG_LOSE_PACKETS= (1 << 25), /* Lose *some* outgoing packets */ 208 ENG_DTOR = (1 << 26), /* Engine destructor */ 209#endif 210 } flags; 211 lsquic_packets_out_f packets_out; 212 void *packets_out_ctx; 213 lsquic_cids_update_f report_new_scids; 214 lsquic_cids_update_f report_live_scids; 215 lsquic_cids_update_f report_old_scids; 216 void *scids_ctx; 217 struct lsquic_hash *conns_hash; 218 struct min_heap conns_tickable; 219 struct min_heap conns_out; 220 /* Use a union because only one iterator is being used at any one time */ 221 union { 222 struct { 223 struct cert_susp_head *head; 224 } resumed; 225 struct lsquic_conn *one_conn; 226 } iter_state; 227 struct eng_hist history; 228 unsigned batch_size; 229 struct pr_queue *pr_queue; 230 struct attq *attq; 231 /* Track time last time a packet was sent to give new connections 232 * priority lower than that of existing connections. 233 */ 234 lsquic_time_t last_sent; 235#ifndef NDEBUG 236 regex_t lose_packets_re; 237 const char *lose_packets_str; 238#endif 239 unsigned n_conns; 240 lsquic_time_t deadline; 241 lsquic_time_t resume_sending_at; 242 unsigned mini_conns_count; 243 struct lsquic_purga *purga; 244#if LSQUIC_CONN_STATS 245 struct { 246 unsigned conns; 247 } stats; 248 struct conn_stats conn_stats_sum; 249 FILE *stats_fh; 250#endif 251 struct cid_update_batch new_scids; 252 struct out_batch out_batch; 253#if LSQUIC_COUNT_ENGINE_CALLS 254 unsigned long n_engine_calls; 255#endif 256#if LSQUIC_DEBUG_NEXT_ADV_TICK 257 uintptr_t last_logged_conn; 258 unsigned last_logged_ae_why; 259 int last_tick_diff; 260#endif 261 struct crand crand; 262}; 263 264 265void 266lsquic_engine_init_settings (struct lsquic_engine_settings *settings, 267 unsigned flags) 268{ 269 memset(settings, 0, sizeof(*settings)); 270 settings->es_versions = LSQUIC_DF_VERSIONS; 271 if (flags & ENG_SERVER) 272 { 273 settings->es_cfcw = LSQUIC_DF_CFCW_SERVER; 274 settings->es_sfcw = LSQUIC_DF_SFCW_SERVER; 275 settings->es_init_max_data 276 = LSQUIC_DF_INIT_MAX_DATA_SERVER; 277 settings->es_init_max_stream_data_bidi_remote 278 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_SERVER; 279 settings->es_init_max_stream_data_bidi_local 280 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_SERVER; 281 settings->es_init_max_stream_data_uni 282 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_SERVER; 283 settings->es_init_max_streams_uni 284 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_SERVER; 285 settings->es_ping_period = 0; 286 } 287 else 288 { 289 settings->es_cfcw = LSQUIC_DF_CFCW_CLIENT; 290 settings->es_sfcw = LSQUIC_DF_SFCW_CLIENT; 291 settings->es_init_max_data 292 = LSQUIC_DF_INIT_MAX_DATA_CLIENT; 293 settings->es_init_max_stream_data_bidi_remote 294 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_CLIENT; 295 settings->es_init_max_stream_data_bidi_local 296 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_CLIENT; 297 settings->es_init_max_stream_data_uni 298 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_CLIENT; 299 settings->es_init_max_streams_uni 300 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_CLIENT; 301 settings->es_ping_period = LSQUIC_DF_PING_PERIOD; 302 } 303 settings->es_max_streams_in = LSQUIC_DF_MAX_STREAMS_IN; 304 settings->es_idle_conn_to = LSQUIC_DF_IDLE_CONN_TO; 305 settings->es_idle_timeout = LSQUIC_DF_IDLE_TIMEOUT; 306 settings->es_handshake_to = LSQUIC_DF_HANDSHAKE_TO; 307 settings->es_silent_close = LSQUIC_DF_SILENT_CLOSE; 308 settings->es_max_header_list_size 309 = LSQUIC_DF_MAX_HEADER_LIST_SIZE; 310 settings->es_ua = LSQUIC_DF_UA; 311 settings->es_ecn = LSQUIC_DF_ECN; 312 313 settings->es_pdmd = QTAG_X509; 314 settings->es_aead = QTAG_AESG; 315 settings->es_kexs = QTAG_C255; 316 settings->es_support_push = LSQUIC_DF_SUPPORT_PUSH; 317 settings->es_support_tcid0 = LSQUIC_DF_SUPPORT_TCID0; 318 settings->es_support_nstp = LSQUIC_DF_SUPPORT_NSTP; 319 settings->es_honor_prst = LSQUIC_DF_HONOR_PRST; 320 settings->es_progress_check = LSQUIC_DF_PROGRESS_CHECK; 321 settings->es_rw_once = LSQUIC_DF_RW_ONCE; 322 settings->es_proc_time_thresh= LSQUIC_DF_PROC_TIME_THRESH; 323 settings->es_pace_packets = LSQUIC_DF_PACE_PACKETS; 324 settings->es_clock_granularity = LSQUIC_DF_CLOCK_GRANULARITY; 325 settings->es_max_inchoate = LSQUIC_DF_MAX_INCHOATE; 326 settings->es_send_prst = LSQUIC_DF_SEND_PRST; 327 settings->es_sttl = LSQUIC_DF_STTL; 328 settings->es_init_max_streams_bidi 329 = LSQUIC_DF_INIT_MAX_STREAMS_BIDI; 330 settings->es_scid_len = LSQUIC_DF_SCID_LEN; 331 settings->es_scid_iss_rate = LSQUIC_DF_SCID_ISS_RATE; 332 settings->es_qpack_dec_max_size = LSQUIC_DF_QPACK_DEC_MAX_SIZE; 333 settings->es_qpack_dec_max_blocked = LSQUIC_DF_QPACK_DEC_MAX_BLOCKED; 334 settings->es_qpack_enc_max_size = LSQUIC_DF_QPACK_ENC_MAX_SIZE; 335 settings->es_qpack_enc_max_blocked = LSQUIC_DF_QPACK_ENC_MAX_BLOCKED; 336 settings->es_allow_migration = LSQUIC_DF_ALLOW_MIGRATION; 337 settings->es_ql_bits = LSQUIC_DF_QL_BITS; 338} 339 340 341/* Note: if returning an error, err_buf must be valid if non-NULL */ 342int 343lsquic_engine_check_settings (const struct lsquic_engine_settings *settings, 344 unsigned flags, 345 char *err_buf, size_t err_buf_sz) 346{ 347 if (settings->es_cfcw < LSQUIC_MIN_FCW || 348 settings->es_sfcw < LSQUIC_MIN_FCW) 349 { 350 if (err_buf) 351 snprintf(err_buf, err_buf_sz, "%s", 352 "flow control window set too low"); 353 return -1; 354 } 355 if (0 == (settings->es_versions & LSQUIC_SUPPORTED_VERSIONS)) 356 { 357 if (err_buf) 358 snprintf(err_buf, err_buf_sz, "%s", 359 "No supported QUIC versions specified"); 360 return -1; 361 } 362 if (settings->es_versions & ~LSQUIC_SUPPORTED_VERSIONS) 363 { 364 if (err_buf) 365 snprintf(err_buf, err_buf_sz, "%s", 366 "one or more unsupported QUIC version is specified"); 367 return -1; 368 } 369 if (flags & ENG_SERVER) 370 { 371 if (settings->es_handshake_to > 372 MAX_MINI_CONN_LIFESPAN_IN_USEC) 373 { 374 if (err_buf) 375 snprintf(err_buf, err_buf_sz, "handshake timeout %lu" 376 " usec is too large. The maximum for server is %u usec", 377 settings->es_handshake_to, MAX_MINI_CONN_LIFESPAN_IN_USEC); 378 return -1; 379 } 380 } 381 if (settings->es_idle_timeout > 600) 382 { 383 if (err_buf) 384 snprintf(err_buf, err_buf_sz, "%s", 385 "The maximum value of idle timeout is 600 seconds"); 386 return -1; 387 } 388 if (settings->es_scid_len > MAX_CID_LEN) 389 { 390 if (err_buf) 391 snprintf(err_buf, err_buf_sz, "Source connection ID cannot be %u " 392 "bytes long; it must be between 0 and %u.", 393 settings->es_scid_len, MAX_CID_LEN); 394 return -1; 395 } 396 397 if (settings->es_cc_algo > 2) 398 { 399 if (err_buf) 400 snprintf(err_buf, err_buf_sz, "Invalid congestion control " 401 "algorithm value %u", settings->es_cc_algo); 402 return -1; 403 } 404 405 if (!(settings->es_ql_bits >= -1 && settings->es_ql_bits <= 2)) 406 { 407 if (err_buf) 408 snprintf(err_buf, err_buf_sz, "Invalid QL bits value %d ", 409 settings->es_ql_bits); 410 return -1; 411 } 412 413 return 0; 414} 415 416 417static void 418free_packet (void *ctx, void *conn_ctx, void *packet_data, char is_ipv6) 419{ 420 free(packet_data); 421} 422 423 424static void * 425malloc_buf (void *ctx, void *conn_ctx, unsigned short size, char is_ipv6) 426{ 427 return malloc(size); 428} 429 430 431static const struct lsquic_packout_mem_if stock_pmi = 432{ 433 malloc_buf, free_packet, free_packet, 434}; 435 436 437static int 438hash_conns_by_addr (const struct lsquic_engine *engine) 439{ 440 if (engine->flags & ENG_SERVER) 441 return 0; 442 if (engine->pub.enp_settings.es_versions & LSQUIC_FORCED_TCID0_VERSIONS) 443 return 1; 444 if ((engine->pub.enp_settings.es_versions & LSQUIC_GQUIC_HEADER_VERSIONS) 445 && engine->pub.enp_settings.es_support_tcid0) 446 return 1; 447 if (engine->pub.enp_settings.es_scid_len == 0) 448 return 1; 449 return 0; 450} 451 452 453lsquic_engine_t * 454lsquic_engine_new (unsigned flags, 455 const struct lsquic_engine_api *api) 456{ 457 lsquic_engine_t *engine; 458 char err_buf[100]; 459 460 if (!api->ea_packets_out) 461 { 462 LSQ_ERROR("packets_out callback is not specified"); 463 return NULL; 464 } 465 466 if (api->ea_settings && 467 0 != lsquic_engine_check_settings(api->ea_settings, flags, 468 err_buf, sizeof(err_buf))) 469 { 470 LSQ_ERROR("cannot create engine: %s", err_buf); 471 return NULL; 472 } 473 474 engine = calloc(1, sizeof(*engine)); 475 if (!engine) 476 return NULL; 477 if (0 != lsquic_mm_init(&engine->pub.enp_mm)) 478 { 479 free(engine); 480 return NULL; 481 } 482 if (api->ea_settings) 483 engine->pub.enp_settings = *api->ea_settings; 484 else 485 lsquic_engine_init_settings(&engine->pub.enp_settings, flags); 486 int tag_buf_len; 487 tag_buf_len = lsquic_gen_ver_tags(engine->pub.enp_ver_tags_buf, 488 sizeof(engine->pub.enp_ver_tags_buf), 489 engine->pub.enp_settings.es_versions); 490 if (tag_buf_len <= 0) 491 { 492 LSQ_ERROR("cannot generate version tags buffer"); 493 free(engine); 494 return NULL; 495 } 496 engine->pub.enp_ver_tags_len = tag_buf_len; 497 engine->pub.enp_flags = ENPUB_CAN_SEND; 498 engine->pub.enp_stream_if = api->ea_stream_if; 499 engine->pub.enp_stream_if_ctx = api->ea_stream_if_ctx; 500 501 engine->flags = flags; 502#ifndef NDEBUG 503 engine->flags |= ENG_COALESCE; 504#endif 505 engine->packets_out = api->ea_packets_out; 506 engine->packets_out_ctx = api->ea_packets_out_ctx; 507 engine->report_new_scids = api->ea_new_scids; 508 engine->report_live_scids = api->ea_live_scids; 509 engine->report_old_scids = api->ea_old_scids; 510 engine->scids_ctx = api->ea_cids_update_ctx; 511 cub_init(&engine->new_scids, engine->report_new_scids, engine->scids_ctx); 512 engine->pub.enp_lookup_cert = api->ea_lookup_cert; 513 engine->pub.enp_cert_lu_ctx = api->ea_cert_lu_ctx; 514 engine->pub.enp_get_ssl_ctx = api->ea_get_ssl_ctx; 515 if (api->ea_shi) 516 { 517 engine->pub.enp_shi = api->ea_shi; 518 engine->pub.enp_shi_ctx = api->ea_shi_ctx; 519 } 520 else 521 { 522 engine->pub.enp_shi = &stock_shi; 523 engine->pub.enp_shi_ctx = stock_shared_hash_new(); 524 if (!engine->pub.enp_shi_ctx) 525 { 526 free(engine); 527 return NULL; 528 } 529 } 530 if (api->ea_hsi_if) 531 { 532 engine->pub.enp_hsi_if = api->ea_hsi_if; 533 engine->pub.enp_hsi_ctx = api->ea_hsi_ctx; 534 } 535 else 536 { 537 engine->pub.enp_hsi_if = lsquic_http1x_if; 538 engine->pub.enp_hsi_ctx = NULL; 539 } 540 if (api->ea_pmi) 541 { 542 engine->pub.enp_pmi = api->ea_pmi; 543 engine->pub.enp_pmi_ctx = api->ea_pmi_ctx; 544 } 545 else 546 { 547 engine->pub.enp_pmi = &stock_pmi; 548 engine->pub.enp_pmi_ctx = NULL; 549 } 550 engine->pub.enp_verify_cert = api->ea_verify_cert; 551 engine->pub.enp_verify_ctx = api->ea_verify_ctx; 552 engine->pub.enp_kli = api->ea_keylog_if; 553 engine->pub.enp_kli_ctx = api->ea_keylog_ctx; 554 engine->pub.enp_engine = engine; 555 if (hash_conns_by_addr(engine)) 556 engine->flags |= ENG_CONNS_BY_ADDR; 557 engine->conns_hash = lsquic_hash_create(); 558 engine->pub.enp_tokgen = lsquic_tg_new(&engine->pub); 559 if (!engine->pub.enp_tokgen) 560 return NULL; 561 engine->pub.enp_crand = &engine->crand; 562 if (flags & ENG_SERVER) 563 { 564 engine->pr_queue = prq_create( 565 10000 /* TODO: make configurable */, MAX_OUT_BATCH_SIZE, 566 &engine->pub); 567 if (!engine->pr_queue) 568 { 569 lsquic_tg_destroy(engine->pub.enp_tokgen); 570 return NULL; 571 } 572 engine->purga = lsquic_purga_new(30 * 1000 * 1000, 573 engine->report_old_scids, engine->scids_ctx); 574 if (!engine->purga) 575 { 576 lsquic_tg_destroy(engine->pub.enp_tokgen); 577 prq_destroy(engine->pr_queue); 578 return NULL; 579 } 580 } 581 engine->attq = attq_create(); 582 eng_hist_init(&engine->history); 583 engine->batch_size = INITIAL_OUT_BATCH_SIZE; 584 if (engine->pub.enp_settings.es_honor_prst) 585 { 586 engine->pub.enp_srst_hash = lsquic_hash_create(); 587 if (!engine->pub.enp_srst_hash) 588 { 589 lsquic_engine_destroy(engine); 590 return NULL; 591 } 592 } 593 594#ifndef NDEBUG 595 { 596 const char *env; 597 env = getenv("LSQUIC_LOSE_PACKETS_RE"); 598 if (env) 599 { 600 if (0 != regcomp(&engine->lose_packets_re, env, 601 REG_EXTENDED|REG_NOSUB)) 602 { 603 LSQ_ERROR("could not compile lost packet regex `%s'", env); 604 return NULL; 605 } 606 engine->flags |= ENG_LOSE_PACKETS; 607 engine->lose_packets_str = env; 608 LSQ_WARN("will lose packets that match the following regex: %s", 609 env); 610 } 611 env = getenv("LSQUIC_COALESCE"); 612 if (env) 613 { 614 engine->flags &= ~ENG_COALESCE; 615 if (atoi(env)) 616 { 617 engine->flags |= ENG_COALESCE; 618 LSQ_NOTICE("will coalesce packets"); 619 } 620 else 621 LSQ_NOTICE("will not coalesce packets"); 622 } 623 } 624#endif 625#if LSQUIC_CONN_STATS 626 engine->stats_fh = api->ea_stats_fh; 627#endif 628 629 LSQ_INFO("instantiated engine"); 630 return engine; 631} 632 633 634#if LOG_PACKET_CHECKSUM 635static void 636log_packet_checksum (const lsquic_cid_t *cid, const char *direction, 637 const unsigned char *buf, size_t bufsz) 638{ 639 EV_LOG_CONN_EVENT(cid, "packet %s checksum: %08X", direction, 640 (uint32_t) crc32(0, buf, bufsz)); 641} 642 643 644#endif 645 646 647static void 648grow_batch_size (struct lsquic_engine *engine) 649{ 650 engine->batch_size <<= engine->batch_size < MAX_OUT_BATCH_SIZE; 651} 652 653 654static void 655shrink_batch_size (struct lsquic_engine *engine) 656{ 657 engine->batch_size >>= engine->batch_size > MIN_OUT_BATCH_SIZE; 658} 659 660 661struct cce_cid_iter 662{ 663 const struct lsquic_conn *conn; 664 unsigned todo, n; 665}; 666 667 668static struct conn_cid_elem * 669cce_iter_next (struct cce_cid_iter *citer) 670{ 671 struct conn_cid_elem *cce; 672 673 while (citer->todo) 674 if (citer->todo & (1 << citer->n)) 675 { 676 citer->todo &= ~(1 << citer->n); 677 cce = &citer->conn->cn_cces[ citer->n++ ]; 678 if (!(cce->cce_flags & CCE_PORT)) 679 return cce; 680 } 681 else 682 ++citer->n; 683 684 return NULL; 685} 686 687 688static struct conn_cid_elem * 689cce_iter_first (struct cce_cid_iter *citer, const struct lsquic_conn *conn) 690{ 691 citer->conn = conn; 692 citer->todo = conn->cn_cces_mask; 693 citer->n = 0; 694 return cce_iter_next(citer); 695} 696 697 698#if LSQUIC_CONN_STATS 699void 700update_stats_sum (struct lsquic_engine *engine, struct lsquic_conn *conn) 701{ 702 unsigned long *const dst = (unsigned long *) &engine->conn_stats_sum; 703 const unsigned long *src; 704 const struct conn_stats *stats; 705 unsigned i; 706 707 if (conn->cn_if->ci_get_stats && (stats = conn->cn_if->ci_get_stats(conn))) 708 { 709 ++engine->stats.conns; 710 src = (unsigned long *) stats; 711 for (i = 0; i < sizeof(*stats) / sizeof(unsigned long); ++i) 712 dst[i] += src[i]; 713 } 714} 715 716 717#endif 718 719 720/* Wrapper to make sure important things occur before the connection is 721 * really destroyed. 722 */ 723static void 724destroy_conn (struct lsquic_engine *engine, struct lsquic_conn *conn, 725 lsquic_time_t now) 726{ 727 struct cce_cid_iter citer; 728 const struct conn_cid_elem *cce; 729 lsquic_time_t drain_time; 730 struct purga_el *puel; 731 732 engine->mini_conns_count -= !!(conn->cn_flags & LSCONN_MINI); 733 if (engine->purga 734 /* Blacklist all CIDs except for promoted mini connections */ 735 && (conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) 736 != (LSCONN_MINI|LSCONN_PROMOTED)) 737 { 738 if (!(conn->cn_flags & LSCONN_IMMED_CLOSE) 739 && conn->cn_if->ci_drain_time && 740 (drain_time = conn->cn_if->ci_drain_time(conn), drain_time)) 741 { 742 for (cce = cce_iter_first(&citer, conn); cce; 743 cce = cce_iter_next(&citer)) 744 { 745 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 746 lsquic_conn_get_peer_ctx(conn, NULL), 747 PUTY_CONN_DRAIN, now); 748 if (puel) 749 puel->puel_time = now + drain_time; 750 } 751 } 752 else 753 { 754 for (cce = cce_iter_first(&citer, conn); cce; 755 cce = cce_iter_next(&citer)) 756 { 757 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 758 lsquic_conn_get_peer_ctx(conn, NULL), 759 PUTY_CONN_DELETED, now); 760 if (puel) 761 { 762 puel->puel_time = now; 763 puel->puel_count = 0; 764 } 765 } 766 } 767 } 768#if LSQUIC_CONN_STATS 769 update_stats_sum(engine, conn); 770#endif 771 --engine->n_conns; 772 conn->cn_flags |= LSCONN_NEVER_TICKABLE; 773 conn->cn_if->ci_destroy(conn); 774} 775 776 777static int 778maybe_grow_conn_heaps (struct lsquic_engine *engine) 779{ 780 struct min_heap_elem *els; 781 unsigned count; 782 783 if (engine->n_conns < lsquic_mh_nalloc(&engine->conns_tickable)) 784 return 0; /* Nothing to do */ 785 786 if (lsquic_mh_nalloc(&engine->conns_tickable)) 787 count = lsquic_mh_nalloc(&engine->conns_tickable) * 2 * 2; 788 else 789 count = 8; 790 791 els = malloc(sizeof(els[0]) * count); 792 if (!els) 793 { 794 LSQ_ERROR("%s: malloc failed", __func__); 795 return -1; 796 } 797 798 LSQ_DEBUG("grew heaps to %u elements", count / 2); 799 memcpy(&els[0], engine->conns_tickable.mh_elems, 800 sizeof(els[0]) * lsquic_mh_count(&engine->conns_tickable)); 801 memcpy(&els[count / 2], engine->conns_out.mh_elems, 802 sizeof(els[0]) * lsquic_mh_count(&engine->conns_out)); 803 free(engine->conns_tickable.mh_elems); 804 engine->conns_tickable.mh_elems = els; 805 engine->conns_out.mh_elems = &els[count / 2]; 806 engine->conns_tickable.mh_nalloc = count / 2; 807 engine->conns_out.mh_nalloc = count / 2; 808 return 0; 809} 810 811 812static void 813remove_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn, 814 unsigned todo) 815{ 816 unsigned n; 817 818 for (n = 0; todo; todo &= ~(1 << n++)) 819 if ((todo & (1 << n)) && 820 (conn->cn_cces[n].cce_hash_el.qhe_flags & QHE_HASHED)) 821 lsquic_hash_erase(hash, &conn->cn_cces[n].cce_hash_el); 822} 823 824 825static void 826remove_all_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn) 827{ 828 remove_cces_from_hash(hash, conn, conn->cn_cces_mask); 829} 830 831 832static void 833cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx); 834 835 836static int 837insert_conn_into_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 838 void *peer_ctx) 839{ 840 struct conn_cid_elem *cce; 841 unsigned todo, done, n; 842 843 for (todo = conn->cn_cces_mask, done = 0, n = 0; todo; todo &= ~(1 << n++)) 844 if (todo & (1 << n)) 845 { 846 cce = &conn->cn_cces[n]; 847 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 848 if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 849 cce->cce_cid.len, conn, &cce->cce_hash_el)) 850 done |= 1 << n; 851 else 852 goto err; 853 if ((engine->flags & ENG_SERVER) && 0 == (cce->cce_flags & CCE_REG)) 854 { 855 cce->cce_flags |= CCE_REG; 856 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 857 } 858 } 859 860 return 0; 861 862 err: 863 remove_cces_from_hash(engine->conns_hash, conn, done); 864 return -1; 865} 866 867 868static lsquic_conn_t * 869new_full_conn_server (lsquic_engine_t *engine, lsquic_conn_t *mini_conn, 870 lsquic_time_t now) 871{ 872 const lsquic_cid_t *cid; 873 server_conn_ctor_f ctor; 874 lsquic_conn_t *conn; 875 unsigned flags; 876 if (0 != maybe_grow_conn_heaps(engine)) 877 return NULL; 878 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 879 880 if (mini_conn->cn_flags & LSCONN_IETF) 881 ctor = lsquic_ietf_full_conn_server_new; 882 else 883 ctor = lsquic_gquic_full_conn_server_new; 884 885 conn = ctor(&engine->pub, flags, mini_conn); 886 if (!conn) 887 { 888 /* Otherwise, full_conn_server_new prints its own warnings */ 889 if (ENOMEM == errno) 890 { 891 cid = lsquic_conn_log_cid(mini_conn); 892 LSQ_WARNC("could not allocate full connection for %"CID_FMT": %s", 893 CID_BITS(cid), strerror(errno)); 894 } 895 return NULL; 896 } 897 ++engine->n_conns; 898 if (0 != insert_conn_into_hash(engine, conn, lsquic_conn_get_peer_ctx(conn, NULL))) 899 { 900 cid = lsquic_conn_log_cid(conn); 901 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 902 CID_BITS(cid)); 903 destroy_conn(engine, conn, now); 904 return NULL; 905 } 906 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 907 conn->cn_flags |= LSCONN_HASHED; 908 return conn; 909} 910 911 912static enum 913{ 914 VER_NOT_SPECIFIED, 915 VER_SUPPORTED, 916 VER_UNSUPPORTED, 917} 918 919 920version_matches (lsquic_engine_t *engine, const lsquic_packet_in_t *packet_in, 921 enum lsquic_version *pversion) 922{ 923 lsquic_ver_tag_t ver_tag; 924 enum lsquic_version version; 925 926 if (!packet_in->pi_quic_ver) 927 { 928 LSQ_DEBUG("packet does not specify version"); 929 return VER_NOT_SPECIFIED; 930 } 931 932 memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver, sizeof(ver_tag)); 933 version = lsquic_tag2ver(ver_tag); 934 if (version < N_LSQVER) 935 { 936 if (engine->pub.enp_settings.es_versions & (1 << version)) 937 { 938 LSQ_DEBUG("client-supplied version %s is supported", 939 lsquic_ver2str[version]); 940 *pversion = version; 941 return VER_SUPPORTED; 942 } 943 else 944 LSQ_DEBUG("client-supplied version %s is not supported", 945 lsquic_ver2str[version]); 946 } 947 else 948 LSQ_DEBUG("client-supplied version tag 0x%08X is not recognized", 949 ver_tag); 950 951 return VER_UNSUPPORTED; 952} 953 954 955static void 956schedule_req_packet (struct lsquic_engine *engine, enum packet_req_type type, 957 const struct lsquic_packet_in *packet_in, const struct sockaddr *sa_local, 958 const struct sockaddr *sa_peer, void *peer_ctx) 959{ 960 assert(engine->pr_queue); 961 if (0 == prq_new_req(engine->pr_queue, type, packet_in, peer_ctx, 962 sa_local, sa_peer)) 963 LSQ_DEBUGC("scheduled %s packet for cid %"CID_FMT, 964 lsquic_preqt2str[type], CID_BITS(&packet_in->pi_conn_id)); 965 else 966 LSQ_DEBUG("cannot schedule %s packet", lsquic_preqt2str[type]); 967} 968 969 970static unsigned short 971sa2port (const struct sockaddr *sa) 972{ 973 if (sa->sa_family == AF_INET) 974 { 975 struct sockaddr_in *const sa4 = (void *) sa; 976 return sa4->sin_port; 977 } 978 else 979 { 980 struct sockaddr_in6 *const sa6 = (void *) sa; 981 return sa6->sin6_port; 982 } 983} 984 985 986static struct lsquic_hash_elem * 987find_conn_by_addr (struct lsquic_hash *hash, const struct sockaddr *sa) 988{ 989 unsigned short port; 990 991 port = sa2port(sa); 992 return lsquic_hash_find(hash, &port, sizeof(port)); 993} 994 995 996static lsquic_conn_t * 997find_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 998 struct packin_parse_state *ppstate, const struct sockaddr *sa_local) 999{ 1000 struct lsquic_hash_elem *el; 1001 lsquic_conn_t *conn; 1002 1003 if (engine->flags & ENG_CONNS_BY_ADDR) 1004 el = find_conn_by_addr(engine->conns_hash, sa_local); 1005 else if (packet_in->pi_flags & PI_CONN_ID) 1006 el = lsquic_hash_find(engine->conns_hash, 1007 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1008 else 1009 { 1010 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1011 return NULL; 1012 } 1013 1014 if (!el) 1015 return NULL; 1016 1017 conn = lsquic_hashelem_getdata(el); 1018 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1019 if ((engine->flags & ENG_CONNS_BY_ADDR) 1020 && !(conn->cn_flags & LSCONN_IETF) 1021 && (packet_in->pi_flags & PI_CONN_ID) 1022 && !LSQUIC_CIDS_EQ(CN_SCID(conn), &packet_in->pi_conn_id)) 1023 { 1024 LSQ_DEBUG("connection IDs do not match"); 1025 return NULL; 1026 } 1027 1028 return conn; 1029} 1030 1031 1032static lsquic_conn_t * 1033find_or_create_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1034 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1035 const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size) 1036{ 1037 struct lsquic_hash_elem *el; 1038 struct purga_el *puel; 1039 lsquic_conn_t *conn; 1040 1041 if (!(packet_in->pi_flags & PI_CONN_ID)) 1042 { 1043 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1044 return NULL; 1045 } 1046 el = lsquic_hash_find(engine->conns_hash, 1047 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1048 1049 if (el) 1050 { 1051 conn = lsquic_hashelem_getdata(el); 1052 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1053 return conn; 1054 } 1055 1056 if (engine->flags & ENG_COOLDOWN) 1057 { /* Do not create incoming connections during cooldown */ 1058 LSQ_DEBUG("dropping inbound packet for unknown connection (cooldown)"); 1059 return NULL; 1060 } 1061 1062 if (engine->mini_conns_count >= engine->pub.enp_settings.es_max_inchoate) 1063 { 1064 LSQ_DEBUG("reached limit of %u inchoate connections", 1065 engine->pub.enp_settings.es_max_inchoate); 1066 return NULL; 1067 } 1068 1069 1070 if (engine->purga 1071 && (puel = lsquic_purga_contains(engine->purga, 1072 &packet_in->pi_conn_id), puel)) 1073 { 1074 switch (puel->puel_type) 1075 { 1076 case PUTY_CID_RETIRED: 1077 LSQ_DEBUGC("CID %"CID_FMT" was retired, ignore packet", 1078 CID_BITS(&packet_in->pi_conn_id)); 1079 return NULL; 1080 case PUTY_CONN_DRAIN: 1081 LSQ_DEBUG("drain till: %"PRIu64"; now: %"PRIu64, 1082 puel->puel_time, packet_in->pi_received); 1083 if (puel->puel_time > packet_in->pi_received) 1084 { 1085 LSQ_DEBUGC("CID %"CID_FMT" is in drain state, ignore packet", 1086 CID_BITS(&packet_in->pi_conn_id)); 1087 return NULL; 1088 } 1089 LSQ_DEBUGC("CID %"CID_FMT" goes from drain state to deleted", 1090 CID_BITS(&packet_in->pi_conn_id)); 1091 puel->puel_type = PUTY_CONN_DELETED; 1092 puel->puel_count = 0; 1093 puel->puel_time = 0; 1094 /* fall-through */ 1095 case PUTY_CONN_DELETED: 1096 LSQ_DEBUGC("Connection with CID %"CID_FMT" was deleted", 1097 CID_BITS(&packet_in->pi_conn_id)); 1098 if (puel->puel_time < packet_in->pi_received) 1099 { 1100 puel->puel_time = packet_in->pi_received 1101 /* Exponential back-off */ 1102 + 1000000ull * (1 << MIN(puel->puel_count, 4)); 1103 ++puel->puel_count; 1104 goto maybe_send_prst; 1105 } 1106 return NULL; 1107 default: 1108 assert(0); 1109 return NULL; 1110 } 1111 } 1112 1113 if (engine->pub.enp_settings.es_send_prst 1114 && !(packet_in->pi_flags & PI_GQUIC) 1115 && HETY_NOT_SET == packet_in->pi_header_type) 1116 goto maybe_send_prst; 1117 1118 if (0 != maybe_grow_conn_heaps(engine)) 1119 return NULL; 1120 1121 const struct parse_funcs *pf; 1122 enum lsquic_version version; 1123 switch (version_matches(engine, packet_in, &version)) 1124 { 1125 case VER_UNSUPPORTED: 1126 if (engine->flags & ENG_SERVER) 1127 schedule_req_packet(engine, PACKET_REQ_VERNEG, packet_in, 1128 sa_local, sa_peer, peer_ctx); 1129 return NULL; 1130 case VER_NOT_SPECIFIED: 1131 maybe_send_prst: 1132 if ((engine->flags & ENG_SERVER) && 1133 engine->pub.enp_settings.es_send_prst) 1134 schedule_req_packet(engine, PACKET_REQ_PUBRES, packet_in, 1135 sa_local, sa_peer, peer_ctx); 1136 return NULL; 1137 case VER_SUPPORTED: 1138 pf = select_pf_by_ver(version); 1139 pf->pf_parse_packet_in_finish(packet_in, ppstate); 1140 break; 1141 } 1142 1143 1144 if ((1 << version) & LSQUIC_IETF_VERSIONS) 1145 { 1146 conn = lsquic_mini_conn_ietf_new(&engine->pub, packet_in, version, 1147 sa_peer->sa_family == AF_INET, NULL, packet_in_size); 1148 } 1149 else 1150 { 1151 conn = mini_conn_new(&engine->pub, packet_in, version); 1152 } 1153 if (!conn) 1154 return NULL; 1155 ++engine->mini_conns_count; 1156 ++engine->n_conns; 1157 if (0 != insert_conn_into_hash(engine, conn, peer_ctx)) 1158 { 1159 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1160 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1161 CID_BITS(cid)); 1162 destroy_conn(engine, conn, packet_in->pi_received); 1163 return NULL; 1164 } 1165 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 1166 conn->cn_flags |= LSCONN_HASHED; 1167 eng_hist_inc(&engine->history, packet_in->pi_received, sl_new_mini_conns); 1168 conn->cn_last_sent = engine->last_sent; 1169 return conn; 1170} 1171 1172 1173lsquic_conn_t * 1174lsquic_engine_find_conn (const struct lsquic_engine_public *engine, 1175 const lsquic_cid_t *cid) 1176{ 1177 struct lsquic_hash_elem *el; 1178 lsquic_conn_t *conn = NULL; 1179 el = lsquic_hash_find(engine->enp_engine->conns_hash, cid->idbuf, cid->len); 1180 1181 if (el) 1182 conn = lsquic_hashelem_getdata(el); 1183 return conn; 1184} 1185 1186 1187#if !defined(NDEBUG) && __GNUC__ 1188__attribute__((weak)) 1189#endif 1190void 1191lsquic_engine_add_conn_to_tickable (struct lsquic_engine_public *enpub, 1192 lsquic_conn_t *conn) 1193{ 1194 if (0 == (enpub->enp_flags & ENPUB_PROC) && 1195 0 == (conn->cn_flags & (LSCONN_TICKABLE|LSCONN_NEVER_TICKABLE))) 1196 { 1197 lsquic_engine_t *engine = (lsquic_engine_t *) enpub; 1198 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1199 engine_incref_conn(conn, LSCONN_TICKABLE); 1200 } 1201} 1202 1203 1204void 1205lsquic_engine_add_conn_to_attq (struct lsquic_engine_public *enpub, 1206 lsquic_conn_t *conn, lsquic_time_t tick_time, unsigned why) 1207{ 1208 lsquic_engine_t *const engine = (lsquic_engine_t *) enpub; 1209 if (conn->cn_flags & LSCONN_TICKABLE) 1210 { 1211 /* Optimization: no need to add the connection to the Advisory Tick 1212 * Time Queue: it is about to be ticked, after which it its next tick 1213 * time may be queried again. 1214 */; 1215 } 1216 else if (conn->cn_flags & LSCONN_ATTQ) 1217 { 1218 if (lsquic_conn_adv_time(conn) != tick_time) 1219 { 1220 attq_remove(engine->attq, conn); 1221 if (0 != attq_add(engine->attq, conn, tick_time, why)) 1222 engine_decref_conn(engine, conn, LSCONN_ATTQ); 1223 } 1224 } 1225 else if (0 == attq_add(engine->attq, conn, tick_time, why)) 1226 engine_incref_conn(conn, LSCONN_ATTQ); 1227} 1228 1229 1230static struct lsquic_conn * 1231find_conn_by_srst (struct lsquic_engine *engine, 1232 const struct lsquic_packet_in *packet_in) 1233{ 1234 struct lsquic_hash_elem *el; 1235 struct lsquic_conn *conn; 1236 1237 if (packet_in->pi_data_sz < IQUIC_MIN_SRST_SIZE 1238 || (packet_in->pi_data[0] & 0xC0) != 0x40) 1239 return NULL; 1240 1241 el = lsquic_hash_find(engine->pub.enp_srst_hash, 1242 packet_in->pi_data + packet_in->pi_data_sz - IQUIC_SRESET_TOKEN_SZ, 1243 IQUIC_SRESET_TOKEN_SZ); 1244 if (!el) 1245 return NULL; 1246 1247 conn = lsquic_hashelem_getdata(el); 1248 return conn; 1249} 1250 1251 1252/* Return 0 if packet is being processed by a real connection (mini or full), 1253 * otherwise return 1. 1254 */ 1255static int 1256process_packet_in (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1257 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1258 const struct sockaddr *sa_peer, void *peer_ctx, size_t packet_in_size) 1259{ 1260 lsquic_conn_t *conn; 1261 const unsigned char *packet_in_data; 1262 1263 if (lsquic_packet_in_is_gquic_prst(packet_in) 1264 && !engine->pub.enp_settings.es_honor_prst) 1265 { 1266 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1267 LSQ_DEBUG("public reset packet: discarding"); 1268 return 1; 1269 } 1270 1271 if (engine->flags & ENG_SERVER) 1272 conn = find_or_create_conn(engine, packet_in, ppstate, sa_local, 1273 sa_peer, peer_ctx, packet_in_size); 1274 else 1275 conn = find_conn(engine, packet_in, ppstate, sa_local); 1276 1277 if (!conn) 1278 { 1279 if (engine->pub.enp_settings.es_honor_prst 1280 && packet_in_size == packet_in->pi_data_sz /* Full UDP packet */ 1281 && !(packet_in->pi_flags & PI_GQUIC) 1282 && engine->pub.enp_srst_hash 1283 && (conn = find_conn_by_srst(engine, packet_in))) 1284 { 1285 LSQ_DEBUGC("got stateless reset for connection %"CID_FMT, 1286 CID_BITS(lsquic_conn_log_cid(conn))); 1287 conn->cn_if->ci_stateless_reset(conn); 1288 if (!(conn->cn_flags & LSCONN_TICKABLE) 1289 && conn->cn_if->ci_is_tickable(conn)) 1290 { 1291 lsquic_mh_insert(&engine->conns_tickable, conn, 1292 conn->cn_last_ticked); 1293 engine_incref_conn(conn, LSCONN_TICKABLE); 1294 } 1295 /* Even though the connection processes this packet, we return 1296 * 1 so that the caller does not add reset packet's random 1297 * bytes to the list of valid CIDs. 1298 */ 1299 } 1300 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1301 return 1; 1302 } 1303 1304 if (0 == (conn->cn_flags & LSCONN_TICKABLE)) 1305 { 1306 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1307 engine_incref_conn(conn, LSCONN_TICKABLE); 1308 } 1309 packet_in->pi_path_id = lsquic_conn_record_sockaddr(conn, peer_ctx, 1310 sa_local, sa_peer); 1311 lsquic_packet_in_upref(packet_in); 1312#if LOG_PACKET_CHECKSUM 1313 log_packet_checksum(lsquic_conn_log_cid(conn), "in", packet_in->pi_data, 1314 packet_in->pi_data_sz); 1315#endif 1316 /* Note on QLog: 1317 * For the PACKET_RX QLog event, we are interested in logging these things: 1318 * - raw packet (however it comes in, encrypted or not) 1319 * - frames (list of frame names) 1320 * - packet type and number 1321 * - packet rx timestamp 1322 * 1323 * Since only some of these items are available at this code 1324 * juncture, we will wait until after the packet has been 1325 * decrypted (if necessary) and parsed to call the log functions. 1326 * 1327 * Once the PACKET_RX event is finally logged, the timestamp 1328 * will come from packet_in->pi_received. For correct sequential 1329 * ordering of QLog events, be sure to process the QLogs downstream. 1330 * (Hint: Use the qlog_parser.py tool in tools/ for full QLog processing.) 1331 */ 1332 packet_in_data = packet_in->pi_data; 1333 packet_in_size = packet_in->pi_data_sz; 1334 conn->cn_if->ci_packet_in(conn, packet_in); 1335 QLOG_PACKET_RX(lsquic_conn_log_cid(conn), packet_in, packet_in_data, packet_in_size); 1336 lsquic_packet_in_put(&engine->pub.enp_mm, packet_in); 1337 return 0; 1338} 1339 1340 1341void 1342lsquic_engine_destroy (lsquic_engine_t *engine) 1343{ 1344 struct lsquic_hash_elem *el; 1345 lsquic_conn_t *conn; 1346 1347 LSQ_DEBUG("destroying engine"); 1348#ifndef NDEBUG 1349 engine->flags |= ENG_DTOR; 1350#endif 1351 1352 while ((conn = lsquic_mh_pop(&engine->conns_out))) 1353 { 1354 assert(conn->cn_flags & LSCONN_HAS_OUTGOING); 1355 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1356 } 1357 1358 while ((conn = lsquic_mh_pop(&engine->conns_tickable))) 1359 { 1360 assert(conn->cn_flags & LSCONN_TICKABLE); 1361 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1362 } 1363 1364 for (el = lsquic_hash_first(engine->conns_hash); el; 1365 el = lsquic_hash_next(engine->conns_hash)) 1366 { 1367 conn = lsquic_hashelem_getdata(el); 1368 force_close_conn(engine, conn); 1369 } 1370 lsquic_hash_destroy(engine->conns_hash); 1371 1372 assert(0 == engine->n_conns); 1373 assert(0 == engine->mini_conns_count); 1374 if (engine->pr_queue) 1375 prq_destroy(engine->pr_queue); 1376 if (engine->purga) 1377 lsquic_purga_destroy(engine->purga); 1378 attq_destroy(engine->attq); 1379 1380 assert(0 == lsquic_mh_count(&engine->conns_out)); 1381 assert(0 == lsquic_mh_count(&engine->conns_tickable)); 1382 if (engine->pub.enp_shi == &stock_shi) 1383 stock_shared_hash_destroy(engine->pub.enp_shi_ctx); 1384 lsquic_mm_cleanup(&engine->pub.enp_mm); 1385 free(engine->conns_tickable.mh_elems); 1386#ifndef NDEBUG 1387 if (engine->flags & ENG_LOSE_PACKETS) 1388 regfree(&engine->lose_packets_re); 1389#endif 1390 if (engine->pub.enp_tokgen) 1391 lsquic_tg_destroy(engine->pub.enp_tokgen); 1392#if LSQUIC_CONN_STATS 1393 if (engine->stats_fh) 1394 { 1395 const struct conn_stats *const stats = &engine->conn_stats_sum; 1396 fprintf(engine->stats_fh, "Aggregate connection stats collected by engine:\n"); 1397 fprintf(engine->stats_fh, "Connections: %u\n", engine->stats.conns); 1398 fprintf(engine->stats_fh, "Ticks: %lu\n", stats->n_ticks); 1399 fprintf(engine->stats_fh, "In:\n"); 1400 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->in.bytes); 1401 fprintf(engine->stats_fh, " packets: %lu\n", stats->in.packets); 1402 fprintf(engine->stats_fh, " undecryptable packets: %lu\n", stats->in.undec_packets); 1403 fprintf(engine->stats_fh, " duplicate packets: %lu\n", stats->in.dup_packets); 1404 fprintf(engine->stats_fh, " error packets: %lu\n", stats->in.err_packets); 1405 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->in.stream_frames); 1406 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->in.stream_data_sz); 1407 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1408 stats->in.headers_comp, stats->in.headers_uncomp, 1409 stats->in.headers_uncomp ? 1410 (double) stats->in.headers_comp / (double) stats->in.headers_uncomp 1411 : 0); 1412 fprintf(engine->stats_fh, " ACK frames: %lu\n", stats->in.n_acks); 1413 fprintf(engine->stats_fh, " ACK frames processed: %lu\n", stats->in.n_acks_proc); 1414 fprintf(engine->stats_fh, " ACK frames merged to new: %lu\n", stats->in.n_acks_merged[0]); 1415 fprintf(engine->stats_fh, " ACK frames merged to old: %lu\n", stats->in.n_acks_merged[1]); 1416 fprintf(engine->stats_fh, "Out:\n"); 1417 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->out.bytes); 1418 fprintf(engine->stats_fh, " packets: %lu\n", stats->out.packets); 1419 fprintf(engine->stats_fh, " acked via loss record: %lu\n", stats->out.acked_via_loss); 1420 fprintf(engine->stats_fh, " acks: %lu\n", stats->out.acks); 1421 fprintf(engine->stats_fh, " retx packets: %lu\n", stats->out.retx_packets); 1422 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->out.stream_frames); 1423 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->out.stream_data_sz); 1424 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1425 stats->out.headers_comp, stats->out.headers_uncomp, 1426 stats->out.headers_uncomp ? 1427 (double) stats->out.headers_comp / (double) stats->out.headers_uncomp 1428 : 0); 1429 fprintf(engine->stats_fh, " ACKs: %lu\n", stats->out.acks); 1430 } 1431#endif 1432 if (engine->pub.enp_srst_hash) 1433 lsquic_hash_destroy(engine->pub.enp_srst_hash); 1434#if LSQUIC_COUNT_ENGINE_CALLS 1435 LSQ_NOTICE("number of calls into the engine: %lu", engine->n_engine_calls); 1436#endif 1437 free(engine); 1438} 1439 1440 1441static struct conn_cid_elem * 1442find_free_cce (struct lsquic_conn *conn) 1443{ 1444 struct conn_cid_elem *cce; 1445 1446 for (cce = conn->cn_cces; cce < END_OF_CCES(conn); ++cce) 1447 if (!(conn->cn_cces_mask & (1 << (cce - conn->cn_cces)))) 1448 return cce; 1449 1450 return NULL; 1451} 1452 1453 1454static int 1455add_conn_to_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 1456 const struct sockaddr *local_sa, void *peer_ctx) 1457{ 1458 struct conn_cid_elem *cce; 1459 1460 if (engine->flags & ENG_CONNS_BY_ADDR) 1461 { 1462 cce = find_free_cce(conn); 1463 if (!cce) 1464 { 1465 LSQ_ERROR("cannot find free CCE"); 1466 return -1; 1467 } 1468 cce->cce_port = sa2port(local_sa); 1469 cce->cce_flags = CCE_PORT; 1470 if (lsquic_hash_insert(engine->conns_hash, &cce->cce_port, 1471 sizeof(cce->cce_port), conn, &cce->cce_hash_el)) 1472 { 1473 conn->cn_cces_mask |= 1 << (cce - conn->cn_cces); 1474 return 0; 1475 } 1476 else 1477 return -1; 1478 1479 } 1480 else 1481 return insert_conn_into_hash(engine, conn, peer_ctx); 1482} 1483 1484 1485lsquic_conn_t * 1486lsquic_engine_connect (lsquic_engine_t *engine, enum lsquic_version version, 1487 const struct sockaddr *local_sa, 1488 const struct sockaddr *peer_sa, 1489 void *peer_ctx, lsquic_conn_ctx_t *conn_ctx, 1490 const char *hostname, unsigned short max_packet_size, 1491 const unsigned char *zero_rtt, size_t zero_rtt_len, 1492 const unsigned char *token, size_t token_sz) 1493{ 1494 lsquic_conn_t *conn; 1495 unsigned flags, versions; 1496 int is_ipv4; 1497 1498 ENGINE_IN(engine); 1499 1500 if (engine->flags & ENG_SERVER) 1501 { 1502 LSQ_ERROR("`%s' must only be called in client mode", __func__); 1503 goto err; 1504 } 1505 1506 if (engine->flags & ENG_CONNS_BY_ADDR 1507 && find_conn_by_addr(engine->conns_hash, local_sa)) 1508 { 1509 LSQ_ERROR("cannot have more than one connection on the same port"); 1510 goto err; 1511 } 1512 1513 if (0 != maybe_grow_conn_heaps(engine)) 1514 return NULL; 1515 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 1516 is_ipv4 = peer_sa->sa_family == AF_INET; 1517 if (zero_rtt && zero_rtt_len) 1518 { 1519 version = lsquic_zero_rtt_version(zero_rtt, zero_rtt_len); 1520 if (version >= N_LSQVER) 1521 { 1522 LSQ_INFO("zero-rtt version is bad, won't use"); 1523 zero_rtt = NULL; 1524 zero_rtt_len = 0; 1525 } 1526 } 1527 if (version >= N_LSQVER) 1528 { 1529 if (version > N_LSQVER) 1530 LSQ_WARN("invalid version specified, engine will pick"); 1531 versions = engine->pub.enp_settings.es_versions; 1532 } 1533 else 1534 versions = 1u << version; 1535 if (versions & LSQUIC_IETF_VERSIONS) 1536 conn = lsquic_ietf_full_conn_client_new(&engine->pub, versions, 1537 flags, hostname, max_packet_size, 1538 is_ipv4, zero_rtt, zero_rtt_len, token, token_sz); 1539 else 1540 conn = lsquic_gquic_full_conn_client_new(&engine->pub, versions, 1541 flags, hostname, max_packet_size, is_ipv4, 1542 zero_rtt, zero_rtt_len); 1543 if (!conn) 1544 goto err; 1545 EV_LOG_CREATE_CONN(lsquic_conn_log_cid(conn), local_sa, peer_sa); 1546 EV_LOG_VER_NEG(lsquic_conn_log_cid(conn), "proposed", 1547 lsquic_ver2str[conn->cn_version]); 1548 ++engine->n_conns; 1549 lsquic_conn_record_sockaddr(conn, peer_ctx, local_sa, peer_sa); 1550 if (0 != add_conn_to_hash(engine, conn, local_sa, peer_ctx)) 1551 { 1552 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1553 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1554 CID_BITS(cid)); 1555 destroy_conn(engine, conn, lsquic_time_now()); 1556 goto err; 1557 } 1558 assert(!(conn->cn_flags & 1559 (CONN_REF_FLAGS 1560 & ~LSCONN_TICKABLE /* This flag may be set as effect of user 1561 callbacks */ 1562 ))); 1563 conn->cn_flags |= LSCONN_HASHED; 1564 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1565 engine_incref_conn(conn, LSCONN_TICKABLE); 1566 lsquic_conn_set_ctx(conn, conn_ctx); 1567 conn->cn_if->ci_client_call_on_new(conn); 1568 end: 1569 ENGINE_OUT(engine); 1570 return conn; 1571 err: 1572 conn = NULL; 1573 goto end; 1574} 1575 1576 1577static void 1578remove_conn_from_hash (lsquic_engine_t *engine, lsquic_conn_t *conn) 1579{ 1580 remove_all_cces_from_hash(engine->conns_hash, conn); 1581 (void) engine_decref_conn(engine, conn, LSCONN_HASHED); 1582} 1583 1584 1585static void 1586refflags2str (enum lsquic_conn_flags flags, char s[6]) 1587{ 1588 *s = 'C'; s += !!(flags & LSCONN_CLOSING); 1589 *s = 'H'; s += !!(flags & LSCONN_HASHED); 1590 *s = 'O'; s += !!(flags & LSCONN_HAS_OUTGOING); 1591 *s = 'T'; s += !!(flags & LSCONN_TICKABLE); 1592 *s = 'A'; s += !!(flags & LSCONN_ATTQ); 1593 *s = 'K'; s += !!(flags & LSCONN_TICKED); 1594 *s = '\0'; 1595} 1596 1597 1598static void 1599engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag) 1600{ 1601 char str[2][7]; 1602 assert(flag & CONN_REF_FLAGS); 1603 assert(!(conn->cn_flags & flag)); 1604 conn->cn_flags |= flag; 1605 LSQ_DEBUGC("incref conn %"CID_FMT", '%s' -> '%s'", 1606 CID_BITS(lsquic_conn_log_cid(conn)), 1607 (refflags2str(conn->cn_flags & ~flag, str[0]), str[0]), 1608 (refflags2str(conn->cn_flags, str[1]), str[1])); 1609} 1610 1611 1612static lsquic_conn_t * 1613engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 1614 enum lsquic_conn_flags flags) 1615{ 1616 char str[2][7]; 1617 lsquic_time_t now; 1618 assert(flags & CONN_REF_FLAGS); 1619 assert(conn->cn_flags & flags); 1620#ifndef NDEBUG 1621 if (flags & LSCONN_CLOSING) 1622 assert(0 == (conn->cn_flags & LSCONN_HASHED)); 1623#endif 1624 conn->cn_flags &= ~flags; 1625 LSQ_DEBUGC("decref conn %"CID_FMT", '%s' -> '%s'", 1626 CID_BITS(lsquic_conn_log_cid(conn)), 1627 (refflags2str(conn->cn_flags | flags, str[0]), str[0]), 1628 (refflags2str(conn->cn_flags, str[1]), str[1])); 1629 if (0 == (conn->cn_flags & CONN_REF_FLAGS)) 1630 { 1631 now = lsquic_time_now(); 1632 if (conn->cn_flags & LSCONN_MINI) 1633 eng_hist_inc(&engine->history, now, sl_del_mini_conns); 1634 else 1635 eng_hist_inc(&engine->history, now, sl_del_full_conns); 1636 destroy_conn(engine, conn, now); 1637 return NULL; 1638 } 1639 else 1640 return conn; 1641} 1642 1643 1644/* This is not a general-purpose function. Only call from engine dtor. */ 1645static void 1646force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn) 1647{ 1648 assert(engine->flags & ENG_DTOR); 1649 const enum lsquic_conn_flags flags = conn->cn_flags; 1650 assert(conn->cn_flags & CONN_REF_FLAGS); 1651 assert(!(flags & LSCONN_HAS_OUTGOING)); /* Should be removed already */ 1652 assert(!(flags & LSCONN_TICKABLE)); /* Should be removed already */ 1653 assert(!(flags & LSCONN_CLOSING)); /* It is in transient queue? */ 1654 if (flags & LSCONN_ATTQ) 1655 { 1656 attq_remove(engine->attq, conn); 1657 (void) engine_decref_conn(engine, conn, LSCONN_ATTQ); 1658 } 1659 if (flags & LSCONN_HASHED) 1660 remove_conn_from_hash(engine, conn); 1661} 1662 1663 1664/* Iterator for tickable connections (those on the Tickable Queue). Before 1665 * a connection is returned, it is removed from the Advisory Tick Time queue 1666 * if necessary. 1667 */ 1668static lsquic_conn_t * 1669conn_iter_next_tickable (struct lsquic_engine *engine) 1670{ 1671 lsquic_conn_t *conn; 1672 1673 if (engine->flags & ENG_SERVER) 1674 while (1) 1675 { 1676 conn = lsquic_mh_pop(&engine->conns_tickable); 1677 if (conn && (conn->cn_flags & LSCONN_SKIP_ON_PROC)) 1678 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1679 else 1680 break; 1681 } 1682 else 1683 conn = lsquic_mh_pop(&engine->conns_tickable); 1684 1685 if (conn) 1686 conn = engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1687 if (conn && (conn->cn_flags & LSCONN_ATTQ)) 1688 { 1689 attq_remove(engine->attq, conn); 1690 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1691 } 1692 1693 return conn; 1694} 1695 1696 1697static void 1698cub_init (struct cid_update_batch *cub, lsquic_cids_update_f update, 1699 void *update_ctx) 1700{ 1701 cub->cub_update_cids = update; 1702 cub->cub_update_ctx = update_ctx; 1703 cub->cub_count = 0; 1704} 1705 1706 1707static void 1708cub_flush (struct cid_update_batch *cub) 1709{ 1710 if (cub->cub_count > 0 && cub->cub_update_cids) 1711 cub->cub_update_cids(cub->cub_update_ctx, cub->cub_peer_ctxs, 1712 cub->cub_cids, cub->cub_count); 1713 cub->cub_count = 0; 1714} 1715 1716 1717static void 1718cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx) 1719{ 1720 cub->cub_cids [ cub->cub_count ] = *cid; 1721 cub->cub_peer_ctxs[ cub->cub_count ] = peer_ctx; 1722 ++cub->cub_count; 1723 if (cub->cub_count == sizeof(cub->cub_cids) / sizeof(cub->cub_cids[0])) 1724 cub_flush(cub); 1725} 1726 1727 1728/* Process registered CIDs */ 1729static void 1730cub_add_cids_from_cces (struct cid_update_batch *cub, struct lsquic_conn *conn) 1731{ 1732 struct cce_cid_iter citer; 1733 struct conn_cid_elem *cce; 1734 void *peer_ctx; 1735 1736 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 1737 for (cce = cce_iter_first(&citer, conn); cce; cce = cce_iter_next(&citer)) 1738 if (cce->cce_flags & CCE_REG) 1739 cub_add(cub, &cce->cce_cid, peer_ctx); 1740} 1741 1742 1743static void 1744drop_all_mini_conns (lsquic_engine_t *engine) 1745{ 1746 struct lsquic_hash_elem *el; 1747 lsquic_conn_t *conn; 1748 struct cid_update_batch cub; 1749 1750 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 1751 1752 for (el = lsquic_hash_first(engine->conns_hash); el; 1753 el = lsquic_hash_next(engine->conns_hash)) 1754 { 1755 conn = lsquic_hashelem_getdata(el); 1756 if (conn->cn_flags & LSCONN_MINI) 1757 { 1758 /* If promoted, why is it still in this hash? */ 1759 assert(!(conn->cn_flags & LSCONN_PROMOTED)); 1760 if (!(conn->cn_flags & LSCONN_PROMOTED)) 1761 cub_add_cids_from_cces(&cub, conn); 1762 remove_conn_from_hash(engine, conn); 1763 } 1764 } 1765 1766 cub_flush(&cub); 1767} 1768 1769 1770void 1771lsquic_engine_process_conns (lsquic_engine_t *engine) 1772{ 1773 lsquic_conn_t *conn; 1774 lsquic_time_t now; 1775 1776 ENGINE_IN(engine); 1777 1778 now = lsquic_time_now(); 1779 while ((conn = attq_pop(engine->attq, now))) 1780 { 1781 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1782 if (conn && !(conn->cn_flags & LSCONN_TICKABLE)) 1783 { 1784 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1785 engine_incref_conn(conn, LSCONN_TICKABLE); 1786 } 1787 } 1788 1789 process_connections(engine, conn_iter_next_tickable, now); 1790 ENGINE_OUT(engine); 1791} 1792 1793 1794static void 1795release_or_return_enc_data (struct lsquic_engine *engine, 1796 void (*pmi_rel_or_ret) (void *, void *, void *, char), 1797 struct lsquic_conn *conn, struct lsquic_packet_out *packet_out) 1798{ 1799 pmi_rel_or_ret(engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1800 packet_out->po_enc_data, lsquic_packet_out_ipv6(packet_out)); 1801 packet_out->po_flags &= ~PO_ENCRYPTED; 1802 packet_out->po_enc_data = NULL; 1803} 1804 1805 1806static void 1807release_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1808 struct lsquic_packet_out *packet_out) 1809{ 1810 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_release, 1811 conn, packet_out); 1812} 1813 1814 1815static void 1816return_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1817 struct lsquic_packet_out *packet_out) 1818{ 1819 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_return, 1820 conn, packet_out); 1821} 1822 1823 1824static int 1825copy_packet (struct lsquic_engine *engine, struct lsquic_conn *conn, 1826 struct lsquic_packet_out *packet_out) 1827{ 1828 int ipv6; 1829 1830 ipv6 = NP_IS_IPv6(packet_out->po_path); 1831 if (packet_out->po_flags & PO_ENCRYPTED) 1832 { 1833 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1834 && packet_out->po_data_sz == packet_out->po_enc_data_sz 1835 && 0 == memcmp(packet_out->po_data, packet_out->po_enc_data, 1836 packet_out->po_data_sz)) 1837 return 0; 1838 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1839 && packet_out->po_data_sz <= packet_out->po_enc_data_sz) 1840 goto copy; 1841 return_enc_data(engine, conn, packet_out); 1842 } 1843 1844 packet_out->po_enc_data = engine->pub.enp_pmi->pmi_allocate( 1845 engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1846 packet_out->po_data_sz, ipv6); 1847 if (!packet_out->po_enc_data) 1848 { 1849 LSQ_DEBUG("could not allocate memory for outgoing unencrypted packet " 1850 "of size %hu", packet_out->po_data_sz); 1851 return -1; 1852 } 1853 1854 copy: 1855 memcpy(packet_out->po_enc_data, packet_out->po_data, 1856 packet_out->po_data_sz); 1857 packet_out->po_enc_data_sz = packet_out->po_data_sz; 1858 packet_out->po_sent_sz = packet_out->po_data_sz; 1859 packet_out->po_flags &= ~PO_IPv6; 1860 packet_out->po_flags |= PO_ENCRYPTED|PO_SENT_SZ|(ipv6 << POIPv6_SHIFT); 1861 1862 return 0; 1863} 1864 1865 1866STAILQ_HEAD(conns_stailq, lsquic_conn); 1867TAILQ_HEAD(conns_tailq, lsquic_conn); 1868 1869 1870struct conns_out_iter 1871{ 1872 struct min_heap *coi_heap; 1873 struct pr_queue *coi_prq; 1874 TAILQ_HEAD(, lsquic_conn) coi_active_list, 1875 coi_inactive_list; 1876 lsquic_conn_t *coi_next; 1877#ifndef NDEBUG 1878 lsquic_time_t coi_last_sent; 1879#endif 1880}; 1881 1882 1883static void 1884coi_init (struct conns_out_iter *iter, struct lsquic_engine *engine) 1885{ 1886 iter->coi_heap = &engine->conns_out; 1887 iter->coi_prq = engine->pr_queue; 1888 iter->coi_next = NULL; 1889 TAILQ_INIT(&iter->coi_active_list); 1890 TAILQ_INIT(&iter->coi_inactive_list); 1891#ifndef NDEBUG 1892 iter->coi_last_sent = 0; 1893#endif 1894} 1895 1896 1897static lsquic_conn_t * 1898coi_next (struct conns_out_iter *iter) 1899{ 1900 lsquic_conn_t *conn; 1901 1902 if (lsquic_mh_count(iter->coi_heap) > 0) 1903 { 1904 conn = lsquic_mh_pop(iter->coi_heap); 1905 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1906 conn->cn_flags |= LSCONN_COI_ACTIVE; 1907#ifndef NDEBUG 1908 if (iter->coi_last_sent) 1909 assert(iter->coi_last_sent <= conn->cn_last_sent); 1910 iter->coi_last_sent = conn->cn_last_sent; 1911#endif 1912 return conn; 1913 } 1914 else if (iter->coi_prq && (conn = prq_next_conn(iter->coi_prq))) 1915 { 1916 return conn; 1917 } 1918 else if (!TAILQ_EMPTY(&iter->coi_active_list)) 1919 { 1920 iter->coi_prq = NULL; /* Save function call in previous conditional */ 1921 conn = iter->coi_next; 1922 if (!conn) 1923 conn = TAILQ_FIRST(&iter->coi_active_list); 1924 if (conn) 1925 iter->coi_next = TAILQ_NEXT(conn, cn_next_out); 1926 return conn; 1927 } 1928 else 1929 return NULL; 1930} 1931 1932 1933static void 1934coi_deactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1935{ 1936 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 1937 { 1938 assert(!TAILQ_EMPTY(&iter->coi_active_list)); 1939 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1940 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1941 TAILQ_INSERT_TAIL(&iter->coi_inactive_list, conn, cn_next_out); 1942 conn->cn_flags |= LSCONN_COI_INACTIVE; 1943 } 1944} 1945 1946 1947static void 1948coi_reactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1949{ 1950 assert(conn->cn_flags & LSCONN_COI_INACTIVE); 1951 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 1952 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 1953 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1954 conn->cn_flags |= LSCONN_COI_ACTIVE; 1955} 1956 1957 1958static void 1959coi_reheap (struct conns_out_iter *iter, lsquic_engine_t *engine) 1960{ 1961 lsquic_conn_t *conn; 1962 while ((conn = TAILQ_FIRST(&iter->coi_active_list))) 1963 { 1964 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1965 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1966 if ((conn->cn_flags & CONN_REF_FLAGS) != LSCONN_HAS_OUTGOING 1967 && !(conn->cn_flags & LSCONN_IMMED_CLOSE)) 1968 lsquic_mh_insert(iter->coi_heap, conn, conn->cn_last_sent); 1969 else /* Closed connection gets one shot at sending packets */ 1970 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1971 } 1972 while ((conn = TAILQ_FIRST(&iter->coi_inactive_list))) 1973 { 1974 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 1975 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 1976 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1977 } 1978} 1979 1980 1981#ifndef NDEBUG 1982static void 1983lose_matching_packets (const lsquic_engine_t *engine, struct out_batch *batch, 1984 unsigned n) 1985{ 1986 const lsquic_cid_t *cid; 1987 struct iovec *iov; 1988 unsigned i; 1989 char packno_str[22]; 1990 1991 for (i = 0; i < n; ++i) 1992 { 1993 snprintf(packno_str, sizeof(packno_str), "%"PRIu64, 1994 batch->packets[i]->po_packno); 1995 if (0 == regexec(&engine->lose_packets_re, packno_str, 0, NULL, 0)) 1996 { 1997 for (iov = batch->outs[i].iov; iov < 1998 batch->outs[i].iov + batch->outs[i].iovlen; ++iov) 1999 batch->outs[i].iov->iov_len -= 1; 2000 cid = lsquic_conn_log_cid(batch->conns[i]); 2001 LSQ_WARNC("losing packet %s for connection %"CID_FMT, packno_str, 2002 CID_BITS(cid)); 2003 } 2004 } 2005} 2006 2007 2008#endif 2009 2010 2011#ifdef NDEBUG 2012#define CONST_BATCH const 2013#else 2014#define CONST_BATCH 2015#endif 2016 2017 2018static void 2019sockaddr2str (const struct sockaddr *addr, char *buf, size_t sz) 2020{ 2021 unsigned short port; 2022 int len; 2023 2024 switch (addr->sa_family) 2025 { 2026 case AF_INET: 2027 port = ((struct sockaddr_in *) addr)->sin_port; 2028 if (!inet_ntop(AF_INET, &((struct sockaddr_in *) addr)->sin_addr, 2029 buf, sz)) 2030 buf[0] = '\0'; 2031 break; 2032 case AF_INET6: 2033 port = ((struct sockaddr_in6 *) addr)->sin6_port; 2034 if (!inet_ntop(AF_INET6, &((struct sockaddr_in6 *) addr)->sin6_addr, 2035 buf, sz)) 2036 buf[0] = '\0'; 2037 break; 2038 default: 2039 port = 0; 2040 (void) snprintf(buf, sz, "<invalid family %d>", addr->sa_family); 2041 break; 2042 } 2043 2044 len = strlen(buf); 2045 if (len < (int) sz) 2046 snprintf(buf + len, sz - (size_t) len, ":%hu", port); 2047} 2048 2049 2050struct send_batch_ctx { 2051 struct conns_stailq *closed_conns; 2052 struct conns_tailq *ticked_conns; 2053 struct conns_out_iter *conns_iter; 2054 CONST_BATCH struct out_batch *batch; 2055}; 2056 2057 2058static void 2059close_conn_immediately (struct lsquic_engine *engine, 2060 const struct send_batch_ctx *sb_ctx, struct lsquic_conn *conn) 2061{ 2062 conn->cn_flags |= LSCONN_IMMED_CLOSE; 2063 if (!(conn->cn_flags & LSCONN_CLOSING)) 2064 { 2065 STAILQ_INSERT_TAIL(sb_ctx->closed_conns, conn, cn_next_closed_conn); 2066 engine_incref_conn(conn, LSCONN_CLOSING); 2067 if (conn->cn_flags & LSCONN_HASHED) 2068 remove_conn_from_hash(engine, conn); 2069 } 2070 if (conn->cn_flags & LSCONN_TICKED) 2071 { 2072 TAILQ_REMOVE(sb_ctx->ticked_conns, conn, cn_next_ticked); 2073 engine_decref_conn(engine, conn, LSCONN_TICKED); 2074 } 2075} 2076 2077 2078static void 2079close_conn_on_send_error (struct lsquic_engine *engine, 2080 const struct send_batch_ctx *sb_ctx, int n, int e_val) 2081{ 2082 const struct out_batch *batch = sb_ctx->batch; 2083 struct lsquic_conn *const conn = batch->conns[n]; 2084 char buf[2][INET6_ADDRSTRLEN + sizeof(":65535")]; 2085 2086 LSQ_WARNC("error sending packet for %s connection %"CID_FMT" - close it; " 2087 "src: %s; dst: %s; errno: %d", 2088 conn->cn_flags & LSCONN_EVANESCENT ? "evanecsent" : 2089 conn->cn_flags & LSCONN_MINI ? "mini" : "regular", 2090 CID_BITS(lsquic_conn_log_cid(conn)), 2091 (sockaddr2str(batch->outs[n].local_sa, buf[0], sizeof(buf[0])), buf[0]), 2092 (sockaddr2str(batch->outs[n].dest_sa, buf[1], sizeof(buf[1])), buf[1]), 2093 e_val); 2094 if (conn->cn_flags & LSCONN_EVANESCENT) 2095 lsquic_prq_drop(conn); 2096 else 2097 close_conn_immediately(engine, sb_ctx, conn); 2098} 2099 2100 2101static unsigned 2102send_batch (lsquic_engine_t *engine, const struct send_batch_ctx *sb_ctx, 2103 unsigned n_to_send) 2104{ 2105 int n_sent, i, e_val; 2106 lsquic_time_t now; 2107 unsigned off; 2108 size_t count; 2109 CONST_BATCH struct out_batch *const batch = sb_ctx->batch; 2110 struct lsquic_packet_out *CONST_BATCH *packet_out, *CONST_BATCH *end; 2111 2112#ifndef NDEBUG 2113 if (engine->flags & ENG_LOSE_PACKETS) 2114 lose_matching_packets(engine, batch, n_to_send); 2115#endif 2116 /* Set sent time before the write to avoid underestimating RTT */ 2117 now = lsquic_time_now(); 2118 for (i = 0; i < (int) n_to_send; ++i) 2119 { 2120 off = batch->pack_off[i]; 2121 count = batch->outs[i].iovlen; 2122 assert(count > 0); 2123 packet_out = &batch->packets[off]; 2124 end = packet_out + count; 2125 do 2126 (*packet_out)->po_sent = now; 2127 while (++packet_out < end); 2128 } 2129 n_sent = engine->packets_out(engine->packets_out_ctx, batch->outs, 2130 n_to_send); 2131 e_val = errno; 2132 if (n_sent < (int) n_to_send) 2133 { 2134 engine->pub.enp_flags &= ~ENPUB_CAN_SEND; 2135 engine->resume_sending_at = now + 1000000; 2136 LSQ_DEBUG("cannot send packets"); 2137 EV_LOG_GENERIC_EVENT("cannot send packets"); 2138 if (!(EAGAIN == e_val || EWOULDBLOCK == e_val)) 2139 close_conn_on_send_error(engine, sb_ctx, 2140 n_sent < 0 ? 0 : n_sent, e_val); 2141 } 2142 if (n_sent >= 0) 2143 LSQ_DEBUG("packets out returned %d (out of %u)", n_sent, n_to_send); 2144 else 2145 { 2146 LSQ_DEBUG("packets out returned an error: %s", strerror(e_val)); 2147 n_sent = 0; 2148 } 2149 if (n_sent > 0) 2150 engine->last_sent = now + n_sent; 2151 for (i = 0; i < n_sent; ++i) 2152 { 2153 eng_hist_inc(&engine->history, now, sl_packets_out); 2154 /* `i' is added to maintain relative order */ 2155 batch->conns[i]->cn_last_sent = now + i; 2156 2157 off = batch->pack_off[i]; 2158 count = batch->outs[i].iovlen; 2159 assert(count > 0); 2160 packet_out = &batch->packets[off]; 2161 end = packet_out + count; 2162 do 2163 { 2164#if LOG_PACKET_CHECKSUM 2165 log_packet_checksum(lsquic_conn_log_cid(batch->conns[i]), "out", 2166 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_base, 2167 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_len); 2168#endif 2169 EV_LOG_PACKET_SENT(lsquic_conn_log_cid(batch->conns[i]), 2170 *packet_out); 2171 /* Release packet out buffer as soon as the packet is sent 2172 * successfully. If not successfully sent, we hold on to 2173 * this buffer until the packet sending is attempted again 2174 * or until it times out and regenerated. 2175 */ 2176 if ((*packet_out)->po_flags & PO_ENCRYPTED) 2177 release_enc_data(engine, batch->conns[i], *packet_out); 2178 batch->conns[i]->cn_if->ci_packet_sent(batch->conns[i], 2179 *packet_out); 2180 } 2181 while (++packet_out < end); 2182 } 2183 if (LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT)) 2184 for ( ; i < (int) n_to_send; ++i) 2185 { 2186 off = batch->pack_off[i]; 2187 count = batch->outs[i].iovlen; 2188 assert(count > 0); 2189 packet_out = &batch->packets[off]; 2190 end = packet_out + count; 2191 do 2192 EV_LOG_PACKET_NOT_SENT(lsquic_conn_log_cid(batch->conns[i]), 2193 *packet_out); 2194 while (++packet_out < end); 2195 } 2196 /* Return packets to the connection in reverse order so that the packet 2197 * ordering is maintained. 2198 */ 2199 for (i = (int) n_to_send - 1; i >= n_sent; --i) 2200 { 2201 off = batch->pack_off[i]; 2202 count = batch->outs[i].iovlen; 2203 assert(count > 0); 2204 packet_out = &batch->packets[off + count - 1]; 2205 end = &batch->packets[off - 1]; 2206 do 2207 batch->conns[i]->cn_if->ci_packet_not_sent(batch->conns[i], 2208 *packet_out); 2209 while (--packet_out > end); 2210 if (!(batch->conns[i]->cn_flags & (LSCONN_COI_ACTIVE|LSCONN_EVANESCENT))) 2211 coi_reactivate(sb_ctx->conns_iter, batch->conns[i]); 2212 } 2213 return n_sent; 2214} 2215 2216 2217/* Return 1 if went past deadline, 0 otherwise */ 2218static int 2219check_deadline (lsquic_engine_t *engine) 2220{ 2221 if (engine->pub.enp_settings.es_proc_time_thresh && 2222 lsquic_time_now() > engine->deadline) 2223 { 2224 LSQ_INFO("went past threshold of %u usec, stop sending", 2225 engine->pub.enp_settings.es_proc_time_thresh); 2226 engine->flags |= ENG_PAST_DEADLINE; 2227 return 1; 2228 } 2229 else 2230 return 0; 2231} 2232 2233 2234static size_t 2235iov_size (const struct iovec *iov, const struct iovec *const end) 2236{ 2237 size_t size; 2238 2239 assert(iov < end); 2240 2241 size = 0; 2242 do 2243 size += iov->iov_len; 2244 while (++iov < end); 2245 2246 return size; 2247} 2248 2249 2250static void 2251send_packets_out (struct lsquic_engine *engine, 2252 struct conns_tailq *ticked_conns, 2253 struct conns_stailq *closed_conns) 2254{ 2255 unsigned n, w, n_sent, n_batches_sent; 2256 lsquic_packet_out_t *packet_out; 2257 struct lsquic_packet_out **packet; 2258 lsquic_conn_t *conn; 2259 struct out_batch *const batch = &engine->out_batch; 2260 struct iovec *iov, *packet_iov; 2261 struct conns_out_iter conns_iter; 2262 int shrink, deadline_exceeded; 2263 const struct send_batch_ctx sb_ctx = { 2264 closed_conns, 2265 ticked_conns, 2266 &conns_iter, 2267 &engine->out_batch, 2268 }; 2269 2270 coi_init(&conns_iter, engine); 2271 n_batches_sent = 0; 2272 n_sent = 0, n = 0; 2273 shrink = 0; 2274 deadline_exceeded = 0; 2275 iov = batch->iov; 2276 packet = batch->packets; 2277 2278 while ((conn = coi_next(&conns_iter))) 2279 { 2280 packet_out = conn->cn_if->ci_next_packet_to_send(conn, 0); 2281 if (!packet_out) { 2282 /* Evanescent connection always has a packet to send: */ 2283 assert(!(conn->cn_flags & LSCONN_EVANESCENT)); 2284 LSQ_DEBUGC("batched all outgoing packets for %s conn %"CID_FMT, 2285 (conn->cn_flags & LSCONN_MINI ? "mini" : "full"), 2286 CID_BITS(lsquic_conn_log_cid(conn))); 2287 coi_deactivate(&conns_iter, conn); 2288 continue; 2289 } 2290 batch->outs[n].iov = packet_iov = iov; 2291 next_coa: 2292 if (!(packet_out->po_flags & (PO_ENCRYPTED|PO_NOENCRYPT))) 2293 { 2294 switch (conn->cn_esf_c->esf_encrypt_packet(conn->cn_enc_session, 2295 &engine->pub, conn, packet_out)) 2296 { 2297 case ENCPA_NOMEM: 2298 /* Send what we have and wait for a more opportune moment */ 2299 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2300 goto end_for; 2301 case ENCPA_BADCRYPT: 2302 /* This is pretty bad: close connection immediately */ 2303 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2304 LSQ_INFOC("conn %"CID_FMT" has unsendable packets", 2305 CID_BITS(lsquic_conn_log_cid(conn))); 2306 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 2307 { 2308 close_conn_immediately(engine, &sb_ctx, conn); 2309 coi_deactivate(&conns_iter, conn); 2310 } 2311 continue; 2312 case ENCPA_OK: 2313 break; 2314 } 2315 } 2316 else if ((packet_out->po_flags & PO_NOENCRYPT) 2317 && engine->pub.enp_pmi != &stock_pmi) 2318 { 2319 if (0 != copy_packet(engine, conn, packet_out)) 2320 { 2321 /* Copy can only fail if packet could not be allocated */ 2322 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2323 goto end_for; 2324 } 2325 } 2326 LSQ_DEBUGC("batched packet %"PRIu64" for connection %"CID_FMT, 2327 packet_out->po_packno, CID_BITS(lsquic_conn_log_cid(conn))); 2328 if (packet_out->po_flags & PO_ENCRYPTED) 2329 { 2330 iov->iov_base = packet_out->po_enc_data; 2331 iov->iov_len = packet_out->po_enc_data_sz; 2332 } 2333 else 2334 { 2335 iov->iov_base = packet_out->po_data; 2336 iov->iov_len = packet_out->po_data_sz; 2337 } 2338 if (packet_iov == iov) 2339 { 2340 batch->pack_off[n] = packet - batch->packets; 2341 batch->outs [n].ecn = lsquic_packet_out_ecn(packet_out); 2342 batch->outs [n].peer_ctx = packet_out->po_path->np_peer_ctx; 2343 batch->outs [n].local_sa = NP_LOCAL_SA(packet_out->po_path); 2344 batch->outs [n].dest_sa = NP_PEER_SA(packet_out->po_path); 2345 batch->conns [n] = conn; 2346 } 2347 *packet = packet_out; 2348 ++packet; 2349 ++iov; 2350 if ((conn->cn_flags & LSCONN_IETF) 2351 && ((1 << packet_out->po_header_type) 2352 & ((1 << HETY_INITIAL)|(1 << HETY_HANDSHAKE)|(1 << HETY_0RTT))) 2353#ifndef NDEBUG 2354 && (engine->flags & ENG_COALESCE) 2355#endif 2356 && iov < batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2357 { 2358 const size_t size = iov_size(packet_iov, iov); 2359 packet_out = conn->cn_if->ci_next_packet_to_send(conn, size); 2360 if (packet_out) 2361 goto next_coa; 2362 } 2363 batch->outs [n].iovlen = iov - packet_iov; 2364 ++n; 2365 if (n == engine->batch_size 2366 || iov >= batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2367 { 2368 w = send_batch(engine, &sb_ctx, n); 2369 n = 0; 2370 iov = batch->iov; 2371 packet = batch->packets; 2372 ++n_batches_sent; 2373 n_sent += w; 2374 if (w < engine->batch_size) 2375 { 2376 shrink = 1; 2377 break; 2378 } 2379 deadline_exceeded = check_deadline(engine); 2380 if (deadline_exceeded) 2381 break; 2382 grow_batch_size(engine); 2383 } 2384 } 2385 end_for: 2386 2387 if (n > 0) { 2388 w = send_batch(engine, &sb_ctx, n); 2389 n_sent += w; 2390 shrink = w < n; 2391 ++n_batches_sent; 2392 } 2393 2394 if (shrink) 2395 shrink_batch_size(engine); 2396 else if (n_batches_sent > 1) 2397 { 2398 deadline_exceeded = check_deadline(engine); 2399 if (!deadline_exceeded) 2400 grow_batch_size(engine); 2401 } 2402 2403 coi_reheap(&conns_iter, engine); 2404 2405 LSQ_DEBUG("%s: sent %u packet%.*s", __func__, n_sent, n_sent != 1, "s"); 2406} 2407 2408 2409int 2410lsquic_engine_has_unsent_packets (lsquic_engine_t *engine) 2411{ 2412 return lsquic_mh_count(&engine->conns_out) > 0 2413 || (engine->pr_queue && prq_have_pending(engine->pr_queue)) 2414 ; 2415} 2416 2417 2418static void 2419reset_deadline (lsquic_engine_t *engine, lsquic_time_t now) 2420{ 2421 engine->deadline = now + engine->pub.enp_settings.es_proc_time_thresh; 2422 engine->flags &= ~ENG_PAST_DEADLINE; 2423} 2424 2425 2426void 2427lsquic_engine_send_unsent_packets (lsquic_engine_t *engine) 2428{ 2429 lsquic_conn_t *conn; 2430 struct conns_stailq closed_conns; 2431 struct conns_tailq ticked_conns = TAILQ_HEAD_INITIALIZER(ticked_conns); 2432 struct cid_update_batch cub; 2433 2434 ENGINE_IN(engine); 2435 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 2436 STAILQ_INIT(&closed_conns); 2437 reset_deadline(engine, lsquic_time_now()); 2438 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND)) 2439 { 2440 LSQ_DEBUG("can send again"); 2441 EV_LOG_GENERIC_EVENT("can send again"); 2442 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2443 } 2444 2445 send_packets_out(engine, &ticked_conns, &closed_conns); 2446 2447 while ((conn = STAILQ_FIRST(&closed_conns))) { 2448 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2449 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2450 cub_add_cids_from_cces(&cub, conn); 2451 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2452 } 2453 2454 cub_flush(&cub); 2455 ENGINE_OUT(engine); 2456} 2457 2458 2459static lsquic_conn_t * 2460next_new_full_conn (struct conns_stailq *new_full_conns) 2461{ 2462 lsquic_conn_t *conn; 2463 2464 conn = STAILQ_FIRST(new_full_conns); 2465 if (conn) 2466 STAILQ_REMOVE_HEAD(new_full_conns, cn_next_new_full); 2467 return conn; 2468} 2469 2470 2471static void 2472process_connections (lsquic_engine_t *engine, conn_iter_f next_conn, 2473 lsquic_time_t now) 2474{ 2475 lsquic_conn_t *conn; 2476 enum tick_st tick_st; 2477 unsigned i, why; 2478 lsquic_time_t next_tick_time; 2479 struct conns_stailq closed_conns; 2480 struct conns_tailq ticked_conns; 2481 struct conns_stailq new_full_conns; 2482 struct cid_update_batch cub_old, cub_live; 2483 cub_init(&cub_old, engine->report_old_scids, engine->scids_ctx); 2484 cub_init(&cub_live, engine->report_live_scids, engine->scids_ctx); 2485 2486 eng_hist_tick(&engine->history, now); 2487 2488 STAILQ_INIT(&closed_conns); 2489 TAILQ_INIT(&ticked_conns); 2490 reset_deadline(engine, now); 2491 STAILQ_INIT(&new_full_conns); 2492 2493 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND) 2494 && now > engine->resume_sending_at) 2495 { 2496 LSQ_NOTICE("failsafe activated: resume sending packets again after " 2497 "timeout"); 2498 EV_LOG_GENERIC_EVENT("resume sending packets again after timeout"); 2499 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2500 } 2501 2502 i = 0; 2503 while ((conn = next_conn(engine)) 2504 || (conn = next_new_full_conn(&new_full_conns))) 2505 { 2506 tick_st = conn->cn_if->ci_tick(conn, now); 2507 conn->cn_last_ticked = now + i /* Maintain relative order */ ++; 2508 if (tick_st & TICK_PROMOTE) 2509 { 2510 lsquic_conn_t *new_conn; 2511 EV_LOG_CONN_EVENT(lsquic_conn_log_cid(conn), 2512 "scheduled for promotion"); 2513 assert(conn->cn_flags & LSCONN_MINI); 2514 new_conn = new_full_conn_server(engine, conn, now); 2515 if (new_conn) 2516 { 2517 STAILQ_INSERT_TAIL(&new_full_conns, new_conn, cn_next_new_full); 2518 new_conn->cn_last_sent = engine->last_sent; 2519 eng_hist_inc(&engine->history, now, sl_new_full_conns); 2520 } 2521 tick_st |= TICK_CLOSE; /* Destroy mini connection */ 2522 conn->cn_flags |= LSCONN_PROMOTED; 2523 } 2524 if (tick_st & TICK_SEND) 2525 { 2526 if (!(conn->cn_flags & LSCONN_HAS_OUTGOING)) 2527 { 2528 lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent); 2529 engine_incref_conn(conn, LSCONN_HAS_OUTGOING); 2530 } 2531 } 2532 if (tick_st & TICK_CLOSE) 2533 { 2534 STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn); 2535 engine_incref_conn(conn, LSCONN_CLOSING); 2536 if (conn->cn_flags & LSCONN_HASHED) 2537 remove_conn_from_hash(engine, conn); 2538 } 2539 else 2540 { 2541 TAILQ_INSERT_TAIL(&ticked_conns, conn, cn_next_ticked); 2542 engine_incref_conn(conn, LSCONN_TICKED); 2543 if ((engine->flags & ENG_SERVER) && conn->cn_if->ci_report_live 2544 && conn->cn_if->ci_report_live(conn, now)) 2545 cub_add_cids_from_cces(&cub_live, conn); 2546 } 2547 } 2548 2549 if ((engine->pub.enp_flags & ENPUB_CAN_SEND) 2550 && lsquic_engine_has_unsent_packets(engine)) 2551 send_packets_out(engine, &ticked_conns, &closed_conns); 2552 2553 while ((conn = STAILQ_FIRST(&closed_conns))) { 2554 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2555 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2556 cub_add_cids_from_cces(&cub_old, conn); 2557 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2558 } 2559 2560 while ((conn = TAILQ_FIRST(&ticked_conns))) 2561 { 2562 TAILQ_REMOVE(&ticked_conns, conn, cn_next_ticked); 2563 engine_decref_conn(engine, conn, LSCONN_TICKED); 2564 if (!(conn->cn_flags & LSCONN_TICKABLE) 2565 && conn->cn_if->ci_is_tickable(conn)) 2566 { 2567 /* Floyd heapification is not faster, don't bother. */ 2568 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 2569 engine_incref_conn(conn, LSCONN_TICKABLE); 2570 } 2571 else if (!(conn->cn_flags & LSCONN_ATTQ)) 2572 { 2573 next_tick_time = conn->cn_if->ci_next_tick_time(conn, &why); 2574 if (next_tick_time) 2575 { 2576 if (0 == attq_add(engine->attq, conn, next_tick_time, why)) 2577 engine_incref_conn(conn, LSCONN_ATTQ); 2578 } 2579 else 2580 assert(0); 2581 } 2582 } 2583 2584 cub_flush(&engine->new_scids); 2585 cub_flush(&cub_live); 2586 cub_flush(&cub_old); 2587} 2588 2589 2590/* Return 0 if packet is being processed by a real connection, 1 if the 2591 * packet was processed, but not by a connection, and -1 on error. 2592 */ 2593int 2594lsquic_engine_packet_in (lsquic_engine_t *engine, 2595 const unsigned char *packet_in_data, size_t packet_in_size, 2596 const struct sockaddr *sa_local, const struct sockaddr *sa_peer, 2597 void *peer_ctx, int ecn) 2598{ 2599 const unsigned char *const packet_end = packet_in_data + packet_in_size; 2600 struct packin_parse_state ppstate; 2601 lsquic_packet_in_t *packet_in; 2602 int (*parse_packet_in_begin) (struct lsquic_packet_in *, size_t length, 2603 int is_server, unsigned cid_len, struct packin_parse_state *); 2604 unsigned n_zeroes; 2605 int s; 2606 2607 ENGINE_CALLS_INCR(engine); 2608 2609 if (engine->flags & ENG_SERVER) 2610 parse_packet_in_begin = lsquic_parse_packet_in_server_begin; 2611 else 2612 if (engine->flags & ENG_CONNS_BY_ADDR) 2613 { 2614 struct lsquic_hash_elem *el; 2615 const struct lsquic_conn *conn; 2616 el = find_conn_by_addr(engine->conns_hash, sa_local); 2617 if (!el) 2618 return -1; 2619 conn = lsquic_hashelem_getdata(el); 2620 if ((1 << conn->cn_version) & LSQUIC_GQUIC_HEADER_VERSIONS) 2621 parse_packet_in_begin = lsquic_gquic_parse_packet_in_begin; 2622 else if ((1 << conn->cn_version) & LSQUIC_IETF_VERSIONS) 2623 parse_packet_in_begin = lsquic_ietf_v1_parse_packet_in_begin; 2624 else if (conn->cn_version == LSQVER_050) 2625 parse_packet_in_begin = lsquic_Q050_parse_packet_in_begin; 2626 else 2627 { 2628 assert(conn->cn_version == LSQVER_046 2629#if LSQUIC_USE_Q098 2630 || conn->cn_version == LSQVER_098 2631#endif 2632 2633 ); 2634 parse_packet_in_begin = lsquic_Q046_parse_packet_in_begin; 2635 } 2636 } 2637 else 2638 parse_packet_in_begin = lsquic_parse_packet_in_begin; 2639 2640 n_zeroes = 0; 2641 do 2642 { 2643 packet_in = lsquic_mm_get_packet_in(&engine->pub.enp_mm); 2644 if (!packet_in) 2645 return -1; 2646 /* Library does not modify packet_in_data, it is not referenced after 2647 * this function returns and subsequent release of pi_data is guarded 2648 * by PI_OWN_DATA flag. 2649 */ 2650 packet_in->pi_data = (unsigned char *) packet_in_data; 2651 if (0 != parse_packet_in_begin(packet_in, packet_end - packet_in_data, 2652 engine->flags & ENG_SERVER, 2653 engine->pub.enp_settings.es_scid_len, &ppstate)) 2654 { 2655 LSQ_DEBUG("Cannot parse incoming packet's header"); 2656 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 2657 errno = EINVAL; 2658 return -1; 2659 } 2660 2661 packet_in_data += packet_in->pi_data_sz; 2662 packet_in->pi_received = lsquic_time_now(); 2663 packet_in->pi_flags |= (3 & ecn) << PIBIT_ECN_SHIFT; 2664 eng_hist_inc(&engine->history, packet_in->pi_received, sl_packets_in); 2665 s = process_packet_in(engine, packet_in, &ppstate, sa_local, sa_peer, 2666 peer_ctx, packet_in_size); 2667 n_zeroes += s == 0; 2668 } 2669 while (0 == s && packet_in_data < packet_end); 2670 2671 return n_zeroes > 0 ? 0 : s; 2672} 2673 2674 2675#if __GNUC__ && !defined(NDEBUG) 2676__attribute__((weak)) 2677#endif 2678unsigned 2679lsquic_engine_quic_versions (const lsquic_engine_t *engine) 2680{ 2681 return engine->pub.enp_settings.es_versions; 2682} 2683 2684 2685void 2686lsquic_engine_cooldown (lsquic_engine_t *engine) 2687{ 2688 struct lsquic_hash_elem *el; 2689 lsquic_conn_t *conn; 2690 2691 if (engine->flags & ENG_COOLDOWN) 2692 /* AFAICT, there is no harm in calling this function more than once, 2693 * but log it just in case, as it may indicate an error in the caller. 2694 */ 2695 LSQ_INFO("cooldown called again"); 2696 engine->flags |= ENG_COOLDOWN; 2697 LSQ_INFO("entering cooldown mode"); 2698 if (engine->flags & ENG_SERVER) 2699 drop_all_mini_conns(engine); 2700 for (el = lsquic_hash_first(engine->conns_hash); el; 2701 el = lsquic_hash_next(engine->conns_hash)) 2702 { 2703 conn = lsquic_hashelem_getdata(el); 2704 lsquic_conn_going_away(conn); 2705 } 2706} 2707 2708 2709int 2710lsquic_engine_earliest_adv_tick (lsquic_engine_t *engine, int *diff) 2711{ 2712 const struct attq_elem *next_attq; 2713 lsquic_time_t now, next_time; 2714#if LSQUIC_DEBUG_NEXT_ADV_TICK 2715 const struct lsquic_conn *conn; 2716 const enum lsq_log_level L = LSQ_LOG_DEBUG; /* Easy toggle */ 2717#endif 2718 2719 ENGINE_CALLS_INCR(engine); 2720 2721 if ((engine->flags & ENG_PAST_DEADLINE) 2722 && lsquic_mh_count(&engine->conns_out)) 2723 { 2724#if LSQUIC_DEBUG_NEXT_ADV_TICK 2725 conn = lsquic_mh_peek(&engine->conns_out); 2726 engine->last_logged_conn = 0; 2727 LSQ_LOGC(L, "next advisory tick is now: went past deadline last time " 2728 "and have %u outgoing connection%.*s (%"CID_FMT" first)", 2729 lsquic_mh_count(&engine->conns_out), 2730 lsquic_mh_count(&engine->conns_out) != 1, "s", 2731 CID_BITS(lsquic_conn_log_cid(conn))); 2732#endif 2733 *diff = 0; 2734 return 1; 2735 } 2736 2737 if (engine->pr_queue && prq_have_pending(engine->pr_queue)) 2738 { 2739#if LSQUIC_DEBUG_NEXT_ADV_TICK 2740 engine->last_logged_conn = 0; 2741 LSQ_LOG(L, "next advisory tick is now: have pending PRQ elements"); 2742#endif 2743 *diff = 0; 2744 return 1; 2745 } 2746 2747 if (lsquic_mh_count(&engine->conns_tickable)) 2748 { 2749#if LSQUIC_DEBUG_NEXT_ADV_TICK 2750 conn = lsquic_mh_peek(&engine->conns_tickable); 2751 engine->last_logged_conn = 0; 2752 LSQ_LOGC(L, "next advisory tick is now: have %u tickable " 2753 "connection%.*s (%"CID_FMT" first)", 2754 lsquic_mh_count(&engine->conns_tickable), 2755 lsquic_mh_count(&engine->conns_tickable) != 1, "s", 2756 CID_BITS(lsquic_conn_log_cid(conn))); 2757#endif 2758 *diff = 0; 2759 return 1; 2760 } 2761 2762 next_attq = attq_next(engine->attq); 2763 if (engine->pub.enp_flags & ENPUB_CAN_SEND) 2764 { 2765 if (next_attq) 2766 next_time = next_attq->ae_adv_time; 2767 else 2768 return 0; 2769 } 2770 else 2771 { 2772 if (next_attq) 2773 { 2774 next_time = next_attq->ae_adv_time; 2775 if (engine->resume_sending_at < next_time) 2776 { 2777 next_time = engine->resume_sending_at; 2778 next_attq = NULL; 2779 } 2780 } 2781 else 2782 next_time = engine->resume_sending_at; 2783 } 2784 2785 now = lsquic_time_now(); 2786 *diff = (int) ((int64_t) next_time - (int64_t) now); 2787#if LSQUIC_DEBUG_NEXT_ADV_TICK 2788 if (next_attq) 2789 { 2790 /* Deduplicate consecutive log messages about the same reason for the 2791 * same connection. 2792 * If diff is always zero or diff reset to a higher value, event is 2793 * still logged. 2794 */ 2795 if (!((unsigned) next_attq->ae_why == engine->last_logged_ae_why 2796 && (uintptr_t) next_attq->ae_conn 2797 == engine->last_logged_conn 2798 && *diff < engine->last_tick_diff)) 2799 { 2800 engine->last_logged_conn = (uintptr_t) next_attq->ae_conn; 2801 engine->last_logged_ae_why = (unsigned) next_attq->ae_why; 2802 engine->last_tick_diff = *diff; 2803 LSQ_LOGC(L, "next advisory tick is %d usec away: conn %"CID_FMT 2804 ": %s", *diff, CID_BITS(lsquic_conn_log_cid(next_attq->ae_conn)), 2805 lsquic_attq_why2str(next_attq->ae_why)); 2806 } 2807 } 2808 else 2809 LSQ_LOG(L, "next advisory tick is %d usec away: resume sending", *diff); 2810#endif 2811 return 1; 2812} 2813 2814 2815unsigned 2816lsquic_engine_count_attq (lsquic_engine_t *engine, int from_now) 2817{ 2818 lsquic_time_t now; 2819 ENGINE_CALLS_INCR(engine); 2820 now = lsquic_time_now(); 2821 if (from_now < 0) 2822 now -= from_now; 2823 else 2824 now += from_now; 2825 return attq_count_before(engine->attq, now); 2826} 2827 2828 2829int 2830lsquic_engine_add_cid (struct lsquic_engine_public *enpub, 2831 struct lsquic_conn *conn, unsigned cce_idx) 2832{ 2833 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2834 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2835 void *peer_ctx; 2836 2837 assert(cce_idx < conn->cn_n_cces); 2838 assert(conn->cn_cces_mask & (1 << cce_idx)); 2839 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 2840 2841 if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 2842 cce->cce_cid.len, conn, &cce->cce_hash_el)) 2843 { 2844 LSQ_DEBUGC("add %"CID_FMT" to the list of SCIDs", 2845 CID_BITS(&cce->cce_cid)); 2846 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 2847 cce->cce_flags |= CCE_REG; 2848 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 2849 return 0; 2850 } 2851 else 2852 { 2853 LSQ_WARNC("could not add new cid %"CID_FMT" to the SCID hash", 2854 CID_BITS(&cce->cce_cid)); 2855 return -1; 2856 } 2857} 2858 2859 2860void 2861lsquic_engine_retire_cid (struct lsquic_engine_public *enpub, 2862 struct lsquic_conn *conn, unsigned cce_idx, lsquic_time_t now) 2863{ 2864 struct lsquic_engine *const engine = (struct lsquic_engine *) enpub; 2865 struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx]; 2866 void *peer_ctx; 2867 2868 assert(cce_idx < conn->cn_n_cces); 2869 2870 if (cce->cce_hash_el.qhe_flags & QHE_HASHED) 2871 lsquic_hash_erase(engine->conns_hash, &cce->cce_hash_el); 2872 2873 if (engine->purga) 2874 { 2875 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 2876 lsquic_purga_add(engine->purga, &cce->cce_cid, peer_ctx, 2877 PUTY_CID_RETIRED, now); 2878 } 2879 conn->cn_cces_mask &= ~(1u << cce_idx); 2880 LSQ_DEBUGC("retire CID %"CID_FMT, CID_BITS(&cce->cce_cid)); 2881} 2882 2883 2884