lsquic_engine.c revision 92f6e17b
1/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_engine.c - QUIC engine 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <limits.h> 10#include <stdint.h> 11#include <stdio.h> 12#include <stdlib.h> 13#include <string.h> 14#include <sys/queue.h> 15#include <time.h> 16#ifndef WIN32 17#include <sys/time.h> 18#include <netinet/in.h> 19#include <sys/types.h> 20#include <sys/stat.h> 21#include <fcntl.h> 22#include <unistd.h> 23#include <netdb.h> 24#endif 25 26#ifndef NDEBUG 27#include <sys/types.h> 28#include <regex.h> /* For code that loses packets */ 29#endif 30 31#if LOG_PACKET_CHECKSUM 32#include <zlib.h> 33#endif 34 35#include <openssl/aead.h> 36 37#include "lsquic.h" 38#include "lsquic_types.h" 39#include "lsquic_int_types.h" 40#include "lsquic_sizes.h" 41#include "lsquic_parse_common.h" 42#include "lsquic_parse.h" 43#include "lsquic_packet_in.h" 44#include "lsquic_packet_out.h" 45#include "lsquic_senhist.h" 46#include "lsquic_rtt.h" 47#include "lsquic_cubic.h" 48#include "lsquic_pacer.h" 49#include "lsquic_bw_sampler.h" 50#include "lsquic_minmax.h" 51#include "lsquic_bbr.h" 52#include "lsquic_send_ctl.h" 53#include "lsquic_set.h" 54#include "lsquic_conn_flow.h" 55#include "lsquic_sfcw.h" 56#include "lsquic_hash.h" 57#include "lsquic_conn.h" 58#include "lsquic_full_conn.h" 59#include "lsquic_util.h" 60#include "lsquic_qtags.h" 61#include "lsquic_enc_sess.h" 62#include "lsquic_mm.h" 63#include "lsquic_engine_public.h" 64#include "lsquic_eng_hist.h" 65#include "lsquic_ev_log.h" 66#include "lsquic_version.h" 67#include "lsquic_pr_queue.h" 68#include "lsquic_mini_conn.h" 69#include "lsquic_mini_conn_ietf.h" 70#include "lsquic_stock_shi.h" 71#include "lsquic_purga.h" 72#include "lsquic_tokgen.h" 73#include "lsquic_attq.h" 74#include "lsquic_min_heap.h" 75#include "lsquic_http1x_if.h" 76#include "lsquic_parse_common.h" 77#include "lsquic_handshake.h" 78 79#define LSQUIC_LOGGER_MODULE LSQLM_ENGINE 80#include "lsquic_logger.h" 81 82#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) 83 84/* The batch of outgoing packets grows and shrinks dynamically */ 85#define MAX_OUT_BATCH_SIZE 1024 86#define MIN_OUT_BATCH_SIZE 4 87#define INITIAL_OUT_BATCH_SIZE 32 88 89struct out_batch 90{ 91 lsquic_conn_t *conns [MAX_OUT_BATCH_SIZE]; 92 struct lsquic_out_spec outs [MAX_OUT_BATCH_SIZE]; 93 unsigned pack_off[MAX_OUT_BATCH_SIZE]; 94 lsquic_packet_out_t *packets[MAX_OUT_BATCH_SIZE * 2]; 95 struct iovec iov [MAX_OUT_BATCH_SIZE * 2]; 96}; 97 98typedef struct lsquic_conn * (*conn_iter_f)(struct lsquic_engine *); 99 100static void 101process_connections (struct lsquic_engine *engine, conn_iter_f iter, 102 lsquic_time_t now); 103 104static void 105engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag); 106 107static lsquic_conn_t * 108engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 109 enum lsquic_conn_flags flag); 110 111static void 112force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn); 113 114#if LSQUIC_COUNT_ENGINE_CALLS 115#define ENGINE_CALLS_INCR(e) do { ++(e)->n_engine_calls; } while (0) 116#else 117#define ENGINE_CALLS_INCR(e) 118#endif 119 120/* Nested calls to LSQUIC are not supported */ 121#define ENGINE_IN(e) do { \ 122 assert(!((e)->pub.enp_flags & ENPUB_PROC)); \ 123 (e)->pub.enp_flags |= ENPUB_PROC; \ 124 ENGINE_CALLS_INCR(e); \ 125} while (0) 126 127#define ENGINE_OUT(e) do { \ 128 assert((e)->pub.enp_flags & ENPUB_PROC); \ 129 (e)->pub.enp_flags &= ~ENPUB_PROC; \ 130} while (0) 131 132/* A connection can be referenced from one of six places: 133 * 134 * 1. A hash. The engine maintains two hash tables -- one for full, and 135 * one for mini connections. A connection starts its life in one of 136 * those. 137 * 138 * 2. Outgoing queue. 139 * 140 * 3. Tickable queue 141 * 142 * 4. Advisory Tick Time queue. 143 * 144 * 5. Closing connections queue. This is a transient queue -- it only 145 * exists for the duration of process_connections() function call. 146 * 147 * 6. Ticked connections queue. Another transient queue, similar to (5). 148 * 149 * The idea is to destroy the connection when it is no longer referenced. 150 * For example, a connection tick may return TICK_SEND|TICK_CLOSE. In 151 * that case, the connection is referenced from two places: (2) and (5). 152 * After its packets are sent, it is only referenced in (5), and at the 153 * end of the function call, when it is removed from (5), reference count 154 * goes to zero and the connection is destroyed. If not all packets can 155 * be sent, at the end of the function call, the connection is referenced 156 * by (2) and will only be removed once all outgoing packets have been 157 * sent. 158 */ 159#define CONN_REF_FLAGS (LSCONN_HASHED \ 160 |LSCONN_HAS_OUTGOING \ 161 |LSCONN_TICKABLE \ 162 |LSCONN_TICKED \ 163 |LSCONN_CLOSING \ 164 |LSCONN_ATTQ) 165 166 167 168 169struct cid_update_batch 170{ 171 lsquic_cids_update_f cub_update_cids; 172 void *cub_update_ctx; 173 unsigned cub_count; 174 lsquic_cid_t cub_cids[20]; 175 void *cub_peer_ctxs[20]; 176}; 177 178static void 179cub_init (struct cid_update_batch *, lsquic_cids_update_f, void *); 180 181 182struct lsquic_engine 183{ 184 struct lsquic_engine_public pub; 185 enum { 186 ENG_SERVER = LSENG_SERVER, 187 ENG_HTTP = LSENG_HTTP, 188 ENG_COOLDOWN = (1 << 7), /* Cooldown: no new connections */ 189 ENG_PAST_DEADLINE 190 = (1 << 8), /* Previous call to a processing 191 * function went past time threshold. 
192 */ 193 ENG_CONNS_BY_ADDR 194 = (1 << 9), /* Connections are hashed by address */ 195#ifndef NDEBUG 196 ENG_COALESCE = (1 << 24), /* Packet coalescing is enabled */ 197 ENG_LOSE_PACKETS= (1 << 25), /* Lose *some* outgoing packets */ 198 ENG_DTOR = (1 << 26), /* Engine destructor */ 199#endif 200 } flags; 201 lsquic_packets_out_f packets_out; 202 void *packets_out_ctx; 203 lsquic_cids_update_f report_new_scids; 204 lsquic_cids_update_f report_live_scids; 205 lsquic_cids_update_f report_old_scids; 206 void *scids_ctx; 207 struct lsquic_hash *conns_hash; 208 struct min_heap conns_tickable; 209 struct min_heap conns_out; 210 /* Use a union because only one iterator is being used at any one time */ 211 union { 212 struct { 213 struct cert_susp_head *head; 214 } resumed; 215 struct lsquic_conn *one_conn; 216 } iter_state; 217 struct eng_hist history; 218 unsigned batch_size; 219 struct pr_queue *pr_queue; 220 struct attq *attq; 221 /* Track time last time a packet was sent to give new connections 222 * priority lower than that of existing connections. 223 */ 224 lsquic_time_t last_sent; 225#ifndef NDEBUG 226 regex_t lose_packets_re; 227 const char *lose_packets_str; 228#endif 229 unsigned n_conns; 230 lsquic_time_t deadline; 231 lsquic_time_t resume_sending_at; 232 unsigned mini_conns_count; 233 struct lsquic_purga *purga; 234#if LSQUIC_CONN_STATS 235 struct { 236 unsigned conns; 237 } stats; 238 struct conn_stats conn_stats_sum; 239 FILE *stats_fh; 240#endif 241 struct cid_update_batch new_scids; 242 struct out_batch out_batch; 243#if LSQUIC_COUNT_ENGINE_CALLS 244 unsigned long n_engine_calls; 245#endif 246}; 247 248 249void 250lsquic_engine_init_settings (struct lsquic_engine_settings *settings, 251 unsigned flags) 252{ 253 memset(settings, 0, sizeof(*settings)); 254 settings->es_versions = LSQUIC_DF_VERSIONS; 255 if (flags & ENG_SERVER) 256 { 257 settings->es_cfcw = LSQUIC_DF_CFCW_SERVER; 258 settings->es_sfcw = LSQUIC_DF_SFCW_SERVER; 259 settings->es_support_srej= LSQUIC_DF_SUPPORT_SREJ_SERVER; 260 settings->es_init_max_data 261 = LSQUIC_DF_INIT_MAX_DATA_SERVER; 262 settings->es_init_max_stream_data_bidi_remote 263 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_SERVER; 264 settings->es_init_max_stream_data_bidi_local 265 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_SERVER; 266 settings->es_init_max_stream_data_uni 267 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_SERVER; 268 settings->es_init_max_streams_uni 269 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_SERVER; 270 settings->es_ping_period = 0; 271 } 272 else 273 { 274 settings->es_cfcw = LSQUIC_DF_CFCW_CLIENT; 275 settings->es_sfcw = LSQUIC_DF_SFCW_CLIENT; 276 settings->es_support_srej= LSQUIC_DF_SUPPORT_SREJ_CLIENT; 277 settings->es_init_max_data 278 = LSQUIC_DF_INIT_MAX_DATA_CLIENT; 279 settings->es_init_max_stream_data_bidi_remote 280 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_REMOTE_CLIENT; 281 settings->es_init_max_stream_data_bidi_local 282 = LSQUIC_DF_INIT_MAX_STREAM_DATA_BIDI_LOCAL_CLIENT; 283 settings->es_init_max_stream_data_uni 284 = LSQUIC_DF_INIT_MAX_STREAM_DATA_UNI_CLIENT; 285 settings->es_init_max_streams_uni 286 = LSQUIC_DF_INIT_MAX_STREAMS_UNI_CLIENT; 287 settings->es_ping_period = LSQUIC_DF_PING_PERIOD; 288 } 289 settings->es_max_streams_in = LSQUIC_DF_MAX_STREAMS_IN; 290 settings->es_idle_conn_to = LSQUIC_DF_IDLE_CONN_TO; 291 settings->es_idle_timeout = LSQUIC_DF_IDLE_TIMEOUT; 292 settings->es_handshake_to = LSQUIC_DF_HANDSHAKE_TO; 293 settings->es_silent_close = LSQUIC_DF_SILENT_CLOSE; 294 settings->es_max_header_list_size 295 = 
LSQUIC_DF_MAX_HEADER_LIST_SIZE; 296 settings->es_ua = LSQUIC_DF_UA; 297 settings->es_ecn = LSQUIC_DF_ECN; 298 299 settings->es_pdmd = QTAG_X509; 300 settings->es_aead = QTAG_AESG; 301 settings->es_kexs = QTAG_C255; 302 settings->es_support_push = LSQUIC_DF_SUPPORT_PUSH; 303 settings->es_support_tcid0 = LSQUIC_DF_SUPPORT_TCID0; 304 settings->es_support_nstp = LSQUIC_DF_SUPPORT_NSTP; 305 settings->es_honor_prst = LSQUIC_DF_HONOR_PRST; 306 settings->es_progress_check = LSQUIC_DF_PROGRESS_CHECK; 307 settings->es_rw_once = LSQUIC_DF_RW_ONCE; 308 settings->es_proc_time_thresh= LSQUIC_DF_PROC_TIME_THRESH; 309 settings->es_pace_packets = LSQUIC_DF_PACE_PACKETS; 310 settings->es_clock_granularity = LSQUIC_DF_CLOCK_GRANULARITY; 311 settings->es_max_inchoate = LSQUIC_DF_MAX_INCHOATE; 312 settings->es_send_prst = LSQUIC_DF_SEND_PRST; 313 settings->es_sttl = LSQUIC_DF_STTL; 314 settings->es_init_max_streams_bidi 315 = LSQUIC_DF_INIT_MAX_STREAMS_BIDI; 316 settings->es_scid_len = LSQUIC_DF_SCID_LEN; 317 settings->es_scid_iss_rate = LSQUIC_DF_SCID_ISS_RATE; 318 settings->es_qpack_dec_max_size = LSQUIC_DF_QPACK_DEC_MAX_SIZE; 319 settings->es_qpack_dec_max_blocked = LSQUIC_DF_QPACK_DEC_MAX_BLOCKED; 320 settings->es_qpack_enc_max_size = LSQUIC_DF_QPACK_ENC_MAX_SIZE; 321 settings->es_qpack_enc_max_blocked = LSQUIC_DF_QPACK_ENC_MAX_BLOCKED; 322 settings->es_allow_migration = LSQUIC_DF_ALLOW_MIGRATION; 323} 324 325 326/* Note: if returning an error, err_buf must be valid if non-NULL */ 327int 328lsquic_engine_check_settings (const struct lsquic_engine_settings *settings, 329 unsigned flags, 330 char *err_buf, size_t err_buf_sz) 331{ 332 if (settings->es_cfcw < LSQUIC_MIN_FCW || 333 settings->es_sfcw < LSQUIC_MIN_FCW) 334 { 335 if (err_buf) 336 snprintf(err_buf, err_buf_sz, "%s", 337 "flow control window set too low"); 338 return -1; 339 } 340 if (0 == (settings->es_versions & LSQUIC_SUPPORTED_VERSIONS)) 341 { 342 if (err_buf) 343 snprintf(err_buf, err_buf_sz, "%s", 344 "No supported QUIC versions specified"); 345 return -1; 346 } 347 if (settings->es_versions & ~LSQUIC_SUPPORTED_VERSIONS) 348 { 349 if (err_buf) 350 snprintf(err_buf, err_buf_sz, "%s", 351 "one or more unsupported QUIC version is specified"); 352 return -1; 353 } 354 if (flags & ENG_SERVER) 355 { 356 if (settings->es_handshake_to > 357 MAX_MINI_CONN_LIFESPAN_IN_USEC) 358 { 359 if (err_buf) 360 snprintf(err_buf, err_buf_sz, "handshake timeout %lu" 361 " usec is too large. 
The maximum for server is %u usec", 362 settings->es_handshake_to, MAX_MINI_CONN_LIFESPAN_IN_USEC); 363 return -1; 364 } 365 } 366 if (settings->es_idle_timeout > 600) 367 { 368 if (err_buf) 369 snprintf(err_buf, err_buf_sz, "%s", 370 "The maximum value of idle timeout is 600 seconds"); 371 return -1; 372 } 373 if (!(!(flags & ENG_SERVER) && settings->es_scid_len == 0) 374 && (settings->es_scid_len < 4 || settings->es_scid_len > 18)) 375 { 376 if (err_buf) 377 snprintf(err_buf, err_buf_sz, "Source connection ID cannot be %u " 378 "bytes long; it must be between 4 and 18.", 379 settings->es_scid_len); 380 return -1; 381 } 382 383 if (settings->es_cc_algo > 2) 384 { 385 if (err_buf) 386 snprintf(err_buf, err_buf_sz, "Invalid congestion control " 387 "algorithm value %u", settings->es_cc_algo); 388 return -1; 389 } 390 return 0; 391} 392 393 394static void 395free_packet (void *ctx, void *conn_ctx, void *packet_data, char is_ipv6) 396{ 397 free(packet_data); 398} 399 400 401static void * 402malloc_buf (void *ctx, void *conn_ctx, unsigned short size, char is_ipv6) 403{ 404 return malloc(size); 405} 406 407 408static const struct lsquic_packout_mem_if stock_pmi = 409{ 410 malloc_buf, free_packet, free_packet, 411}; 412 413 414static int 415hash_conns_by_addr (const struct lsquic_engine *engine) 416{ 417 if (engine->flags & ENG_SERVER) 418 return 0; 419 if (engine->pub.enp_settings.es_versions & LSQUIC_FORCED_TCID0_VERSIONS) 420 return 1; 421 if ((engine->pub.enp_settings.es_versions & LSQUIC_GQUIC_HEADER_VERSIONS) 422 && engine->pub.enp_settings.es_support_tcid0) 423 return 1; 424 if (engine->pub.enp_settings.es_scid_len == 0) 425 return 1; 426 return 0; 427} 428 429 430lsquic_engine_t * 431lsquic_engine_new (unsigned flags, 432 const struct lsquic_engine_api *api) 433{ 434 lsquic_engine_t *engine; 435 char err_buf[100]; 436 437 if (!api->ea_packets_out) 438 { 439 LSQ_ERROR("packets_out callback is not specified"); 440 return NULL; 441 } 442 443 if (api->ea_settings && 444 0 != lsquic_engine_check_settings(api->ea_settings, flags, 445 err_buf, sizeof(err_buf))) 446 { 447 LSQ_ERROR("cannot create engine: %s", err_buf); 448 return NULL; 449 } 450 451 engine = calloc(1, sizeof(*engine)); 452 if (!engine) 453 return NULL; 454 if (0 != lsquic_mm_init(&engine->pub.enp_mm)) 455 { 456 free(engine); 457 return NULL; 458 } 459 if (api->ea_settings) 460 engine->pub.enp_settings = *api->ea_settings; 461 else 462 lsquic_engine_init_settings(&engine->pub.enp_settings, flags); 463 int tag_buf_len; 464 tag_buf_len = lsquic_gen_ver_tags(engine->pub.enp_ver_tags_buf, 465 sizeof(engine->pub.enp_ver_tags_buf), 466 engine->pub.enp_settings.es_versions); 467 if (tag_buf_len <= 0) 468 { 469 LSQ_ERROR("cannot generate version tags buffer"); 470 free(engine); 471 return NULL; 472 } 473 engine->pub.enp_ver_tags_len = tag_buf_len; 474 engine->pub.enp_flags = ENPUB_CAN_SEND; 475 engine->pub.enp_stream_if = api->ea_stream_if; 476 engine->pub.enp_stream_if_ctx = api->ea_stream_if_ctx; 477 478 engine->flags = flags; 479#ifndef NDEBUG 480 engine->flags |= ENG_COALESCE; 481#endif 482 engine->packets_out = api->ea_packets_out; 483 engine->packets_out_ctx = api->ea_packets_out_ctx; 484 engine->report_new_scids = api->ea_new_scids; 485 engine->report_live_scids = api->ea_live_scids; 486 engine->report_old_scids = api->ea_old_scids; 487 engine->scids_ctx = api->ea_cids_update_ctx; 488 cub_init(&engine->new_scids, engine->report_new_scids, engine->scids_ctx); 489 engine->pub.enp_lookup_cert = api->ea_lookup_cert; 490 
engine->pub.enp_cert_lu_ctx = api->ea_cert_lu_ctx; 491 engine->pub.enp_get_ssl_ctx = api->ea_get_ssl_ctx; 492 if (api->ea_shi) 493 { 494 engine->pub.enp_shi = api->ea_shi; 495 engine->pub.enp_shi_ctx = api->ea_shi_ctx; 496 } 497 else 498 { 499 engine->pub.enp_shi = &stock_shi; 500 engine->pub.enp_shi_ctx = stock_shared_hash_new(); 501 if (!engine->pub.enp_shi_ctx) 502 { 503 free(engine); 504 return NULL; 505 } 506 } 507 if (api->ea_hsi_if) 508 { 509 engine->pub.enp_hsi_if = api->ea_hsi_if; 510 engine->pub.enp_hsi_ctx = api->ea_hsi_ctx; 511 } 512 else 513 { 514 engine->pub.enp_hsi_if = lsquic_http1x_if; 515 engine->pub.enp_hsi_ctx = NULL; 516 } 517 if (api->ea_pmi) 518 { 519 engine->pub.enp_pmi = api->ea_pmi; 520 engine->pub.enp_pmi_ctx = api->ea_pmi_ctx; 521 } 522 else 523 { 524 engine->pub.enp_pmi = &stock_pmi; 525 engine->pub.enp_pmi_ctx = NULL; 526 } 527 engine->pub.enp_verify_cert = api->ea_verify_cert; 528 engine->pub.enp_verify_ctx = api->ea_verify_ctx; 529 engine->pub.enp_kli = api->ea_keylog_if; 530 engine->pub.enp_kli_ctx = api->ea_keylog_ctx; 531 engine->pub.enp_engine = engine; 532 if (hash_conns_by_addr(engine)) 533 engine->flags |= ENG_CONNS_BY_ADDR; 534 engine->conns_hash = lsquic_hash_create(); 535 if (flags & ENG_SERVER) 536 { 537 engine->pub.enp_tokgen = lsquic_tg_new(&engine->pub); 538 if (!engine->pub.enp_tokgen) 539 return NULL; 540 engine->pr_queue = prq_create( 541 10000 /* TODO: make configurable */, MAX_OUT_BATCH_SIZE, 542 &engine->pub); 543 if (!engine->pr_queue) 544 { 545 lsquic_tg_destroy(engine->pub.enp_tokgen); 546 return NULL; 547 } 548 engine->purga = lsquic_purga_new(30 * 1000 * 1000, 549 engine->report_old_scids, engine->scids_ctx); 550 if (!engine->purga) 551 { 552 lsquic_tg_destroy(engine->pub.enp_tokgen); 553 prq_destroy(engine->pr_queue); 554 return NULL; 555 } 556 } 557 engine->attq = attq_create(); 558 eng_hist_init(&engine->history); 559 engine->batch_size = INITIAL_OUT_BATCH_SIZE; 560 if (engine->pub.enp_settings.es_honor_prst) 561 { 562 engine->pub.enp_srst_hash = lsquic_hash_create(); 563 if (!engine->pub.enp_srst_hash) 564 { 565 lsquic_engine_destroy(engine); 566 return NULL; 567 } 568 } 569 570#ifndef NDEBUG 571 { 572 const char *env; 573 env = getenv("LSQUIC_LOSE_PACKETS_RE"); 574 if (env) 575 { 576 if (0 != regcomp(&engine->lose_packets_re, env, 577 REG_EXTENDED|REG_NOSUB)) 578 { 579 LSQ_ERROR("could not compile lost packet regex `%s'", env); 580 return NULL; 581 } 582 engine->flags |= ENG_LOSE_PACKETS; 583 engine->lose_packets_str = env; 584 LSQ_WARN("will lose packets that match the following regex: %s", 585 env); 586 } 587 env = getenv("LSQUIC_COALESCE"); 588 if (env) 589 { 590 engine->flags &= ~ENG_COALESCE; 591 if (atoi(env)) 592 { 593 engine->flags |= ENG_COALESCE; 594 LSQ_NOTICE("will coalesce packets"); 595 } 596 else 597 LSQ_NOTICE("will not coalesce packets"); 598 } 599 } 600#endif 601#if LSQUIC_CONN_STATS 602 engine->stats_fh = api->ea_stats_fh; 603#endif 604 605 LSQ_INFO("instantiated engine"); 606 return engine; 607} 608 609 610#if LOG_PACKET_CHECKSUM 611static void 612log_packet_checksum (const lsquic_cid_t *cid, const char *direction, 613 const unsigned char *buf, size_t bufsz) 614{ 615 EV_LOG_CONN_EVENT(cid, "packet %s checksum: %08X", direction, 616 (uint32_t) crc32(0, buf, bufsz)); 617} 618 619 620#endif 621 622 623static void 624grow_batch_size (struct lsquic_engine *engine) 625{ 626 engine->batch_size <<= engine->batch_size < MAX_OUT_BATCH_SIZE; 627} 628 629 630static void 631shrink_batch_size (struct lsquic_engine *engine) 
632{ 633 engine->batch_size >>= engine->batch_size > MIN_OUT_BATCH_SIZE; 634} 635 636 637struct cce_cid_iter 638{ 639 const struct lsquic_conn *conn; 640 unsigned todo, n; 641}; 642 643 644static struct conn_cid_elem * 645cce_iter_next (struct cce_cid_iter *citer) 646{ 647 struct conn_cid_elem *cce; 648 649 while (citer->todo) 650 if (citer->todo & (1 << citer->n)) 651 { 652 citer->todo &= ~(1 << citer->n); 653 cce = &citer->conn->cn_cces[ citer->n++ ]; 654 if (!(cce->cce_flags & CCE_PORT)) 655 return cce; 656 } 657 else 658 ++citer->n; 659 660 return NULL; 661} 662 663 664static struct conn_cid_elem * 665cce_iter_first (struct cce_cid_iter *citer, const struct lsquic_conn *conn) 666{ 667 citer->conn = conn; 668 citer->todo = conn->cn_cces_mask; 669 citer->n = 0; 670 return cce_iter_next(citer); 671} 672 673 674#if LSQUIC_CONN_STATS 675void 676update_stats_sum (struct lsquic_engine *engine, struct lsquic_conn *conn) 677{ 678 unsigned long *const dst = (unsigned long *) &engine->conn_stats_sum; 679 const unsigned long *src; 680 const struct conn_stats *stats; 681 unsigned i; 682 683 if (conn->cn_if->ci_get_stats && (stats = conn->cn_if->ci_get_stats(conn))) 684 { 685 ++engine->stats.conns; 686 src = (unsigned long *) stats; 687 for (i = 0; i < sizeof(*stats) / sizeof(unsigned long); ++i) 688 dst[i] += src[i]; 689 } 690} 691 692 693#endif 694 695 696/* Wrapper to make sure important things occur before the connection is 697 * really destroyed. 698 */ 699static void 700destroy_conn (struct lsquic_engine *engine, struct lsquic_conn *conn, 701 lsquic_time_t now) 702{ 703 struct cce_cid_iter citer; 704 const struct conn_cid_elem *cce; 705 lsquic_time_t drain_time; 706 struct purga_el *puel; 707 708 engine->mini_conns_count -= !!(conn->cn_flags & LSCONN_MINI); 709 if (engine->purga 710 /* Blacklist all CIDs except for promoted mini connections */ 711 && (conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) 712 != (LSCONN_MINI|LSCONN_PROMOTED)) 713 { 714 if (conn->cn_if->ci_drain_time && 715 (drain_time = conn->cn_if->ci_drain_time(conn), drain_time)) 716 { 717 for (cce = cce_iter_first(&citer, conn); cce; 718 cce = cce_iter_next(&citer)) 719 { 720 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 721 lsquic_conn_get_peer_ctx(conn, NULL), 722 PUTY_CONN_DRAIN, now); 723 if (puel) 724 puel->puel_time = now + drain_time; 725 } 726 } 727 else 728 { 729 for (cce = cce_iter_first(&citer, conn); cce; 730 cce = cce_iter_next(&citer)) 731 { 732 puel = lsquic_purga_add(engine->purga, &cce->cce_cid, 733 lsquic_conn_get_peer_ctx(conn, NULL), 734 PUTY_CONN_DELETED, now); 735 if (puel) 736 { 737 puel->puel_time = now; 738 puel->puel_count = 0; 739 } 740 } 741 } 742 } 743#if LSQUIC_CONN_STATS 744 update_stats_sum(engine, conn); 745#endif 746 --engine->n_conns; 747 conn->cn_flags |= LSCONN_NEVER_TICKABLE; 748 conn->cn_if->ci_destroy(conn); 749} 750 751 752static int 753maybe_grow_conn_heaps (struct lsquic_engine *engine) 754{ 755 struct min_heap_elem *els; 756 unsigned count; 757 758 if (engine->n_conns < lsquic_mh_nalloc(&engine->conns_tickable)) 759 return 0; /* Nothing to do */ 760 761 if (lsquic_mh_nalloc(&engine->conns_tickable)) 762 count = lsquic_mh_nalloc(&engine->conns_tickable) * 2 * 2; 763 else 764 count = 8; 765 766 els = malloc(sizeof(els[0]) * count); 767 if (!els) 768 { 769 LSQ_ERROR("%s: malloc failed", __func__); 770 return -1; 771 } 772 773 LSQ_DEBUG("grew heaps to %u elements", count / 2); 774 memcpy(&els[0], engine->conns_tickable.mh_elems, 775 sizeof(els[0]) * 
lsquic_mh_count(&engine->conns_tickable)); 776 memcpy(&els[count / 2], engine->conns_out.mh_elems, 777 sizeof(els[0]) * lsquic_mh_count(&engine->conns_out)); 778 free(engine->conns_tickable.mh_elems); 779 engine->conns_tickable.mh_elems = els; 780 engine->conns_out.mh_elems = &els[count / 2]; 781 engine->conns_tickable.mh_nalloc = count / 2; 782 engine->conns_out.mh_nalloc = count / 2; 783 return 0; 784} 785 786 787static void 788remove_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn, 789 unsigned todo) 790{ 791 unsigned n; 792 793 for (n = 0; todo; todo &= ~(1 << n++)) 794 if ((todo & (1 << n)) && 795 (conn->cn_cces[n].cce_hash_el.qhe_flags & QHE_HASHED)) 796 lsquic_hash_erase(hash, &conn->cn_cces[n].cce_hash_el); 797} 798 799 800static void 801remove_all_cces_from_hash (struct lsquic_hash *hash, struct lsquic_conn *conn) 802{ 803 remove_cces_from_hash(hash, conn, conn->cn_cces_mask); 804} 805 806 807static void 808cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx); 809 810 811static int 812insert_conn_into_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 813 void *peer_ctx) 814{ 815 struct conn_cid_elem *cce; 816 unsigned todo, done, n; 817 818 for (todo = conn->cn_cces_mask, done = 0, n = 0; todo; todo &= ~(1 << n++)) 819 if (todo & (1 << n)) 820 { 821 cce = &conn->cn_cces[n]; 822 assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED)); 823 if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf, 824 cce->cce_cid.len, conn, &cce->cce_hash_el)) 825 done |= 1 << n; 826 else 827 goto err; 828 if ((engine->flags & ENG_SERVER) && 0 == (cce->cce_flags & CCE_REG)) 829 { 830 cce->cce_flags |= CCE_REG; 831 cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx); 832 } 833 } 834 835 return 0; 836 837 err: 838 remove_cces_from_hash(engine->conns_hash, conn, done); 839 return -1; 840} 841 842 843static lsquic_conn_t * 844new_full_conn_server (lsquic_engine_t *engine, lsquic_conn_t *mini_conn, 845 lsquic_time_t now) 846{ 847 const lsquic_cid_t *cid; 848 server_conn_ctor_f ctor; 849 lsquic_conn_t *conn; 850 unsigned flags; 851 if (0 != maybe_grow_conn_heaps(engine)) 852 return NULL; 853 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 854 855 if (mini_conn->cn_flags & LSCONN_IETF) 856 ctor = lsquic_ietf_full_conn_server_new; 857 else 858 ctor = lsquic_gquic_full_conn_server_new; 859 860 conn = ctor(&engine->pub, flags, mini_conn); 861 if (!conn) 862 { 863 /* Otherwise, full_conn_server_new prints its own warnings */ 864 if (ENOMEM == errno) 865 { 866 cid = lsquic_conn_log_cid(mini_conn); 867 LSQ_WARNC("could not allocate full connection for %"CID_FMT": %s", 868 CID_BITS(cid), strerror(errno)); 869 } 870 return NULL; 871 } 872 ++engine->n_conns; 873 if (0 != insert_conn_into_hash(engine, conn, lsquic_conn_get_peer_ctx(conn, NULL))) 874 { 875 cid = lsquic_conn_log_cid(conn); 876 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 877 CID_BITS(cid)); 878 destroy_conn(engine, conn, now); 879 return NULL; 880 } 881 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 882 conn->cn_flags |= LSCONN_HASHED; 883 return conn; 884} 885 886 887static enum 888{ 889 VER_NOT_SPECIFIED, 890 VER_SUPPORTED, 891 VER_UNSUPPORTED, 892} 893 894 895version_matches (lsquic_engine_t *engine, const lsquic_packet_in_t *packet_in, 896 enum lsquic_version *pversion) 897{ 898 lsquic_ver_tag_t ver_tag; 899 enum lsquic_version version; 900 901 if (!packet_in->pi_quic_ver) 902 { 903 LSQ_DEBUG("packet does not specify version"); 904 return VER_NOT_SPECIFIED; 905 } 906 907 
memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver, sizeof(ver_tag)); 908 version = lsquic_tag2ver(ver_tag); 909 if (version < N_LSQVER) 910 { 911 if (engine->pub.enp_settings.es_versions & (1 << version)) 912 { 913 LSQ_DEBUG("client-supplied version %s is supported", 914 lsquic_ver2str[version]); 915 *pversion = version; 916 return VER_SUPPORTED; 917 } 918 else 919 LSQ_DEBUG("client-supplied version %s is not supported", 920 lsquic_ver2str[version]); 921 } 922 else 923 LSQ_DEBUG("client-supplied version tag 0x%08X is not recognized", 924 ver_tag); 925 926 return VER_UNSUPPORTED; 927} 928 929 930static void 931schedule_req_packet (struct lsquic_engine *engine, enum packet_req_type type, 932 const struct lsquic_packet_in *packet_in, const struct sockaddr *sa_local, 933 const struct sockaddr *sa_peer, void *peer_ctx) 934{ 935 assert(engine->pr_queue); 936 if (0 == prq_new_req(engine->pr_queue, type, packet_in, peer_ctx, 937 sa_local, sa_peer)) 938 LSQ_DEBUGC("scheduled %s packet for cid %"CID_FMT, 939 lsquic_preqt2str[type], CID_BITS(&packet_in->pi_conn_id)); 940 else 941 LSQ_DEBUG("cannot schedule %s packet", lsquic_preqt2str[type]); 942} 943 944 945static unsigned short 946sa2port (const struct sockaddr *sa) 947{ 948 if (sa->sa_family == AF_INET) 949 { 950 struct sockaddr_in *const sa4 = (void *) sa; 951 return sa4->sin_port; 952 } 953 else 954 { 955 struct sockaddr_in6 *const sa6 = (void *) sa; 956 return sa6->sin6_port; 957 } 958} 959 960 961static struct lsquic_hash_elem * 962find_conn_by_addr (struct lsquic_hash *hash, const struct sockaddr *sa) 963{ 964 unsigned short port; 965 966 port = sa2port(sa); 967 return lsquic_hash_find(hash, &port, sizeof(port)); 968} 969 970 971static lsquic_conn_t * 972find_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 973 struct packin_parse_state *ppstate, const struct sockaddr *sa_local) 974{ 975 struct lsquic_hash_elem *el; 976 lsquic_conn_t *conn; 977 978 if (engine->flags & ENG_CONNS_BY_ADDR) 979 el = find_conn_by_addr(engine->conns_hash, sa_local); 980 else if (packet_in->pi_flags & PI_CONN_ID) 981 el = lsquic_hash_find(engine->conns_hash, 982 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 983 else 984 { 985 LSQ_DEBUG("packet header does not have connection ID: discarding"); 986 return NULL; 987 } 988 989 if (!el) 990 return NULL; 991 992 conn = lsquic_hashelem_getdata(el); 993 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 994 if ((engine->flags & ENG_CONNS_BY_ADDR) 995 && !(conn->cn_flags & LSCONN_IETF) 996 && (packet_in->pi_flags & PI_CONN_ID) 997 && !LSQUIC_CIDS_EQ(CN_SCID(conn), &packet_in->pi_conn_id)) 998 { 999 LSQ_DEBUG("connection IDs do not match"); 1000 return NULL; 1001 } 1002 1003 return conn; 1004} 1005 1006 1007static lsquic_conn_t * 1008find_or_create_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1009 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1010 const struct sockaddr *sa_peer, void *peer_ctx) 1011{ 1012 struct lsquic_hash_elem *el; 1013 lsquic_cid_t odcid; 1014 struct purga_el *puel; 1015 lsquic_conn_t *conn; 1016 1017 if (!(packet_in->pi_flags & PI_CONN_ID)) 1018 { 1019 LSQ_DEBUG("packet header does not have connection ID: discarding"); 1020 return NULL; 1021 } 1022 el = lsquic_hash_find(engine->conns_hash, 1023 packet_in->pi_conn_id.idbuf, packet_in->pi_conn_id.len); 1024 1025 if (el) 1026 { 1027 conn = lsquic_hashelem_getdata(el); 1028 conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate); 1029 return conn; 1030 } 1031 1032 if 
(engine->flags & ENG_COOLDOWN) 1033 { /* Do not create incoming connections during cooldown */ 1034 LSQ_DEBUG("dropping inbound packet for unknown connection (cooldown)"); 1035 return NULL; 1036 } 1037 1038 if (engine->mini_conns_count >= engine->pub.enp_settings.es_max_inchoate) 1039 { 1040 LSQ_DEBUG("reached limit of %u inchoate connections", 1041 engine->pub.enp_settings.es_max_inchoate); 1042 return NULL; 1043 } 1044 1045 1046 if (engine->purga 1047 && (puel = lsquic_purga_contains(engine->purga, 1048 &packet_in->pi_conn_id), puel)) 1049 { 1050 switch (puel->puel_type) 1051 { 1052 case PUTY_CID_RETIRED: 1053 LSQ_DEBUGC("CID %"CID_FMT" was retired, ignore packet", 1054 CID_BITS(&packet_in->pi_conn_id)); 1055 return NULL; 1056 case PUTY_CONN_DRAIN: 1057 LSQ_DEBUG("drain till: %"PRIu64"; now: %"PRIu64, 1058 puel->puel_time, packet_in->pi_received); 1059 if (puel->puel_time > packet_in->pi_received) 1060 { 1061 LSQ_DEBUGC("CID %"CID_FMT" is in drain state, ignore packet", 1062 CID_BITS(&packet_in->pi_conn_id)); 1063 return NULL; 1064 } 1065 LSQ_DEBUGC("CID %"CID_FMT" goes from drain state to deleted", 1066 CID_BITS(&packet_in->pi_conn_id)); 1067 puel->puel_type = PUTY_CONN_DELETED; 1068 puel->puel_count = 0; 1069 puel->puel_time = 0; 1070 /* fall-through */ 1071 case PUTY_CONN_DELETED: 1072 LSQ_DEBUGC("Connection with CID %"CID_FMT" was deleted", 1073 CID_BITS(&packet_in->pi_conn_id)); 1074 if (puel->puel_time < packet_in->pi_received) 1075 { 1076 puel->puel_time = packet_in->pi_received 1077 /* Exponential back-off */ 1078 + 1000000ull * (1 << MIN(puel->puel_count, 4)); 1079 ++puel->puel_count; 1080 goto maybe_send_prst; 1081 } 1082 return NULL; 1083 default: 1084 assert(0); 1085 return NULL; 1086 } 1087 } 1088 1089 if (engine->pub.enp_settings.es_send_prst 1090 && !(packet_in->pi_flags & PI_GQUIC) 1091 && HETY_NOT_SET == packet_in->pi_header_type) 1092 goto maybe_send_prst; 1093 1094 if (0 != maybe_grow_conn_heaps(engine)) 1095 return NULL; 1096 1097 const struct parse_funcs *pf; 1098 enum lsquic_version version; 1099 switch (version_matches(engine, packet_in, &version)) 1100 { 1101 case VER_UNSUPPORTED: 1102 if (engine->flags & ENG_SERVER) 1103 schedule_req_packet(engine, PACKET_REQ_VERNEG, packet_in, 1104 sa_local, sa_peer, peer_ctx); 1105 return NULL; 1106 case VER_NOT_SPECIFIED: 1107 maybe_send_prst: 1108 if ((engine->flags & ENG_SERVER) && 1109 engine->pub.enp_settings.es_send_prst) 1110 schedule_req_packet(engine, PACKET_REQ_PUBRES, packet_in, 1111 sa_local, sa_peer, peer_ctx); 1112 return NULL; 1113 case VER_SUPPORTED: 1114 pf = select_pf_by_ver(version); 1115 pf->pf_parse_packet_in_finish(packet_in, ppstate); 1116 break; 1117 } 1118 1119 1120 if ((1 << version) & LSQUIC_IETF_VERSIONS) 1121 { 1122 conn = lsquic_mini_conn_ietf_new(&engine->pub, packet_in, version, 1123 sa_peer->sa_family == AF_INET, odcid.len ? 
&odcid : NULL); 1124 } 1125 else 1126 { 1127 conn = mini_conn_new(&engine->pub, packet_in, version); 1128 } 1129 if (!conn) 1130 return NULL; 1131 ++engine->mini_conns_count; 1132 ++engine->n_conns; 1133 if (0 != insert_conn_into_hash(engine, conn, peer_ctx)) 1134 { 1135 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1136 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1137 CID_BITS(cid)); 1138 destroy_conn(engine, conn, packet_in->pi_received); 1139 return NULL; 1140 } 1141 assert(!(conn->cn_flags & CONN_REF_FLAGS)); 1142 conn->cn_flags |= LSCONN_HASHED; 1143 eng_hist_inc(&engine->history, packet_in->pi_received, sl_new_mini_conns); 1144 conn->cn_last_sent = engine->last_sent; 1145 return conn; 1146} 1147 1148 1149lsquic_conn_t * 1150lsquic_engine_find_conn (const struct lsquic_engine_public *engine, 1151 const lsquic_cid_t *cid) 1152{ 1153 struct lsquic_hash_elem *el; 1154 lsquic_conn_t *conn = NULL; 1155 el = lsquic_hash_find(engine->enp_engine->conns_hash, cid->idbuf, cid->len); 1156 1157 if (el) 1158 conn = lsquic_hashelem_getdata(el); 1159 return conn; 1160} 1161 1162 1163#if !defined(NDEBUG) && __GNUC__ 1164__attribute__((weak)) 1165#endif 1166void 1167lsquic_engine_add_conn_to_tickable (struct lsquic_engine_public *enpub, 1168 lsquic_conn_t *conn) 1169{ 1170 if (0 == (enpub->enp_flags & ENPUB_PROC) && 1171 0 == (conn->cn_flags & (LSCONN_TICKABLE|LSCONN_NEVER_TICKABLE))) 1172 { 1173 lsquic_engine_t *engine = (lsquic_engine_t *) enpub; 1174 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1175 engine_incref_conn(conn, LSCONN_TICKABLE); 1176 } 1177} 1178 1179 1180void 1181lsquic_engine_add_conn_to_attq (struct lsquic_engine_public *enpub, 1182 lsquic_conn_t *conn, lsquic_time_t tick_time) 1183{ 1184 lsquic_engine_t *const engine = (lsquic_engine_t *) enpub; 1185 if (conn->cn_flags & LSCONN_TICKABLE) 1186 { 1187 /* Optimization: no need to add the connection to the Advisory Tick 1188 * Time Queue: it is about to be ticked, after which it its next tick 1189 * time may be queried again. 1190 */; 1191 } 1192 else if (conn->cn_flags & LSCONN_ATTQ) 1193 { 1194 if (lsquic_conn_adv_time(conn) != tick_time) 1195 { 1196 attq_remove(engine->attq, conn); 1197 if (0 != attq_add(engine->attq, conn, tick_time)) 1198 engine_decref_conn(engine, conn, LSCONN_ATTQ); 1199 } 1200 } 1201 else if (0 == attq_add(engine->attq, conn, tick_time)) 1202 engine_incref_conn(conn, LSCONN_ATTQ); 1203} 1204 1205 1206static struct lsquic_conn * 1207find_conn_by_srst (struct lsquic_engine *engine, 1208 const struct lsquic_packet_in *packet_in) 1209{ 1210 struct lsquic_hash_elem *el; 1211 struct lsquic_conn *conn; 1212 1213 if (packet_in->pi_data_sz < IQUIC_MIN_SRST_SIZE 1214 || (packet_in->pi_data[0] & 0xC0) != 0x40) 1215 return NULL; 1216 1217 el = lsquic_hash_find(engine->pub.enp_srst_hash, 1218 packet_in->pi_data + packet_in->pi_data_sz - IQUIC_SRESET_TOKEN_SZ, 1219 IQUIC_SRESET_TOKEN_SZ); 1220 if (!el) 1221 return NULL; 1222 1223 conn = lsquic_hashelem_getdata(el); 1224 return conn; 1225} 1226 1227 1228/* Return 0 if packet is being processed by a real connection (mini or full), 1229 * otherwise return 1. 
1230 */ 1231static int 1232process_packet_in (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in, 1233 struct packin_parse_state *ppstate, const struct sockaddr *sa_local, 1234 const struct sockaddr *sa_peer, void *peer_ctx) 1235{ 1236 lsquic_conn_t *conn; 1237 const unsigned char *packet_in_data; 1238 size_t packet_in_size; 1239 1240 if (lsquic_packet_in_is_gquic_prst(packet_in) 1241 && !engine->pub.enp_settings.es_honor_prst) 1242 { 1243 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1244 LSQ_DEBUG("public reset packet: discarding"); 1245 return 1; 1246 } 1247 1248 if (engine->flags & ENG_SERVER) 1249 conn = find_or_create_conn(engine, packet_in, ppstate, sa_local, 1250 sa_peer, peer_ctx); 1251 else 1252 conn = find_conn(engine, packet_in, ppstate, sa_local); 1253 1254 if (!conn) 1255 { 1256 if (engine->pub.enp_settings.es_honor_prst 1257 && !(packet_in->pi_flags & PI_GQUIC) 1258 && engine->pub.enp_srst_hash 1259 && (conn = find_conn_by_srst(engine, packet_in))) 1260 { 1261 LSQ_DEBUGC("got stateless reset for connection %"CID_FMT, 1262 CID_BITS(lsquic_conn_log_cid(conn))); 1263 conn->cn_if->ci_stateless_reset(conn); 1264 if (!(conn->cn_flags & LSCONN_TICKABLE) 1265 && conn->cn_if->ci_is_tickable(conn)) 1266 { 1267 lsquic_mh_insert(&engine->conns_tickable, conn, 1268 conn->cn_last_ticked); 1269 engine_incref_conn(conn, LSCONN_TICKABLE); 1270 } 1271 /* Even though the connection processes this packet, we return 1272 * 1 so that the caller does not add reset packet's random 1273 * bytes to the list of valid CIDs. 1274 */ 1275 } 1276 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 1277 return 1; 1278 } 1279 1280 if (0 == (conn->cn_flags & LSCONN_TICKABLE)) 1281 { 1282 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1283 engine_incref_conn(conn, LSCONN_TICKABLE); 1284 } 1285 packet_in->pi_path_id = lsquic_conn_record_sockaddr(conn, peer_ctx, 1286 sa_local, sa_peer); 1287 lsquic_packet_in_upref(packet_in); 1288#if LOG_PACKET_CHECKSUM 1289 log_packet_checksum(lsquic_conn_log_cid(conn), "in", packet_in->pi_data, 1290 packet_in->pi_data_sz); 1291#endif 1292 /* Note on QLog: 1293 * For the PACKET_RX QLog event, we are interested in logging these things: 1294 * - raw packet (however it comes in, encrypted or not) 1295 * - frames (list of frame names) 1296 * - packet type and number 1297 * - packet rx timestamp 1298 * 1299 * Since only some of these items are available at this code 1300 * juncture, we will wait until after the packet has been 1301 * decrypted (if necessary) and parsed to call the log functions. 1302 * 1303 * Once the PACKET_RX event is finally logged, the timestamp 1304 * will come from packet_in->pi_received. For correct sequential 1305 * ordering of QLog events, be sure to process the QLogs downstream. 1306 * (Hint: Use the qlog_parser.py tool in tools/ for full QLog processing.) 
1307 */ 1308 packet_in_data = packet_in->pi_data; 1309 packet_in_size = packet_in->pi_data_sz; 1310 conn->cn_if->ci_packet_in(conn, packet_in); 1311 QLOG_PACKET_RX(lsquic_conn_log_cid(conn), packet_in, packet_in_data, packet_in_size); 1312 lsquic_packet_in_put(&engine->pub.enp_mm, packet_in); 1313 return 0; 1314} 1315 1316 1317void 1318lsquic_engine_destroy (lsquic_engine_t *engine) 1319{ 1320 struct lsquic_hash_elem *el; 1321 lsquic_conn_t *conn; 1322 1323 LSQ_DEBUG("destroying engine"); 1324#ifndef NDEBUG 1325 engine->flags |= ENG_DTOR; 1326#endif 1327 1328 while ((conn = lsquic_mh_pop(&engine->conns_out))) 1329 { 1330 assert(conn->cn_flags & LSCONN_HAS_OUTGOING); 1331 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1332 } 1333 1334 while ((conn = lsquic_mh_pop(&engine->conns_tickable))) 1335 { 1336 assert(conn->cn_flags & LSCONN_TICKABLE); 1337 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1338 } 1339 1340 for (el = lsquic_hash_first(engine->conns_hash); el; 1341 el = lsquic_hash_next(engine->conns_hash)) 1342 { 1343 conn = lsquic_hashelem_getdata(el); 1344 force_close_conn(engine, conn); 1345 } 1346 lsquic_hash_destroy(engine->conns_hash); 1347 1348 assert(0 == engine->n_conns); 1349 assert(0 == engine->mini_conns_count); 1350 if (engine->pr_queue) 1351 prq_destroy(engine->pr_queue); 1352 if (engine->purga) 1353 lsquic_purga_destroy(engine->purga); 1354 attq_destroy(engine->attq); 1355 1356 assert(0 == lsquic_mh_count(&engine->conns_out)); 1357 assert(0 == lsquic_mh_count(&engine->conns_tickable)); 1358 if (engine->pub.enp_shi == &stock_shi) 1359 stock_shared_hash_destroy(engine->pub.enp_shi_ctx); 1360 lsquic_mm_cleanup(&engine->pub.enp_mm); 1361 free(engine->conns_tickable.mh_elems); 1362#ifndef NDEBUG 1363 if (engine->flags & ENG_LOSE_PACKETS) 1364 regfree(&engine->lose_packets_re); 1365#endif 1366 if (engine->pub.enp_tokgen) 1367 lsquic_tg_destroy(engine->pub.enp_tokgen); 1368#if LSQUIC_CONN_STATS 1369 if (engine->stats_fh) 1370 { 1371 const struct conn_stats *const stats = &engine->conn_stats_sum; 1372 fprintf(engine->stats_fh, "Aggregate connection stats collected by engine:\n"); 1373 fprintf(engine->stats_fh, "Connections: %u\n", engine->stats.conns); 1374 fprintf(engine->stats_fh, "Ticks: %lu\n", stats->n_ticks); 1375 fprintf(engine->stats_fh, "In:\n"); 1376 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->in.bytes); 1377 fprintf(engine->stats_fh, " packets: %lu\n", stats->in.packets); 1378 fprintf(engine->stats_fh, " undecryptable packets: %lu\n", stats->in.undec_packets); 1379 fprintf(engine->stats_fh, " duplicate packets: %lu\n", stats->in.dup_packets); 1380 fprintf(engine->stats_fh, " error packets: %lu\n", stats->in.err_packets); 1381 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->in.stream_frames); 1382 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->in.stream_data_sz); 1383 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1384 stats->in.headers_comp, stats->in.headers_uncomp, 1385 stats->in.headers_uncomp ? 
1386 (double) stats->in.headers_comp / (double) stats->in.headers_uncomp 1387 : 0); 1388 fprintf(engine->stats_fh, " ACK frames: %lu\n", stats->in.n_acks); 1389 fprintf(engine->stats_fh, " ACK frames processed: %lu\n", stats->in.n_acks_proc); 1390 fprintf(engine->stats_fh, " ACK frames merged to new: %lu\n", stats->in.n_acks_merged[0]); 1391 fprintf(engine->stats_fh, " ACK frames merged to old: %lu\n", stats->in.n_acks_merged[1]); 1392 fprintf(engine->stats_fh, "Out:\n"); 1393 fprintf(engine->stats_fh, " Total bytes: %lu\n", stats->out.bytes); 1394 fprintf(engine->stats_fh, " packets: %lu\n", stats->out.packets); 1395 fprintf(engine->stats_fh, " acked via loss record: %lu\n", stats->out.acked_via_loss); 1396 fprintf(engine->stats_fh, " acks: %lu\n", stats->out.acks); 1397 fprintf(engine->stats_fh, " retx packets: %lu\n", stats->out.retx_packets); 1398 fprintf(engine->stats_fh, " STREAM frame count: %lu\n", stats->out.stream_frames); 1399 fprintf(engine->stats_fh, " STREAM payload size: %lu\n", stats->out.stream_data_sz); 1400 fprintf(engine->stats_fh, " Header bytes: %lu; uncompressed: %lu; ratio %.3lf\n", 1401 stats->out.headers_comp, stats->out.headers_uncomp, 1402 stats->out.headers_uncomp ? 1403 (double) stats->out.headers_comp / (double) stats->out.headers_uncomp 1404 : 0); 1405 fprintf(engine->stats_fh, " ACKs: %lu\n", stats->out.acks); 1406 } 1407#endif 1408 if (engine->pub.enp_srst_hash) 1409 lsquic_hash_destroy(engine->pub.enp_srst_hash); 1410#if LSQUIC_COUNT_ENGINE_CALLS 1411 LSQ_NOTICE("number of calls into the engine: %lu", engine->n_engine_calls); 1412#endif 1413 free(engine); 1414} 1415 1416 1417static struct conn_cid_elem * 1418find_free_cce (struct lsquic_conn *conn) 1419{ 1420 struct conn_cid_elem *cce; 1421 1422 for (cce = conn->cn_cces; cce < END_OF_CCES(conn); ++cce) 1423 if (!(conn->cn_cces_mask & (1 << (cce - conn->cn_cces)))) 1424 return cce; 1425 1426 return NULL; 1427} 1428 1429 1430static int 1431add_conn_to_hash (struct lsquic_engine *engine, struct lsquic_conn *conn, 1432 const struct sockaddr *local_sa, void *peer_ctx) 1433{ 1434 struct conn_cid_elem *cce; 1435 1436 if (engine->flags & ENG_CONNS_BY_ADDR) 1437 { 1438 cce = find_free_cce(conn); 1439 if (!cce) 1440 { 1441 LSQ_ERROR("cannot find free CCE"); 1442 return -1; 1443 } 1444 cce->cce_port = sa2port(local_sa); 1445 cce->cce_flags = CCE_PORT; 1446 if (lsquic_hash_insert(engine->conns_hash, &cce->cce_port, 1447 sizeof(cce->cce_port), conn, &cce->cce_hash_el)) 1448 { 1449 conn->cn_cces_mask |= 1 << (cce - conn->cn_cces); 1450 return 0; 1451 } 1452 else 1453 return -1; 1454 1455 } 1456 else 1457 return insert_conn_into_hash(engine, conn, peer_ctx); 1458} 1459 1460 1461lsquic_conn_t * 1462lsquic_engine_connect (lsquic_engine_t *engine, const struct sockaddr *local_sa, 1463 const struct sockaddr *peer_sa, 1464 void *peer_ctx, lsquic_conn_ctx_t *conn_ctx, 1465 const char *hostname, unsigned short max_packet_size, 1466 const unsigned char *zero_rtt, size_t zero_rtt_len, 1467 const unsigned char *token, size_t token_sz) 1468{ 1469 lsquic_conn_t *conn; 1470 unsigned flags; 1471 int is_ipv4; 1472 1473 ENGINE_IN(engine); 1474 1475 if (engine->flags & ENG_SERVER) 1476 { 1477 LSQ_ERROR("`%s' must only be called in client mode", __func__); 1478 goto err; 1479 } 1480 1481 if (engine->flags & ENG_CONNS_BY_ADDR 1482 && find_conn_by_addr(engine->conns_hash, local_sa)) 1483 { 1484 LSQ_ERROR("cannot have more than one connection on the same port"); 1485 goto err; 1486 } 1487 1488 if (0 != maybe_grow_conn_heaps(engine)) 1489 
return NULL; 1490 flags = engine->flags & (ENG_SERVER|ENG_HTTP); 1491 is_ipv4 = peer_sa->sa_family == AF_INET; 1492 if (engine->pub.enp_settings.es_versions & LSQUIC_IETF_VERSIONS) 1493 conn = lsquic_ietf_full_conn_client_new(&engine->pub, 1494 flags, hostname, max_packet_size, 1495 is_ipv4, zero_rtt, zero_rtt_len, token, token_sz); 1496 else 1497 conn = lsquic_gquic_full_conn_client_new(&engine->pub, flags, 1498 hostname, max_packet_size, is_ipv4, 1499 zero_rtt, zero_rtt_len); 1500 if (!conn) 1501 goto err; 1502 EV_LOG_CREATE_CONN(lsquic_conn_log_cid(conn), local_sa, peer_sa); 1503 EV_LOG_VER_NEG(lsquic_conn_log_cid(conn), "proposed", 1504 lsquic_ver2str[conn->cn_version]); 1505 ++engine->n_conns; 1506 lsquic_conn_record_sockaddr(conn, peer_ctx, local_sa, peer_sa); 1507 if (0 != add_conn_to_hash(engine, conn, local_sa, peer_ctx)) 1508 { 1509 const lsquic_cid_t *cid = lsquic_conn_log_cid(conn); 1510 LSQ_WARNC("cannot add connection %"CID_FMT" to hash - destroy", 1511 CID_BITS(cid)); 1512 destroy_conn(engine, conn, lsquic_time_now()); 1513 goto err; 1514 } 1515 assert(!(conn->cn_flags & 1516 (CONN_REF_FLAGS 1517 & ~LSCONN_TICKABLE /* This flag may be set as effect of user 1518 callbacks */ 1519 ))); 1520 conn->cn_flags |= LSCONN_HASHED; 1521 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1522 engine_incref_conn(conn, LSCONN_TICKABLE); 1523 lsquic_conn_set_ctx(conn, conn_ctx); 1524 conn->cn_if->ci_client_call_on_new(conn); 1525 end: 1526 ENGINE_OUT(engine); 1527 return conn; 1528 err: 1529 conn = NULL; 1530 goto end; 1531} 1532 1533 1534static void 1535remove_conn_from_hash (lsquic_engine_t *engine, lsquic_conn_t *conn) 1536{ 1537 remove_all_cces_from_hash(engine->conns_hash, conn); 1538 (void) engine_decref_conn(engine, conn, LSCONN_HASHED); 1539} 1540 1541 1542static void 1543refflags2str (enum lsquic_conn_flags flags, char s[6]) 1544{ 1545 *s = 'C'; s += !!(flags & LSCONN_CLOSING); 1546 *s = 'H'; s += !!(flags & LSCONN_HASHED); 1547 *s = 'O'; s += !!(flags & LSCONN_HAS_OUTGOING); 1548 *s = 'T'; s += !!(flags & LSCONN_TICKABLE); 1549 *s = 'A'; s += !!(flags & LSCONN_ATTQ); 1550 *s = 'K'; s += !!(flags & LSCONN_TICKED); 1551 *s = '\0'; 1552} 1553 1554 1555static void 1556engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag) 1557{ 1558 const lsquic_cid_t *cid; 1559 char str[2][7]; 1560 assert(flag & CONN_REF_FLAGS); 1561 assert(!(conn->cn_flags & flag)); 1562 conn->cn_flags |= flag; 1563 cid = lsquic_conn_log_cid(conn); 1564 LSQ_DEBUGC("incref conn %"CID_FMT", '%s' -> '%s'", CID_BITS(cid), 1565 (refflags2str(conn->cn_flags & ~flag, str[0]), str[0]), 1566 (refflags2str(conn->cn_flags, str[1]), str[1])); 1567} 1568 1569 1570static lsquic_conn_t * 1571engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn, 1572 enum lsquic_conn_flags flags) 1573{ 1574 const lsquic_cid_t *cid; 1575 char str[2][7]; 1576 lsquic_time_t now; 1577 assert(flags & CONN_REF_FLAGS); 1578 assert(conn->cn_flags & flags); 1579#ifndef NDEBUG 1580 if (flags & LSCONN_CLOSING) 1581 assert(0 == (conn->cn_flags & LSCONN_HASHED)); 1582#endif 1583 conn->cn_flags &= ~flags; 1584 cid = lsquic_conn_log_cid(conn); 1585 LSQ_DEBUGC("decref conn %"CID_FMT", '%s' -> '%s'", CID_BITS(cid), 1586 (refflags2str(conn->cn_flags | flags, str[0]), str[0]), 1587 (refflags2str(conn->cn_flags, str[1]), str[1])); 1588 if (0 == (conn->cn_flags & CONN_REF_FLAGS)) 1589 { 1590 now = lsquic_time_now(); 1591 if (conn->cn_flags & LSCONN_MINI) 1592 eng_hist_inc(&engine->history, now, sl_del_mini_conns); 1593 
else 1594 eng_hist_inc(&engine->history, now, sl_del_full_conns); 1595 destroy_conn(engine, conn, now); 1596 return NULL; 1597 } 1598 else 1599 return conn; 1600} 1601 1602 1603/* This is not a general-purpose function. Only call from engine dtor. */ 1604static void 1605force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn) 1606{ 1607 assert(engine->flags & ENG_DTOR); 1608 const enum lsquic_conn_flags flags = conn->cn_flags; 1609 assert(conn->cn_flags & CONN_REF_FLAGS); 1610 assert(!(flags & LSCONN_HAS_OUTGOING)); /* Should be removed already */ 1611 assert(!(flags & LSCONN_TICKABLE)); /* Should be removed already */ 1612 assert(!(flags & LSCONN_CLOSING)); /* It is in transient queue? */ 1613 if (flags & LSCONN_ATTQ) 1614 { 1615 attq_remove(engine->attq, conn); 1616 (void) engine_decref_conn(engine, conn, LSCONN_ATTQ); 1617 } 1618 if (flags & LSCONN_HASHED) 1619 remove_conn_from_hash(engine, conn); 1620} 1621 1622 1623/* Iterator for tickable connections (those on the Tickable Queue). Before 1624 * a connection is returned, it is removed from the Advisory Tick Time queue 1625 * if necessary. 1626 */ 1627static lsquic_conn_t * 1628conn_iter_next_tickable (struct lsquic_engine *engine) 1629{ 1630 lsquic_conn_t *conn; 1631 1632 if (engine->flags & ENG_SERVER) 1633 while (1) 1634 { 1635 conn = lsquic_mh_pop(&engine->conns_tickable); 1636 if (conn && (conn->cn_flags & LSCONN_SKIP_ON_PROC)) 1637 (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1638 else 1639 break; 1640 } 1641 else 1642 conn = lsquic_mh_pop(&engine->conns_tickable); 1643 1644 if (conn) 1645 conn = engine_decref_conn(engine, conn, LSCONN_TICKABLE); 1646 if (conn && (conn->cn_flags & LSCONN_ATTQ)) 1647 { 1648 attq_remove(engine->attq, conn); 1649 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1650 } 1651 1652 return conn; 1653} 1654 1655 1656static void 1657cub_init (struct cid_update_batch *cub, lsquic_cids_update_f update, 1658 void *update_ctx) 1659{ 1660 cub->cub_update_cids = update; 1661 cub->cub_update_ctx = update_ctx; 1662 cub->cub_count = 0; 1663} 1664 1665 1666static void 1667cub_flush (struct cid_update_batch *cub) 1668{ 1669 if (cub->cub_count > 0 && cub->cub_update_cids) 1670 cub->cub_update_cids(cub->cub_update_ctx, cub->cub_peer_ctxs, 1671 cub->cub_cids, cub->cub_count); 1672 cub->cub_count = 0; 1673} 1674 1675 1676static void 1677cub_add (struct cid_update_batch *cub, const lsquic_cid_t *cid, void *peer_ctx) 1678{ 1679 cub->cub_cids [ cub->cub_count ] = *cid; 1680 cub->cub_peer_ctxs[ cub->cub_count ] = peer_ctx; 1681 ++cub->cub_count; 1682 if (cub->cub_count == sizeof(cub->cub_cids) / sizeof(cub->cub_cids[0])) 1683 cub_flush(cub); 1684} 1685 1686 1687/* Process registered CIDs */ 1688static void 1689cub_add_cids_from_cces (struct cid_update_batch *cub, struct lsquic_conn *conn) 1690{ 1691 struct cce_cid_iter citer; 1692 struct conn_cid_elem *cce; 1693 void *peer_ctx; 1694 1695 peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL); 1696 for (cce = cce_iter_first(&citer, conn); cce; cce = cce_iter_next(&citer)) 1697 if (cce->cce_flags & CCE_REG) 1698 cub_add(cub, &cce->cce_cid, peer_ctx); 1699} 1700 1701 1702static void 1703drop_all_mini_conns (lsquic_engine_t *engine) 1704{ 1705 struct lsquic_hash_elem *el; 1706 lsquic_conn_t *conn; 1707 struct cid_update_batch cub; 1708 1709 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 1710 1711 for (el = lsquic_hash_first(engine->conns_hash); el; 1712 el = lsquic_hash_next(engine->conns_hash)) 1713 { 1714 conn = lsquic_hashelem_getdata(el); 1715 
if (conn->cn_flags & LSCONN_MINI) 1716 { 1717 /* If promoted, why is it still in this hash? */ 1718 assert(!(conn->cn_flags & LSCONN_PROMOTED)); 1719 if (!(conn->cn_flags & LSCONN_PROMOTED)) 1720 cub_add_cids_from_cces(&cub, conn); 1721 remove_conn_from_hash(engine, conn); 1722 } 1723 } 1724 1725 cub_flush(&cub); 1726} 1727 1728 1729void 1730lsquic_engine_process_conns (lsquic_engine_t *engine) 1731{ 1732 lsquic_conn_t *conn; 1733 lsquic_time_t now; 1734 1735 ENGINE_IN(engine); 1736 1737 now = lsquic_time_now(); 1738 while ((conn = attq_pop(engine->attq, now))) 1739 { 1740 conn = engine_decref_conn(engine, conn, LSCONN_ATTQ); 1741 if (conn && !(conn->cn_flags & LSCONN_TICKABLE)) 1742 { 1743 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 1744 engine_incref_conn(conn, LSCONN_TICKABLE); 1745 } 1746 } 1747 1748 process_connections(engine, conn_iter_next_tickable, now); 1749 ENGINE_OUT(engine); 1750} 1751 1752 1753static void 1754release_or_return_enc_data (struct lsquic_engine *engine, 1755 void (*pmi_rel_or_ret) (void *, void *, void *, char), 1756 struct lsquic_conn *conn, struct lsquic_packet_out *packet_out) 1757{ 1758 pmi_rel_or_ret(engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1759 packet_out->po_enc_data, lsquic_packet_out_ipv6(packet_out)); 1760 packet_out->po_flags &= ~PO_ENCRYPTED; 1761 packet_out->po_enc_data = NULL; 1762} 1763 1764 1765static void 1766release_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1767 struct lsquic_packet_out *packet_out) 1768{ 1769 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_release, 1770 conn, packet_out); 1771} 1772 1773 1774static void 1775return_enc_data (struct lsquic_engine *engine, struct lsquic_conn *conn, 1776 struct lsquic_packet_out *packet_out) 1777{ 1778 release_or_return_enc_data(engine, engine->pub.enp_pmi->pmi_return, 1779 conn, packet_out); 1780} 1781 1782 1783static int 1784copy_packet (struct lsquic_engine *engine, struct lsquic_conn *conn, 1785 struct lsquic_packet_out *packet_out) 1786{ 1787 int ipv6; 1788 1789 ipv6 = NP_IS_IPv6(packet_out->po_path); 1790 if (packet_out->po_flags & PO_ENCRYPTED) 1791 { 1792 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1793 && packet_out->po_data_sz == packet_out->po_enc_data_sz 1794 && 0 == memcmp(packet_out->po_data, packet_out->po_enc_data, 1795 packet_out->po_data_sz)) 1796 return 0; 1797 if (ipv6 == lsquic_packet_out_ipv6(packet_out) 1798 && packet_out->po_data_sz <= packet_out->po_enc_data_sz) 1799 goto copy; 1800 return_enc_data(engine, conn, packet_out); 1801 } 1802 1803 packet_out->po_enc_data = engine->pub.enp_pmi->pmi_allocate( 1804 engine->pub.enp_pmi_ctx, packet_out->po_path->np_peer_ctx, 1805 packet_out->po_data_sz, ipv6); 1806 if (!packet_out->po_enc_data) 1807 { 1808 LSQ_DEBUG("could not allocate memory for outgoing unencrypted packet " 1809 "of size %hu", packet_out->po_data_sz); 1810 return -1; 1811 } 1812 1813 copy: 1814 memcpy(packet_out->po_enc_data, packet_out->po_data, 1815 packet_out->po_data_sz); 1816 packet_out->po_enc_data_sz = packet_out->po_data_sz; 1817 packet_out->po_sent_sz = packet_out->po_data_sz; 1818 packet_out->po_flags &= ~PO_IPv6; 1819 packet_out->po_flags |= PO_ENCRYPTED|PO_SENT_SZ|(ipv6 << POIPv6_SHIFT); 1820 1821 return 0; 1822} 1823 1824 1825STAILQ_HEAD(conns_stailq, lsquic_conn); 1826TAILQ_HEAD(conns_tailq, lsquic_conn); 1827 1828 1829struct conns_out_iter 1830{ 1831 struct min_heap *coi_heap; 1832 struct pr_queue *coi_prq; 1833 TAILQ_HEAD(, lsquic_conn) coi_active_list, 1834 
coi_inactive_list; 1835 lsquic_conn_t *coi_next; 1836#ifndef NDEBUG 1837 lsquic_time_t coi_last_sent; 1838#endif 1839}; 1840 1841 1842static void 1843coi_init (struct conns_out_iter *iter, struct lsquic_engine *engine) 1844{ 1845 iter->coi_heap = &engine->conns_out; 1846 iter->coi_prq = engine->pr_queue; 1847 iter->coi_next = NULL; 1848 TAILQ_INIT(&iter->coi_active_list); 1849 TAILQ_INIT(&iter->coi_inactive_list); 1850#ifndef NDEBUG 1851 iter->coi_last_sent = 0; 1852#endif 1853} 1854 1855 1856static lsquic_conn_t * 1857coi_next (struct conns_out_iter *iter) 1858{ 1859 lsquic_conn_t *conn; 1860 1861 if (lsquic_mh_count(iter->coi_heap) > 0) 1862 { 1863 conn = lsquic_mh_pop(iter->coi_heap); 1864 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1865 conn->cn_flags |= LSCONN_COI_ACTIVE; 1866#ifndef NDEBUG 1867 if (iter->coi_last_sent) 1868 assert(iter->coi_last_sent <= conn->cn_last_sent); 1869 iter->coi_last_sent = conn->cn_last_sent; 1870#endif 1871 return conn; 1872 } 1873 else if (iter->coi_prq && (conn = prq_next_conn(iter->coi_prq))) 1874 { 1875 return conn; 1876 } 1877 else if (!TAILQ_EMPTY(&iter->coi_active_list)) 1878 { 1879 iter->coi_prq = NULL; /* Save function call in previous conditional */ 1880 conn = iter->coi_next; 1881 if (!conn) 1882 conn = TAILQ_FIRST(&iter->coi_active_list); 1883 if (conn) 1884 iter->coi_next = TAILQ_NEXT(conn, cn_next_out); 1885 return conn; 1886 } 1887 else 1888 return NULL; 1889} 1890 1891 1892static void 1893coi_deactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1894{ 1895 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 1896 { 1897 assert(!TAILQ_EMPTY(&iter->coi_active_list)); 1898 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1899 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1900 TAILQ_INSERT_TAIL(&iter->coi_inactive_list, conn, cn_next_out); 1901 conn->cn_flags |= LSCONN_COI_INACTIVE; 1902 } 1903} 1904 1905 1906static void 1907coi_reactivate (struct conns_out_iter *iter, lsquic_conn_t *conn) 1908{ 1909 assert(conn->cn_flags & LSCONN_COI_INACTIVE); 1910 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 1911 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 1912 TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out); 1913 conn->cn_flags |= LSCONN_COI_ACTIVE; 1914} 1915 1916 1917static void 1918coi_reheap (struct conns_out_iter *iter, lsquic_engine_t *engine) 1919{ 1920 lsquic_conn_t *conn; 1921 while ((conn = TAILQ_FIRST(&iter->coi_active_list))) 1922 { 1923 TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out); 1924 conn->cn_flags &= ~LSCONN_COI_ACTIVE; 1925 lsquic_mh_insert(iter->coi_heap, conn, conn->cn_last_sent); 1926 } 1927 while ((conn = TAILQ_FIRST(&iter->coi_inactive_list))) 1928 { 1929 TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out); 1930 conn->cn_flags &= ~LSCONN_COI_INACTIVE; 1931 (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING); 1932 } 1933} 1934 1935 1936#ifndef NDEBUG 1937static void 1938lose_matching_packets (const lsquic_engine_t *engine, struct out_batch *batch, 1939 unsigned n) 1940{ 1941 const lsquic_cid_t *cid; 1942 struct iovec *iov; 1943 unsigned i; 1944 char packno_str[22]; 1945 1946 for (i = 0; i < n; ++i) 1947 { 1948 snprintf(packno_str, sizeof(packno_str), "%"PRIu64, 1949 batch->packets[i]->po_packno); 1950 if (0 == regexec(&engine->lose_packets_re, packno_str, 0, NULL, 0)) 1951 { 1952 for (iov = batch->outs[i].iov; iov < 1953 batch->outs[i].iov + batch->outs[i].iovlen; ++iov) 1954 batch->outs[i].iov->iov_len -= 1; 1955 cid = lsquic_conn_log_cid(batch->conns[i]); 1956 
LSQ_WARNC("losing packet %s for connection %"CID_FMT, packno_str, 1957 CID_BITS(cid)); 1958 } 1959 } 1960} 1961 1962 1963#endif 1964 1965 1966#ifdef NDEBUG 1967#define CONST_BATCH const 1968#else 1969#define CONST_BATCH 1970#endif 1971 1972static unsigned 1973send_batch (lsquic_engine_t *engine, struct conns_out_iter *conns_iter, 1974 CONST_BATCH struct out_batch *batch, unsigned n_to_send) 1975{ 1976 int n_sent, i; 1977 lsquic_time_t now; 1978 unsigned off; 1979 size_t count; 1980 struct lsquic_packet_out *CONST_BATCH *packet_out, *CONST_BATCH *end; 1981 1982#ifndef NDEBUG 1983 if (engine->flags & ENG_LOSE_PACKETS) 1984 lose_matching_packets(engine, batch, n_to_send); 1985#endif 1986 /* Set sent time before the write to avoid underestimating RTT */ 1987 now = lsquic_time_now(); 1988 for (i = 0; i < (int) n_to_send; ++i) 1989 { 1990 off = batch->pack_off[i]; 1991 count = batch->outs[i].iovlen; 1992 assert(count > 0); 1993 packet_out = &batch->packets[off]; 1994 end = packet_out + count; 1995 do 1996 (*packet_out)->po_sent = now; 1997 while (++packet_out < end); 1998 } 1999 n_sent = engine->packets_out(engine->packets_out_ctx, batch->outs, 2000 n_to_send); 2001 if (n_sent < (int) n_to_send) 2002 { 2003 engine->pub.enp_flags &= ~ENPUB_CAN_SEND; 2004 engine->resume_sending_at = now + 1000000; 2005 LSQ_DEBUG("cannot send packets"); 2006 EV_LOG_GENERIC_EVENT("cannot send packets"); 2007 } 2008 if (n_sent >= 0) 2009 LSQ_DEBUG("packets out returned %d (out of %u)", n_sent, n_to_send); 2010 else 2011 { 2012 LSQ_DEBUG("packets out returned an error: %s", strerror(errno)); 2013 n_sent = 0; 2014 } 2015 if (n_sent > 0) 2016 engine->last_sent = now + n_sent; 2017 for (i = 0; i < n_sent; ++i) 2018 { 2019 eng_hist_inc(&engine->history, now, sl_packets_out); 2020 /* `i' is added to maintain relative order */ 2021 batch->conns[i]->cn_last_sent = now + i; 2022 2023 off = batch->pack_off[i]; 2024 count = batch->outs[i].iovlen; 2025 assert(count > 0); 2026 packet_out = &batch->packets[off]; 2027 end = packet_out + count; 2028 do 2029 { 2030#if LOG_PACKET_CHECKSUM 2031 log_packet_checksum(lsquic_conn_log_cid(batch->conns[i]), "out", 2032 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_base, 2033 batch->outs[i].iov[packet_out - &batch->packets[off]].iov_len); 2034#endif 2035 EV_LOG_PACKET_SENT(lsquic_conn_log_cid(batch->conns[i]), 2036 *packet_out); 2037 /* Release packet out buffer as soon as the packet is sent 2038 * successfully. If not successfully sent, we hold on to 2039 * this buffer until the packet sending is attempted again 2040 * or until it times out and regenerated. 2041 */ 2042 if ((*packet_out)->po_flags & PO_ENCRYPTED) 2043 release_enc_data(engine, batch->conns[i], *packet_out); 2044 batch->conns[i]->cn_if->ci_packet_sent(batch->conns[i], 2045 *packet_out); 2046 } 2047 while (++packet_out < end); 2048 } 2049 if (LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT)) 2050 for ( ; i < (int) n_to_send; ++i) 2051 { 2052 off = batch->pack_off[i]; 2053 count = batch->outs[i].iovlen; 2054 assert(count > 0); 2055 packet_out = &batch->packets[off]; 2056 end = packet_out + count; 2057 do 2058 EV_LOG_PACKET_NOT_SENT(lsquic_conn_log_cid(batch->conns[i]), 2059 *packet_out); 2060 while (++packet_out < end); 2061 } 2062 /* Return packets to the connection in reverse order so that the packet 2063 * ordering is maintained. 
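     * (ci_packet_not_sent() is expected to put each unsent packet back at
     * the head of the connection's scheduled queue, so walking the batch
     * backwards restores the original order.)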
2064 */ 2065 for (i = (int) n_to_send - 1; i >= n_sent; --i) 2066 { 2067 off = batch->pack_off[i]; 2068 count = batch->outs[i].iovlen; 2069 assert(count > 0); 2070 packet_out = &batch->packets[off + count - 1]; 2071 end = &batch->packets[off - 1]; 2072 do 2073 batch->conns[i]->cn_if->ci_packet_not_sent(batch->conns[i], 2074 *packet_out); 2075 while (--packet_out > end); 2076 if (!(batch->conns[i]->cn_flags & (LSCONN_COI_ACTIVE|LSCONN_EVANESCENT))) 2077 coi_reactivate(conns_iter, batch->conns[i]); 2078 } 2079 return n_sent; 2080} 2081 2082 2083/* Return 1 if went past deadline, 0 otherwise */ 2084static int 2085check_deadline (lsquic_engine_t *engine) 2086{ 2087 if (engine->pub.enp_settings.es_proc_time_thresh && 2088 lsquic_time_now() > engine->deadline) 2089 { 2090 LSQ_INFO("went past threshold of %u usec, stop sending", 2091 engine->pub.enp_settings.es_proc_time_thresh); 2092 engine->flags |= ENG_PAST_DEADLINE; 2093 return 1; 2094 } 2095 else 2096 return 0; 2097} 2098 2099 2100static size_t 2101iov_size (const struct iovec *iov, const struct iovec *const end) 2102{ 2103 size_t size; 2104 2105 assert(iov < end); 2106 2107 size = 0; 2108 do 2109 size += iov->iov_len; 2110 while (++iov < end); 2111 2112 return size; 2113} 2114 2115 2116/* XXX A lot of extra setup -- two extra arguments to this function, two extra 2117 * connection ref flags and queues -- is just to handle the ENCPA_BADCRYPT case, 2118 * which never really happens. 2119 */ 2120static void 2121send_packets_out (struct lsquic_engine *engine, 2122 struct conns_tailq *ticked_conns, 2123 struct conns_stailq *closed_conns) 2124{ 2125 const lsquic_cid_t *cid; 2126 unsigned n, w, n_sent, n_batches_sent; 2127 lsquic_packet_out_t *packet_out; 2128 struct lsquic_packet_out **packet; 2129 lsquic_conn_t *conn; 2130 struct out_batch *const batch = &engine->out_batch; 2131 struct iovec *iov, *packet_iov; 2132 struct conns_out_iter conns_iter; 2133 int shrink, deadline_exceeded; 2134 2135 coi_init(&conns_iter, engine); 2136 n_batches_sent = 0; 2137 n_sent = 0, n = 0; 2138 shrink = 0; 2139 deadline_exceeded = 0; 2140 iov = batch->iov; 2141 packet = batch->packets; 2142 2143 while ((conn = coi_next(&conns_iter))) 2144 { 2145 cid = lsquic_conn_log_cid(conn); 2146 packet_out = conn->cn_if->ci_next_packet_to_send(conn, 0); 2147 if (!packet_out) { 2148 /* Evanescent connection always has a packet to send: */ 2149 assert(!(conn->cn_flags & LSCONN_EVANESCENT)); 2150 LSQ_DEBUGC("batched all outgoing packets for %s conn %"CID_FMT, 2151 (conn->cn_flags & LSCONN_MINI ? 
"mini" : 2152 "full"), CID_BITS(cid)); 2153 coi_deactivate(&conns_iter, conn); 2154 continue; 2155 } 2156 batch->outs[n].iov = packet_iov = iov; 2157 next_coa: 2158 if (!(packet_out->po_flags & (PO_ENCRYPTED|PO_NOENCRYPT))) 2159 { 2160 switch (conn->cn_esf_c->esf_encrypt_packet(conn->cn_enc_session, 2161 &engine->pub, conn, packet_out)) 2162 { 2163 case ENCPA_NOMEM: 2164 /* Send what we have and wait for a more opportune moment */ 2165 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2166 goto end_for; 2167 case ENCPA_BADCRYPT: 2168 /* This is pretty bad: close connection immediately */ 2169 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2170 LSQ_INFOC("conn %"CID_FMT" has unsendable packets", 2171 CID_BITS(cid)); 2172 if (!(conn->cn_flags & LSCONN_EVANESCENT)) 2173 { 2174 if (!(conn->cn_flags & LSCONN_CLOSING)) 2175 { 2176 STAILQ_INSERT_TAIL(closed_conns, conn, cn_next_closed_conn); 2177 engine_incref_conn(conn, LSCONN_CLOSING); 2178 if (conn->cn_flags & LSCONN_HASHED) 2179 remove_conn_from_hash(engine, conn); 2180 } 2181 coi_deactivate(&conns_iter, conn); 2182 if (conn->cn_flags & LSCONN_TICKED) 2183 { 2184 TAILQ_REMOVE(ticked_conns, conn, cn_next_ticked); 2185 engine_decref_conn(engine, conn, LSCONN_TICKED); 2186 } 2187 } 2188 continue; 2189 case ENCPA_OK: 2190 break; 2191 } 2192 } 2193 else if ((packet_out->po_flags & PO_NOENCRYPT) 2194 && engine->pub.enp_pmi != &stock_pmi) 2195 { 2196 if (0 != copy_packet(engine, conn, packet_out)) 2197 { 2198 /* Copy can only fail if packet could not be allocated */ 2199 conn->cn_if->ci_packet_not_sent(conn, packet_out); 2200 goto end_for; 2201 } 2202 } 2203 LSQ_DEBUGC("batched packet %"PRIu64" for connection %"CID_FMT, 2204 packet_out->po_packno, CID_BITS(cid)); 2205 if (packet_out->po_flags & PO_ENCRYPTED) 2206 { 2207 iov->iov_base = packet_out->po_enc_data; 2208 iov->iov_len = packet_out->po_enc_data_sz; 2209 } 2210 else 2211 { 2212 iov->iov_base = packet_out->po_data; 2213 iov->iov_len = packet_out->po_data_sz; 2214 } 2215 if (packet_iov == iov) 2216 { 2217 batch->pack_off[n] = packet - batch->packets; 2218 batch->outs [n].ecn = lsquic_packet_out_ecn(packet_out); 2219 batch->outs [n].peer_ctx = packet_out->po_path->np_peer_ctx; 2220 batch->outs [n].local_sa = NP_LOCAL_SA(packet_out->po_path); 2221 batch->outs [n].dest_sa = NP_PEER_SA(packet_out->po_path); 2222 batch->conns [n] = conn; 2223 } 2224 *packet = packet_out; 2225 ++packet; 2226 ++iov; 2227 if ((conn->cn_flags & LSCONN_IETF) 2228 && ((1 << packet_out->po_header_type) 2229 & ((1 << HETY_INITIAL)|(1 << HETY_HANDSHAKE)|(1 << HETY_0RTT))) 2230#ifndef NDEBUG 2231 && (engine->flags & ENG_COALESCE) 2232#endif 2233 && iov < batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2234 { 2235 const size_t size = iov_size(packet_iov, iov); 2236 packet_out = conn->cn_if->ci_next_packet_to_send(conn, size); 2237 if (packet_out) 2238 goto next_coa; 2239 } 2240 batch->outs [n].iovlen = iov - packet_iov; 2241 ++n; 2242 if (n == engine->batch_size 2243 || iov >= batch->iov + sizeof(batch->iov) / sizeof(batch->iov[0])) 2244 { 2245 w = send_batch(engine, &conns_iter, batch, n); 2246 n = 0; 2247 iov = batch->iov; 2248 packet = batch->packets; 2249 ++n_batches_sent; 2250 n_sent += w; 2251 if (w < engine->batch_size) 2252 { 2253 shrink = 1; 2254 break; 2255 } 2256 deadline_exceeded = check_deadline(engine); 2257 if (deadline_exceeded) 2258 break; 2259 grow_batch_size(engine); 2260 } 2261 } 2262 end_for: 2263 2264 if (n > 0) { 2265 w = send_batch(engine, &conns_iter, batch, n); 2266 n_sent += w; 2267 
shrink = w < n; 2268 ++n_batches_sent; 2269 } 2270 2271 if (shrink) 2272 shrink_batch_size(engine); 2273 else if (n_batches_sent > 1) 2274 { 2275 deadline_exceeded = check_deadline(engine); 2276 if (!deadline_exceeded) 2277 grow_batch_size(engine); 2278 } 2279 2280 coi_reheap(&conns_iter, engine); 2281 2282 LSQ_DEBUG("%s: sent %u packet%.*s", __func__, n_sent, n_sent != 1, "s"); 2283} 2284 2285 2286int 2287lsquic_engine_has_unsent_packets (lsquic_engine_t *engine) 2288{ 2289 return lsquic_mh_count(&engine->conns_out) > 0 2290 || (engine->pr_queue && prq_have_pending(engine->pr_queue)) 2291 ; 2292} 2293 2294 2295static void 2296reset_deadline (lsquic_engine_t *engine, lsquic_time_t now) 2297{ 2298 engine->deadline = now + engine->pub.enp_settings.es_proc_time_thresh; 2299 engine->flags &= ~ENG_PAST_DEADLINE; 2300} 2301 2302 2303/* TODO: this is a user-facing function, account for load */ 2304void 2305lsquic_engine_send_unsent_packets (lsquic_engine_t *engine) 2306{ 2307 lsquic_conn_t *conn; 2308 struct conns_stailq closed_conns; 2309 struct conns_tailq ticked_conns = TAILQ_HEAD_INITIALIZER(ticked_conns); 2310 struct cid_update_batch cub; 2311 2312 ENGINE_IN(engine); 2313 cub_init(&cub, engine->report_old_scids, engine->scids_ctx); 2314 STAILQ_INIT(&closed_conns); 2315 reset_deadline(engine, lsquic_time_now()); 2316 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND)) 2317 { 2318 LSQ_DEBUG("can send again"); 2319 EV_LOG_GENERIC_EVENT("can send again"); 2320 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2321 } 2322 2323 send_packets_out(engine, &ticked_conns, &closed_conns); 2324 2325 while ((conn = STAILQ_FIRST(&closed_conns))) { 2326 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2327 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2328 cub_add_cids_from_cces(&cub, conn); 2329 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2330 } 2331 2332 cub_flush(&cub); 2333 ENGINE_OUT(engine); 2334} 2335 2336 2337static lsquic_conn_t * 2338next_new_full_conn (struct conns_stailq *new_full_conns) 2339{ 2340 lsquic_conn_t *conn; 2341 2342 conn = STAILQ_FIRST(new_full_conns); 2343 if (conn) 2344 STAILQ_REMOVE_HEAD(new_full_conns, cn_next_new_full); 2345 return conn; 2346} 2347 2348 2349static void 2350process_connections (lsquic_engine_t *engine, conn_iter_f next_conn, 2351 lsquic_time_t now) 2352{ 2353 lsquic_conn_t *conn; 2354 enum tick_st tick_st; 2355 unsigned i; 2356 lsquic_time_t next_tick_time; 2357 struct conns_stailq closed_conns; 2358 struct conns_tailq ticked_conns; 2359 struct conns_stailq new_full_conns; 2360 struct cid_update_batch cub_old, cub_live; 2361 cub_init(&cub_old, engine->report_old_scids, engine->scids_ctx); 2362 cub_init(&cub_live, engine->report_live_scids, engine->scids_ctx); 2363 2364 eng_hist_tick(&engine->history, now); 2365 2366 STAILQ_INIT(&closed_conns); 2367 TAILQ_INIT(&ticked_conns); 2368 reset_deadline(engine, now); 2369 STAILQ_INIT(&new_full_conns); 2370 2371 if (!(engine->pub.enp_flags & ENPUB_CAN_SEND) 2372 && now > engine->resume_sending_at) 2373 { 2374 LSQ_NOTICE("failsafe activated: resume sending packets again after " 2375 "timeout"); 2376 EV_LOG_GENERIC_EVENT("resume sending packets again after timeout"); 2377 engine->pub.enp_flags |= ENPUB_CAN_SEND; 2378 } 2379 2380 i = 0; 2381 while ((conn = next_conn(engine)) 2382 || (conn = next_new_full_conn(&new_full_conns))) 2383 { 2384 tick_st = conn->cn_if->ci_tick(conn, now); 2385 conn->cn_last_ticked = now + i /* Maintain relative order */ ++; 2386 if (tick_st & TICK_PROMOTE) 2387 { 2388 
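            /* The mini connection asks to be promoted -- normally once its
             * handshake has completed.  Create a full connection, queue it so
             * that it is ticked during this same pass, and rely on TICK_CLOSE
             * (set below) to dispose of the mini connection.
             */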
lsquic_conn_t *new_conn; 2389 EV_LOG_CONN_EVENT(lsquic_conn_log_cid(conn), 2390 "scheduled for promotion"); 2391 assert(conn->cn_flags & LSCONN_MINI); 2392 new_conn = new_full_conn_server(engine, conn, now); 2393 if (new_conn) 2394 { 2395 STAILQ_INSERT_TAIL(&new_full_conns, new_conn, cn_next_new_full); 2396 new_conn->cn_last_sent = engine->last_sent; 2397 eng_hist_inc(&engine->history, now, sl_new_full_conns); 2398 } 2399 tick_st |= TICK_CLOSE; /* Destroy mini connection */ 2400 conn->cn_flags |= LSCONN_PROMOTED; 2401 } 2402 if (tick_st & TICK_SEND) 2403 { 2404 if (!(conn->cn_flags & LSCONN_HAS_OUTGOING)) 2405 { 2406 lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent); 2407 engine_incref_conn(conn, LSCONN_HAS_OUTGOING); 2408 } 2409 } 2410 if (tick_st & TICK_CLOSE) 2411 { 2412 STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn); 2413 engine_incref_conn(conn, LSCONN_CLOSING); 2414 if (conn->cn_flags & LSCONN_HASHED) 2415 remove_conn_from_hash(engine, conn); 2416 } 2417 else 2418 { 2419 TAILQ_INSERT_TAIL(&ticked_conns, conn, cn_next_ticked); 2420 engine_incref_conn(conn, LSCONN_TICKED); 2421 if ((engine->flags & ENG_SERVER) && conn->cn_if->ci_report_live 2422 && conn->cn_if->ci_report_live(conn, now)) 2423 cub_add_cids_from_cces(&cub_live, conn); 2424 } 2425 } 2426 2427 if ((engine->pub.enp_flags & ENPUB_CAN_SEND) 2428 && lsquic_engine_has_unsent_packets(engine)) 2429 send_packets_out(engine, &ticked_conns, &closed_conns); 2430 2431 while ((conn = STAILQ_FIRST(&closed_conns))) { 2432 STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn); 2433 if ((conn->cn_flags & (LSCONN_MINI|LSCONN_PROMOTED)) == LSCONN_MINI) 2434 cub_add_cids_from_cces(&cub_old, conn); 2435 (void) engine_decref_conn(engine, conn, LSCONN_CLOSING); 2436 } 2437 2438 /* TODO Heapification can be optimized by switching to the Floyd method: 2439 * https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap 2440 */ 2441 while ((conn = TAILQ_FIRST(&ticked_conns))) 2442 { 2443 TAILQ_REMOVE(&ticked_conns, conn, cn_next_ticked); 2444 engine_decref_conn(engine, conn, LSCONN_TICKED); 2445 if (!(conn->cn_flags & LSCONN_TICKABLE) 2446 && conn->cn_if->ci_is_tickable(conn)) 2447 { 2448 lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked); 2449 engine_incref_conn(conn, LSCONN_TICKABLE); 2450 } 2451 else if (!(conn->cn_flags & LSCONN_ATTQ)) 2452 { 2453 next_tick_time = conn->cn_if->ci_next_tick_time(conn); 2454 if (next_tick_time) 2455 { 2456 if (0 == attq_add(engine->attq, conn, next_tick_time)) 2457 engine_incref_conn(conn, LSCONN_ATTQ); 2458 } 2459 else 2460 assert(0); 2461 } 2462 } 2463 2464 cub_flush(&engine->new_scids); 2465 cub_flush(&cub_live); 2466 cub_flush(&cub_old); 2467} 2468 2469 2470/* Return 0 if packet is being processed by a real connection, 1 if the 2471 * packet was processed, but not by a connection, and -1 on error. 
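 *
 * Illustrative call sketch (not from this file; the socket descriptor and
 * the address variables are hypothetical, and error handling is omitted):
 *
 *      nread = recvfrom(sockfd, buf, sizeof(buf), 0,
 *                          (struct sockaddr *) &peer_sa, &peer_sa_len);
 *      if (nread > 0)
 *          (void) lsquic_engine_packet_in(engine, buf, (size_t) nread,
 *                      (struct sockaddr *) &local_sa,
 *                      (struct sockaddr *) &peer_sa, peer_ctx, ecn);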
2472 */ 2473int 2474lsquic_engine_packet_in (lsquic_engine_t *engine, 2475 const unsigned char *packet_in_data, size_t packet_in_size, 2476 const struct sockaddr *sa_local, const struct sockaddr *sa_peer, 2477 void *peer_ctx, int ecn) 2478{ 2479 const unsigned char *const packet_end = packet_in_data + packet_in_size; 2480 struct packin_parse_state ppstate; 2481 lsquic_packet_in_t *packet_in; 2482 int (*parse_packet_in_begin) (struct lsquic_packet_in *, size_t length, 2483 int is_server, unsigned cid_len, struct packin_parse_state *); 2484 unsigned n_zeroes; 2485 int s; 2486 2487 ENGINE_CALLS_INCR(engine); 2488 2489 if (engine->flags & ENG_SERVER) 2490 parse_packet_in_begin = lsquic_parse_packet_in_server_begin; 2491 else 2492 if (engine->flags & ENG_CONNS_BY_ADDR) 2493 { 2494 struct lsquic_hash_elem *el; 2495 const struct lsquic_conn *conn; 2496 el = find_conn_by_addr(engine->conns_hash, sa_local); 2497 if (!el) 2498 return -1; 2499 conn = lsquic_hashelem_getdata(el); 2500 if ((1 << conn->cn_version) & LSQUIC_GQUIC_HEADER_VERSIONS) 2501 parse_packet_in_begin = lsquic_gquic_parse_packet_in_begin; 2502 else if ((1 << conn->cn_version) & LSQUIC_IETF_VERSIONS) 2503 parse_packet_in_begin = lsquic_ietf_v1_parse_packet_in_begin; 2504 else 2505 { 2506 assert(conn->cn_version == LSQVER_046 2507#if LSQUIC_USE_Q098 2508 || conn->cn_version == LSQVER_098 2509#endif 2510 2511 ); 2512 parse_packet_in_begin = lsquic_Q046_parse_packet_in_begin; 2513 } 2514 } 2515 else 2516 parse_packet_in_begin = lsquic_parse_packet_in_begin; 2517 2518 n_zeroes = 0; 2519 do 2520 { 2521 packet_in = lsquic_mm_get_packet_in(&engine->pub.enp_mm); 2522 if (!packet_in) 2523 return -1; 2524 /* Library does not modify packet_in_data, it is not referenced after 2525 * this function returns and subsequent release of pi_data is guarded 2526 * by PI_OWN_DATA flag. 2527 */ 2528 packet_in->pi_data = (unsigned char *) packet_in_data; 2529 if (0 != parse_packet_in_begin(packet_in, packet_end - packet_in_data, 2530 engine->flags & ENG_SERVER, 2531 engine->pub.enp_settings.es_scid_len, &ppstate)) 2532 { 2533 LSQ_DEBUG("Cannot parse incoming packet's header"); 2534 lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in); 2535 errno = EINVAL; 2536 return -1; 2537 } 2538 2539 packet_in_data += packet_in->pi_data_sz; 2540 packet_in->pi_received = lsquic_time_now(); 2541 packet_in->pi_flags |= (3 & ecn) << PIBIT_ECN_SHIFT; 2542 eng_hist_inc(&engine->history, packet_in->pi_received, sl_packets_in); 2543 s = process_packet_in(engine, packet_in, &ppstate, sa_local, sa_peer, 2544 peer_ctx); 2545 n_zeroes += s == 0; 2546 } 2547 while (0 == s && packet_in_data < packet_end); 2548 2549 return n_zeroes > 0 ? 0 : s; 2550} 2551 2552 2553#if __GNUC__ && !defined(NDEBUG) 2554__attribute__((weak)) 2555#endif 2556unsigned 2557lsquic_engine_quic_versions (const lsquic_engine_t *engine) 2558{ 2559 return engine->pub.enp_settings.es_versions; 2560} 2561 2562 2563void 2564lsquic_engine_cooldown (lsquic_engine_t *engine) 2565{ 2566 struct lsquic_hash_elem *el; 2567 lsquic_conn_t *conn; 2568 2569 if (engine->flags & ENG_COOLDOWN) 2570 /* AFAICT, there is no harm in calling this function more than once, 2571 * but log it just in case, as it may indicate an error in the caller. 
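         * (In cooldown mode the engine accepts no new connections and marks
         * the existing ones as going away; see the loop below.)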
2572         */
2573        LSQ_INFO("cooldown called again");
2574    engine->flags |= ENG_COOLDOWN;
2575    LSQ_INFO("entering cooldown mode");
2576    if (engine->flags & ENG_SERVER)
2577        drop_all_mini_conns(engine);
2578    for (el = lsquic_hash_first(engine->conns_hash); el;
2579                             el = lsquic_hash_next(engine->conns_hash))
2580    {
2581        conn = lsquic_hashelem_getdata(el);
2582        lsquic_conn_going_away(conn);
2583    }
2584}
2585
2586
2587int
2588lsquic_engine_earliest_adv_tick (lsquic_engine_t *engine, int *diff)
2589{
2590    const lsquic_time_t *next_attq_time;
2591    lsquic_time_t now, next_time;
2592
2593    ENGINE_CALLS_INCR(engine);
2594
2595    if (((engine->flags & ENG_PAST_DEADLINE)
2596                                    && lsquic_mh_count(&engine->conns_out))
2597        || (engine->pr_queue && prq_have_pending(engine->pr_queue))
2598        || lsquic_mh_count(&engine->conns_tickable))
2599    {
2600        *diff = 0;
2601        return 1;
2602    }
2603
2604    next_attq_time = attq_next_time(engine->attq);
2605    if (engine->pub.enp_flags & ENPUB_CAN_SEND)
2606    {
2607        if (next_attq_time)
2608            next_time = *next_attq_time;
2609        else
2610            return 0;
2611    }
2612    else
2613    {
2614        if (next_attq_time)
2615            next_time = MIN(*next_attq_time, engine->resume_sending_at);
2616        else
2617            next_time = engine->resume_sending_at;
2618    }
2619
2620    now = lsquic_time_now();
2621    *diff = (int) ((int64_t) next_time - (int64_t) now);
2622    return 1;
2623}
2624
2625
2626unsigned
2627lsquic_engine_count_attq (lsquic_engine_t *engine, int from_now)
2628{
2629    lsquic_time_t now;
2630    ENGINE_CALLS_INCR(engine);
2631    now = lsquic_time_now();
2632    if (from_now < 0)
2633        now -= -from_now;
2634    else
2635        now += from_now;
2636    return attq_count_before(engine->attq, now);
2637}
2638
2639
2640int
2641lsquic_engine_add_cid (struct lsquic_engine_public *enpub,
2642                            struct lsquic_conn *conn, unsigned cce_idx)
2643{
2644    struct lsquic_engine *const engine = (struct lsquic_engine *) enpub;
2645    struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx];
2646    void *peer_ctx;
2647
2648    assert(cce_idx < conn->cn_n_cces);
2649    assert(conn->cn_cces_mask & (1 << cce_idx));
2650    assert(!(cce->cce_hash_el.qhe_flags & QHE_HASHED));
2651
2652    if (lsquic_hash_insert(engine->conns_hash, cce->cce_cid.idbuf,
2653                    cce->cce_cid.len, conn, &cce->cce_hash_el))
2654    {
2655        LSQ_DEBUGC("add %"CID_FMT" to the list of SCIDs",
2656                                                    CID_BITS(&cce->cce_cid));
2657        peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL);
2658        cce->cce_flags |= CCE_REG;
2659        cub_add(&engine->new_scids, &cce->cce_cid, peer_ctx);
2660        return 0;
2661    }
2662    else
2663    {
2664        LSQ_WARNC("could not add new cid %"CID_FMT" to the SCID hash",
2665                                                    CID_BITS(&cce->cce_cid));
2666        return -1;
2667    }
2668}
2669
2670
2671void
2672lsquic_engine_retire_cid (struct lsquic_engine_public *enpub,
2673              struct lsquic_conn *conn, unsigned cce_idx, lsquic_time_t now)
2674{
2675    struct lsquic_engine *const engine = (struct lsquic_engine *) enpub;
2676    struct conn_cid_elem *const cce = &conn->cn_cces[cce_idx];
2677    void *peer_ctx;
2678
2679    assert(cce_idx < conn->cn_n_cces);
2680
2681    if (cce->cce_hash_el.qhe_flags & QHE_HASHED)
2682        lsquic_hash_erase(engine->conns_hash, &cce->cce_hash_el);
2683
2684    if (engine->purga)
2685    {
2686        peer_ctx = lsquic_conn_get_peer_ctx(conn, NULL);
2687        lsquic_purga_add(engine->purga, &cce->cce_cid, peer_ctx,
2688                                                    PUTY_CID_RETIRED, now);
2689    }
2690    conn->cn_cces_mask &= ~(1u << cce_idx);
2691    LSQ_DEBUGC("retire CID %"CID_FMT, CID_BITS(&cce->cce_cid));
2692}
2693
2694
2695
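/* Illustrative event loop (not part of the library): one way an application
 * might drive the functions defined in this file.  feed_incoming_packets()
 * and wait_for_io() are hypothetical application helpers: the former reads
 * UDP datagrams and hands them to lsquic_engine_packet_in(); the latter
 * blocks until a socket is readable or the given number of microseconds
 * passes, with a negative timeout meaning "block indefinitely".
 *
 *      int diff;
 *
 *      for (;;)
 *      {
 *          feed_incoming_packets(engine);
 *          lsquic_engine_process_conns(engine);
 *          if (lsquic_engine_has_unsent_packets(engine))
 *              lsquic_engine_send_unsent_packets(engine);
 *          if (lsquic_engine_earliest_adv_tick(engine, &diff))
 *              wait_for_io(diff > 0 ? diff : 0);
 *          else
 *              wait_for_io(-1);
 *      }
 */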