lsquic_engine.c revision 82f3bcef
/* Copyright (c) 2017 - 2018 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_engine.c - QUIC engine
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <time.h>
#ifndef WIN32
#include <sys/time.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <netdb.h>
#endif



#include "lsquic.h"
#include "lsquic_types.h"
#include "lsquic_alarmset.h"
#include "lsquic_parse.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_set.h"
#include "lsquic_conn_flow.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_conn.h"
#include "lsquic_full_conn.h"
#include "lsquic_util.h"
#include "lsquic_qtags.h"
#include "lsquic_str.h"
#include "lsquic_handshake.h"
#include "lsquic_mm.h"
#include "lsquic_conn_hash.h"
#include "lsquic_engine_public.h"
#include "lsquic_eng_hist.h"
#include "lsquic_ev_log.h"
#include "lsquic_version.h"
#include "lsquic_hash.h"
#include "lsquic_attq.h"
#include "lsquic_min_heap.h"

#define LSQUIC_LOGGER_MODULE LSQLM_ENGINE
#include "lsquic_logger.h"


/* The batch of outgoing packets grows and shrinks dynamically */
#define MAX_OUT_BATCH_SIZE 1024
#define MIN_OUT_BATCH_SIZE 256
#define INITIAL_OUT_BATCH_SIZE 512

struct out_batch
{
    lsquic_conn_t           *conns  [MAX_OUT_BATCH_SIZE];
    lsquic_packet_out_t     *packets[MAX_OUT_BATCH_SIZE];
    struct lsquic_out_spec   outs   [MAX_OUT_BATCH_SIZE];
};

typedef struct lsquic_conn * (*conn_iter_f)(struct lsquic_engine *);

static void
process_connections (struct lsquic_engine *engine, conn_iter_f iter,
                     lsquic_time_t now);

static void
engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag);

static lsquic_conn_t *
engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn,
                                        enum lsquic_conn_flags flag);

static void
force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn);

/* Nested calls to LSQUIC are not supported */
#define ENGINE_IN(e) do {                               \
    assert(!((e)->pub.enp_flags & ENPUB_PROC));         \
    (e)->pub.enp_flags |= ENPUB_PROC;                   \
} while (0)

#define ENGINE_OUT(e) do {                              \
    assert((e)->pub.enp_flags & ENPUB_PROC);            \
    (e)->pub.enp_flags &= ~ENPUB_PROC;                  \
} while (0)

/* A connection can be referenced from one of five places:
 *
 *   1. Connection hash: a connection starts its life in one of those.
 *
 *   2. Outgoing queue.
 *
 *   3. Tickable queue.
 *
 *   4. Advisory Tick Time queue.
 *
 *   5. Closing connections queue.  This is a transient queue -- it only
 *      exists for the duration of process_connections() function call.
 *
 * The idea is to destroy the connection when it is no longer referenced.
 * For example, a connection tick may return TICK_SEND|TICK_CLOSE.  In
 * that case, the connection is referenced from two places: (2) and (5).
 * After its packets are sent, it is only referenced in (5), and at the
 * end of the function call, when it is removed from (5), the reference
 * count goes to zero and the connection is destroyed.
 * If not all packets can be sent, at the end of the function call, the
 * connection is referenced by (2) and will only be removed once all
 * outgoing packets have been sent.
 */
#define CONN_REF_FLAGS  (LSCONN_HASHED          \
                        |LSCONN_HAS_OUTGOING    \
                        |LSCONN_TICKABLE        \
                        |LSCONN_CLOSING         \
                        |LSCONN_ATTQ)




struct lsquic_engine
{
    struct lsquic_engine_public        pub;
    enum {
        ENG_SERVER      = LSENG_SERVER,
        ENG_HTTP        = LSENG_HTTP,
        ENG_COOLDOWN    = (1 <<  7),    /* Cooldown: no new connections */
        ENG_PAST_DEADLINE
                        = (1 <<  8),    /* Previous call to a processing
                                         * function went past time threshold.
                                         */
#ifndef NDEBUG
        ENG_DTOR        = (1 << 26),    /* Engine destructor */
#endif
    }                                  flags;
    const struct lsquic_stream_if     *stream_if;
    void                              *stream_if_ctx;
    lsquic_packets_out_f               packets_out;
    void                              *packets_out_ctx;
    void                              *bad_handshake_ctx;
    struct conn_hash                   conns_hash;
    struct min_heap                    conns_tickable;
    struct min_heap                    conns_out;
    struct eng_hist                    history;
    unsigned                           batch_size;
    struct attq                       *attq;
    /* Track the last time a packet was sent to give new connections
     * priority lower than that of existing connections.
     */
    lsquic_time_t                      last_sent;
    unsigned                           n_conns;
    lsquic_time_t                      deadline;
    struct out_batch                   out_batch;
};


void
lsquic_engine_init_settings (struct lsquic_engine_settings *settings,
                             unsigned flags)
{
    memset(settings, 0, sizeof(*settings));
    settings->es_versions        = LSQUIC_DF_VERSIONS;
    if (flags & ENG_SERVER)
    {
        settings->es_cfcw        = LSQUIC_DF_CFCW_SERVER;
        settings->es_sfcw        = LSQUIC_DF_SFCW_SERVER;
        settings->es_support_srej= LSQUIC_DF_SUPPORT_SREJ_SERVER;
    }
    else
    {
        settings->es_cfcw        = LSQUIC_DF_CFCW_CLIENT;
        settings->es_sfcw        = LSQUIC_DF_SFCW_CLIENT;
        settings->es_support_srej= LSQUIC_DF_SUPPORT_SREJ_CLIENT;
    }
    settings->es_max_streams_in  = LSQUIC_DF_MAX_STREAMS_IN;
    settings->es_idle_conn_to    = LSQUIC_DF_IDLE_CONN_TO;
    settings->es_handshake_to    = LSQUIC_DF_HANDSHAKE_TO;
    settings->es_silent_close    = LSQUIC_DF_SILENT_CLOSE;
    settings->es_max_header_list_size
                                 = LSQUIC_DF_MAX_HEADER_LIST_SIZE;
    settings->es_ua              = LSQUIC_DF_UA;

    settings->es_pdmd            = QTAG_X509;
    settings->es_aead            = QTAG_AESG;
    settings->es_kexs            = QTAG_C255;
    settings->es_support_push    = LSQUIC_DF_SUPPORT_PUSH;
    settings->es_support_tcid0   = LSQUIC_DF_SUPPORT_TCID0;
    settings->es_support_nstp    = LSQUIC_DF_SUPPORT_NSTP;
    settings->es_honor_prst      = LSQUIC_DF_HONOR_PRST;
    settings->es_progress_check  = LSQUIC_DF_PROGRESS_CHECK;
    settings->es_rw_once         = LSQUIC_DF_RW_ONCE;
    settings->es_proc_time_thresh= LSQUIC_DF_PROC_TIME_THRESH;
    settings->es_pace_packets    = LSQUIC_DF_PACE_PACKETS;
}


/* Note: if returning an error, err_buf must be valid if non-NULL */
int
lsquic_engine_check_settings (const struct lsquic_engine_settings *settings,
                              unsigned flags,
                              char *err_buf, size_t err_buf_sz)
{
    if (settings->es_cfcw < LSQUIC_MIN_FCW ||
        settings->es_sfcw < LSQUIC_MIN_FCW)
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "flow control window set too low");
        return -1;
    }
    if (0 == (settings->es_versions & LSQUIC_SUPPORTED_VERSIONS))
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "No supported QUIC versions specified");
        return -1;
    }
    if (settings->es_versions & ~LSQUIC_SUPPORTED_VERSIONS)
    {
        if (err_buf)
            snprintf(err_buf, err_buf_sz, "%s",
                "one or more unsupported QUIC versions specified");
        return -1;
    }
    return 0;
}
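
/* Illustrative sketch (not part of the engine): an application would
 * typically initialize the defaults, adjust a few fields, and validate the
 * result before handing the settings to lsquic_engine_new() via
 * struct lsquic_engine_api.
 *
 *     struct lsquic_engine_settings settings;
 *     char errbuf[100];
 *
 *     lsquic_engine_init_settings(&settings, 0);    // 0: client mode
 *     // ... adjust individual es_* fields as needed ...
 *     if (0 != lsquic_engine_check_settings(&settings, 0,
 *                                           errbuf, sizeof(errbuf)))
 *         ;   // invalid: errbuf describes the problem
 */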

static void
free_packet (void *ctx, unsigned char *packet_data)
{
    free(packet_data);
}


static void *
malloc_buf (void *ctx, size_t size)
{
    return malloc(size);
}


static const struct lsquic_packout_mem_if stock_pmi =
{
    malloc_buf, (void(*)(void *, void *)) free_packet,
};


lsquic_engine_t *
lsquic_engine_new (unsigned flags,
                   const struct lsquic_engine_api *api)
{
    lsquic_engine_t *engine;
    int tag_buf_len;
    char err_buf[100];

    if (!api->ea_packets_out)
    {
        LSQ_ERROR("packets_out callback is not specified");
        return NULL;
    }

    if (api->ea_settings &&
        0 != lsquic_engine_check_settings(api->ea_settings, flags,
                                          err_buf, sizeof(err_buf)))
    {
        LSQ_ERROR("cannot create engine: %s", err_buf);
        return NULL;
    }

    engine = calloc(1, sizeof(*engine));
    if (!engine)
        return NULL;
    if (0 != lsquic_mm_init(&engine->pub.enp_mm))
    {
        free(engine);
        return NULL;
    }
    if (api->ea_settings)
        engine->pub.enp_settings = *api->ea_settings;
    else
        lsquic_engine_init_settings(&engine->pub.enp_settings, flags);
    tag_buf_len = gen_ver_tags(engine->pub.enp_ver_tags_buf,
                               sizeof(engine->pub.enp_ver_tags_buf),
                               engine->pub.enp_settings.es_versions);
    if (tag_buf_len <= 0)
    {
        LSQ_ERROR("cannot generate version tags buffer");
        free(engine);
        return NULL;
    }
    engine->pub.enp_ver_tags_len = tag_buf_len;

    engine->flags           = flags;
    engine->stream_if       = api->ea_stream_if;
    engine->stream_if_ctx   = api->ea_stream_if_ctx;
    engine->packets_out     = api->ea_packets_out;
    engine->packets_out_ctx = api->ea_packets_out_ctx;
    if (api->ea_pmi)
    {
        engine->pub.enp_pmi     = api->ea_pmi;
        engine->pub.enp_pmi_ctx = api->ea_pmi_ctx;
    }
    else
    {
        engine->pub.enp_pmi     = &stock_pmi;
        engine->pub.enp_pmi_ctx = NULL;
    }
    engine->pub.enp_engine = engine;
    conn_hash_init(&engine->conns_hash);
    engine->attq = attq_create();
    eng_hist_init(&engine->history);
    engine->batch_size = INITIAL_OUT_BATCH_SIZE;


    LSQ_INFO("instantiated engine");
    return engine;
}

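/* The next two helpers adjust the size of the outgoing packet batch.  The
 * expression `x <<= x < MAX' doubles x only while the comparison is true
 * (shift by one), and leaves it unchanged otherwise (shift by zero); the
 * mirror expression in shrink_batch_size() halves it down to the minimum.
 * The batch size thus stays within [MIN_OUT_BATCH_SIZE, MAX_OUT_BATCH_SIZE].
 */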
static void
grow_batch_size (struct lsquic_engine *engine)
{
    engine->batch_size <<= engine->batch_size < MAX_OUT_BATCH_SIZE;
}


static void
shrink_batch_size (struct lsquic_engine *engine)
{
    engine->batch_size >>= engine->batch_size > MIN_OUT_BATCH_SIZE;
}


/* Wrapper to make sure important things occur before the connection is
 * really destroyed.
 */
static void
destroy_conn (struct lsquic_engine *engine, lsquic_conn_t *conn)
{
    --engine->n_conns;
    conn->cn_flags |= LSCONN_NEVER_TICKABLE;
    conn->cn_if->ci_destroy(conn);
}


static int
maybe_grow_conn_heaps (struct lsquic_engine *engine)
{
    struct min_heap_elem *els;
    unsigned count;

    if (engine->n_conns < lsquic_mh_nalloc(&engine->conns_tickable))
        return 0;   /* Nothing to do */

    if (lsquic_mh_nalloc(&engine->conns_tickable))
        count = lsquic_mh_nalloc(&engine->conns_tickable) * 2 * 2;
    else
        count = 8;

    els = malloc(sizeof(els[0]) * count);
    if (!els)
    {
        LSQ_ERROR("%s: malloc failed", __func__);
        return -1;
    }

    LSQ_DEBUG("grew heaps to %u elements", count / 2);
    memcpy(&els[0], engine->conns_tickable.mh_elems,
                sizeof(els[0]) * lsquic_mh_count(&engine->conns_tickable));
    memcpy(&els[count / 2], engine->conns_out.mh_elems,
                sizeof(els[0]) * lsquic_mh_count(&engine->conns_out));
    free(engine->conns_tickable.mh_elems);
    engine->conns_tickable.mh_elems  = els;
    engine->conns_out.mh_elems       = &els[count / 2];
    engine->conns_tickable.mh_nalloc = count / 2;
    engine->conns_out.mh_nalloc      = count / 2;
    return 0;
}


static lsquic_conn_t *
new_full_conn_client (lsquic_engine_t *engine, const char *hostname,
                      unsigned short max_packet_size)
{
    lsquic_conn_t *conn;
    unsigned flags;
    if (0 != maybe_grow_conn_heaps(engine))
        return NULL;
    flags = engine->flags & (ENG_SERVER|ENG_HTTP);
    conn = full_conn_client_new(&engine->pub, engine->stream_if,
                    engine->stream_if_ctx, flags, hostname, max_packet_size);
    if (!conn)
        return NULL;
    ++engine->n_conns;
    if (0 != conn_hash_add(&engine->conns_hash, conn))
    {
        LSQ_WARN("cannot add connection %"PRIu64" to hash - destroy",
            conn->cn_cid);
        destroy_conn(engine, conn);
        return NULL;
    }
    assert(!(conn->cn_flags &
        (CONN_REF_FLAGS
         & ~LSCONN_TICKABLE /* This flag may be set as effect of user
                                callbacks */
        )));
    conn->cn_flags |= LSCONN_HASHED;
    return conn;
}


static lsquic_conn_t *
find_or_create_conn (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in,
         struct packin_parse_state *ppstate, const struct sockaddr *sa_peer,
         void *peer_ctx)
{
    lsquic_conn_t *conn;

    if (lsquic_packet_in_is_prst(packet_in)
                                && !engine->pub.enp_settings.es_honor_prst)
    {
        LSQ_DEBUG("public reset packet: discarding");
        return NULL;
    }

    if (!(packet_in->pi_flags & PI_CONN_ID))
    {
        LSQ_DEBUG("packet header does not have connection ID: discarding");
        return NULL;
    }

    conn = conn_hash_find(&engine->conns_hash, packet_in->pi_conn_id);
    if (conn)
    {
        conn->cn_pf->pf_parse_packet_in_finish(packet_in, ppstate);
        return conn;
    }

    return conn;
}


#if !defined(NDEBUG) && __GNUC__
__attribute__((weak))
#endif
void
lsquic_engine_add_conn_to_tickable (struct lsquic_engine_public *enpub,
                                    lsquic_conn_t *conn)
{
    if (0 == (enpub->enp_flags & ENPUB_PROC) &&
        0 == (conn->cn_flags & (LSCONN_TICKABLE|LSCONN_NEVER_TICKABLE)))
    {
        lsquic_engine_t *engine = (lsquic_engine_t *) enpub;
        lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked);
        engine_incref_conn(conn, LSCONN_TICKABLE);
    }
}

void
lsquic_engine_add_conn_to_attq (struct lsquic_engine_public *enpub,
                                lsquic_conn_t *conn, lsquic_time_t tick_time)
{
    lsquic_engine_t *const engine = (lsquic_engine_t *) enpub;
    if (conn->cn_flags & LSCONN_TICKABLE)
    {
        /* Optimization: no need to add the connection to the Advisory Tick
         * Time Queue: it is about to be ticked, after which its next tick
         * time may be queried again.
         */;
    }
    else if (conn->cn_flags & LSCONN_ATTQ)
    {
        if (lsquic_conn_adv_time(conn) != tick_time)
        {
            attq_remove(engine->attq, conn);
            if (0 != attq_add(engine->attq, conn, tick_time))
                engine_decref_conn(engine, conn, LSCONN_ATTQ);
        }
    }
    else if (0 == attq_add(engine->attq, conn, tick_time))
        engine_incref_conn(conn, LSCONN_ATTQ);
}


/* Return 0 if packet is being processed by a connection, otherwise
 * return 1.
 */
static int
process_packet_in (lsquic_engine_t *engine, lsquic_packet_in_t *packet_in,
       struct packin_parse_state *ppstate, const struct sockaddr *sa_local,
       const struct sockaddr *sa_peer, void *peer_ctx)
{
    lsquic_conn_t *conn;

    conn = find_or_create_conn(engine, packet_in, ppstate, sa_peer, peer_ctx);
    if (!conn)
    {
        lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in);
        return 1;
    }

    if (0 == (conn->cn_flags & LSCONN_TICKABLE))
    {
        lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked);
        engine_incref_conn(conn, LSCONN_TICKABLE);
    }
    lsquic_conn_record_sockaddr(conn, sa_local, sa_peer);
    lsquic_packet_in_upref(packet_in);
    conn->cn_peer_ctx = peer_ctx;
    conn->cn_if->ci_packet_in(conn, packet_in);
    lsquic_packet_in_put(&engine->pub.enp_mm, packet_in);
    return 0;
}


void
lsquic_engine_destroy (lsquic_engine_t *engine)
{
    lsquic_conn_t *conn;

    LSQ_DEBUG("destroying engine");
#ifndef NDEBUG
    engine->flags |= ENG_DTOR;
#endif

    while ((conn = lsquic_mh_pop(&engine->conns_out)))
    {
        assert(conn->cn_flags & LSCONN_HAS_OUTGOING);
        (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING);
    }

    while ((conn = lsquic_mh_pop(&engine->conns_tickable)))
    {
        assert(conn->cn_flags & LSCONN_TICKABLE);
        (void) engine_decref_conn(engine, conn, LSCONN_TICKABLE);
    }

    for (conn = conn_hash_first(&engine->conns_hash); conn;
                            conn = conn_hash_next(&engine->conns_hash))
        force_close_conn(engine, conn);
    conn_hash_cleanup(&engine->conns_hash);

    assert(0 == engine->n_conns);
    attq_destroy(engine->attq);

    assert(0 == lsquic_mh_count(&engine->conns_out));
    assert(0 == lsquic_mh_count(&engine->conns_tickable));
    free(engine->conns_tickable.mh_elems);
    free(engine);
}


lsquic_conn_t *
lsquic_engine_connect (lsquic_engine_t *engine, const struct sockaddr *peer_sa,
                       void *peer_ctx, lsquic_conn_ctx_t *conn_ctx,
                       const char *hostname, unsigned short max_packet_size)
{
    lsquic_conn_t *conn;

    if (engine->flags & ENG_SERVER)
    {
        LSQ_ERROR("`%s' must only be called in client mode", __func__);
        return NULL;
    }

    if (0 == max_packet_size)
    {
        switch (peer_sa->sa_family)
        {
        case AF_INET:
            max_packet_size = QUIC_MAX_IPv4_PACKET_SZ;
            break;
        default:
            max_packet_size = QUIC_MAX_IPv6_PACKET_SZ;
            break;
        }
    }

    conn = new_full_conn_client(engine, hostname, max_packet_size);
    if (!conn)
        return NULL;
    lsquic_mh_insert(&engine->conns_tickable, conn, conn->cn_last_ticked);
    engine_incref_conn(conn, LSCONN_TICKABLE);
    ENGINE_IN(engine);
    lsquic_conn_record_peer_sa(conn, peer_sa);
    conn->cn_peer_ctx = peer_ctx;
    lsquic_conn_set_ctx(conn, conn_ctx);
    full_conn_client_call_on_new(conn);
    ENGINE_OUT(engine);
    return conn;
}

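/* Illustrative sketch (not part of the engine): a client application
 * typically creates the engine and then initiates a connection, letting
 * the engine pick the maximum packet size by passing 0.  Variable names
 * below are made up.
 *
 *     lsquic_conn_t *conn = lsquic_engine_connect(engine,
 *                     (struct sockaddr *) &peer_addr, peer_ctx,
 *                     conn_ctx, "example.com", 0);
 *     if (!conn)
 *         ;   // connection could not be created
 */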

static void
remove_conn_from_hash (lsquic_engine_t *engine, lsquic_conn_t *conn)
{
    conn_hash_remove(&engine->conns_hash, conn);
    (void) engine_decref_conn(engine, conn, LSCONN_HASHED);
}


static void
refflags2str (enum lsquic_conn_flags flags, char s[6])
{
    *s = 'C'; s += !!(flags & LSCONN_CLOSING);
    *s = 'H'; s += !!(flags & LSCONN_HASHED);
    *s = 'O'; s += !!(flags & LSCONN_HAS_OUTGOING);
    *s = 'T'; s += !!(flags & LSCONN_TICKABLE);
    *s = 'A'; s += !!(flags & LSCONN_ATTQ);
    *s = '\0';
}


static void
engine_incref_conn (lsquic_conn_t *conn, enum lsquic_conn_flags flag)
{
    char str[2][6];
    assert(flag & CONN_REF_FLAGS);
    assert(!(conn->cn_flags & flag));
    conn->cn_flags |= flag;
    LSQ_DEBUG("incref conn %"PRIu64", '%s' -> '%s'", conn->cn_cid,
                    (refflags2str(conn->cn_flags & ~flag, str[0]), str[0]),
                    (refflags2str(conn->cn_flags, str[1]), str[1]));
}


static lsquic_conn_t *
engine_decref_conn (lsquic_engine_t *engine, lsquic_conn_t *conn,
                                        enum lsquic_conn_flags flags)
{
    char str[2][6];
    assert(flags & CONN_REF_FLAGS);
    assert(conn->cn_flags & flags);
#ifndef NDEBUG
    if (flags & LSCONN_CLOSING)
        assert(0 == (conn->cn_flags & LSCONN_HASHED));
#endif
    conn->cn_flags &= ~flags;
    LSQ_DEBUG("decref conn %"PRIu64", '%s' -> '%s'", conn->cn_cid,
                    (refflags2str(conn->cn_flags | flags, str[0]), str[0]),
                    (refflags2str(conn->cn_flags, str[1]), str[1]));
    if (0 == (conn->cn_flags & CONN_REF_FLAGS))
    {
        eng_hist_inc(&engine->history, 0, sl_del_full_conns);
        destroy_conn(engine, conn);
        return NULL;
    }
    else
        return conn;
}


/* This is not a general-purpose function.  Only call from engine dtor. */
static void
force_close_conn (lsquic_engine_t *engine, lsquic_conn_t *conn)
{
    assert(engine->flags & ENG_DTOR);
    const enum lsquic_conn_flags flags = conn->cn_flags;
    assert(conn->cn_flags & CONN_REF_FLAGS);
    assert(!(flags & LSCONN_HAS_OUTGOING));  /* Should be removed already */
    assert(!(flags & LSCONN_TICKABLE));      /* Should be removed already */
    assert(!(flags & LSCONN_CLOSING));       /* It is in transient queue? */
    if (flags & LSCONN_ATTQ)
    {
        attq_remove(engine->attq, conn);
        (void) engine_decref_conn(engine, conn, LSCONN_ATTQ);
    }
    if (flags & LSCONN_HASHED)
        remove_conn_from_hash(engine, conn);
}

/* Iterator for tickable connections (those on the Tickable Queue).  Before
 * a connection is returned, it is removed from the Advisory Tick Time queue
 * if necessary.
 */
static lsquic_conn_t *
conn_iter_next_tickable (struct lsquic_engine *engine)
{
    lsquic_conn_t *conn;

    conn = lsquic_mh_pop(&engine->conns_tickable);

    if (conn)
        conn = engine_decref_conn(engine, conn, LSCONN_TICKABLE);
    if (conn && (conn->cn_flags & LSCONN_ATTQ))
    {
        attq_remove(engine->attq, conn);
        conn = engine_decref_conn(engine, conn, LSCONN_ATTQ);
    }

    return conn;
}


void
lsquic_engine_process_conns (lsquic_engine_t *engine)
{
    lsquic_conn_t *conn;
    lsquic_time_t now;

    ENGINE_IN(engine);

    now = lsquic_time_now();
    while ((conn = attq_pop(engine->attq, now)))
    {
        conn = engine_decref_conn(engine, conn, LSCONN_ATTQ);
        if (conn && !(conn->cn_flags & LSCONN_TICKABLE))
        {
            lsquic_mh_insert(&engine->conns_tickable, conn,
                                                    conn->cn_last_ticked);
            engine_incref_conn(conn, LSCONN_TICKABLE);
        }
    }

    process_connections(engine, conn_iter_next_tickable, now);
    ENGINE_OUT(engine);
}


static int
generate_header (const lsquic_packet_out_t *packet_out,
                 const struct parse_funcs *pf, lsquic_cid_t cid,
                 unsigned char *buf, size_t bufsz)
{
    return pf->pf_gen_reg_pkt_header(buf, bufsz,
        packet_out->po_flags & PO_CONN_ID ? &cid : NULL,
        packet_out->po_flags & PO_VERSION ? &packet_out->po_ver_tag : NULL,
        packet_out->po_flags & PO_NONCE ? packet_out->po_nonce : NULL,
        packet_out->po_packno, lsquic_packet_out_packno_bits(packet_out));
}


static ssize_t
really_encrypt_packet (const lsquic_conn_t *conn,
                       const lsquic_packet_out_t *packet_out,
                       unsigned char *buf, size_t bufsz)
{
    int enc, header_sz, is_hello_packet;
    size_t packet_sz;
    unsigned char header_buf[QUIC_MAX_PUBHDR_SZ];

    header_sz = generate_header(packet_out, conn->cn_pf, conn->cn_cid,
                                        header_buf, sizeof(header_buf));
    if (header_sz < 0)
        return -1;

    is_hello_packet = !!(packet_out->po_flags & PO_HELLO);
    enc = conn->cn_esf->esf_encrypt(conn->cn_enc_session, conn->cn_version, 0,
                packet_out->po_packno, header_buf, header_sz,
                packet_out->po_data, packet_out->po_data_sz,
                buf, bufsz, &packet_sz, is_hello_packet);
    if (0 == enc)
    {
        LSQ_DEBUG("encrypted packet %"PRIu64"; plaintext is %u bytes, "
            "ciphertext is %zd bytes",
            packet_out->po_packno,
            lsquic_po_header_length(packet_out->po_flags) +
                                                packet_out->po_data_sz,
            packet_sz);
        return packet_sz;
    }
    else
        return -1;
}


static enum { ENCPA_OK, ENCPA_NOMEM, ENCPA_BADCRYPT, }
encrypt_packet (lsquic_engine_t *engine, const lsquic_conn_t *conn,
                                            lsquic_packet_out_t *packet_out)
{
    ssize_t enc_sz;
    size_t bufsz;
    unsigned sent_sz;
    unsigned char *buf;

    bufsz = lsquic_po_header_length(packet_out->po_flags) +
                                packet_out->po_data_sz + QUIC_PACKET_HASH_SZ;
    buf = engine->pub.enp_pmi->pmi_allocate(engine->pub.enp_pmi_ctx, bufsz);
    if (!buf)
    {
        LSQ_DEBUG("could not allocate memory for outgoing packet of size %zd",
                                                                        bufsz);
        return ENCPA_NOMEM;
    }

    {
        enc_sz = really_encrypt_packet(conn, packet_out, buf, bufsz);
        sent_sz = enc_sz;
    }

    if (enc_sz < 0)
    {
        engine->pub.enp_pmi->pmi_release(engine->pub.enp_pmi_ctx, buf);
        return ENCPA_BADCRYPT;
    }

    packet_out->po_enc_data    = buf;
    packet_out->po_enc_data_sz = enc_sz;
    packet_out->po_sent_sz     = sent_sz;
    packet_out->po_flags |= PO_ENCRYPTED|PO_SENT_SZ;

    return ENCPA_OK;
}

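/* A note on buffer ownership: encrypt_packet() above sizes the output
 * buffer as public header + payload + QUIC_PACKET_HASH_SZ and obtains it
 * through the packout memory interface (enp_pmi).  On encryption failure
 * the buffer is released right away; on success it stays attached to the
 * packet (PO_ENCRYPTED) and is released in send_batch() once the packet
 * has actually been sent, or kept around for the next send attempt
 * otherwise.
 */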

STAILQ_HEAD(conns_stailq, lsquic_conn);


struct conns_out_iter
{
    struct min_heap            *coi_heap;
    TAILQ_HEAD(, lsquic_conn)   coi_active_list,
                                coi_inactive_list;
    lsquic_conn_t              *coi_next;
#ifndef NDEBUG
    lsquic_time_t               coi_last_sent;
#endif
};


static void
coi_init (struct conns_out_iter *iter, struct lsquic_engine *engine)
{
    iter->coi_heap = &engine->conns_out;
    iter->coi_next = NULL;
    TAILQ_INIT(&iter->coi_active_list);
    TAILQ_INIT(&iter->coi_inactive_list);
#ifndef NDEBUG
    iter->coi_last_sent = 0;
#endif
}


static lsquic_conn_t *
coi_next (struct conns_out_iter *iter)
{
    lsquic_conn_t *conn;

    if (lsquic_mh_count(iter->coi_heap) > 0)
    {
        conn = lsquic_mh_pop(iter->coi_heap);
        TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out);
        conn->cn_flags |= LSCONN_COI_ACTIVE;
#ifndef NDEBUG
        if (iter->coi_last_sent)
            assert(iter->coi_last_sent <= conn->cn_last_sent);
        iter->coi_last_sent = conn->cn_last_sent;
#endif
        return conn;
    }
    else if (!TAILQ_EMPTY(&iter->coi_active_list))
    {
        conn = iter->coi_next;
        if (!conn)
            conn = TAILQ_FIRST(&iter->coi_active_list);
        if (conn)
            iter->coi_next = TAILQ_NEXT(conn, cn_next_out);
        return conn;
    }
    else
        return NULL;
}


static void
coi_deactivate (struct conns_out_iter *iter, lsquic_conn_t *conn)
{
    if (!(conn->cn_flags & LSCONN_EVANESCENT))
    {
        assert(!TAILQ_EMPTY(&iter->coi_active_list));
        TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out);
        conn->cn_flags &= ~LSCONN_COI_ACTIVE;
        TAILQ_INSERT_TAIL(&iter->coi_inactive_list, conn, cn_next_out);
        conn->cn_flags |= LSCONN_COI_INACTIVE;
    }
}


static void
coi_remove (struct conns_out_iter *iter, lsquic_conn_t *conn)
{
    assert(conn->cn_flags & LSCONN_COI_ACTIVE);
    if (conn->cn_flags & LSCONN_COI_ACTIVE)
    {
        TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out);
        conn->cn_flags &= ~LSCONN_COI_ACTIVE;
    }
}


static void
coi_reactivate (struct conns_out_iter *iter, lsquic_conn_t *conn)
{
    assert(conn->cn_flags & LSCONN_COI_INACTIVE);
    TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out);
    conn->cn_flags &= ~LSCONN_COI_INACTIVE;
    TAILQ_INSERT_TAIL(&iter->coi_active_list, conn, cn_next_out);
    conn->cn_flags |= LSCONN_COI_ACTIVE;
}


static void
coi_reheap (struct conns_out_iter *iter, lsquic_engine_t *engine)
{
    lsquic_conn_t *conn;
    while ((conn = TAILQ_FIRST(&iter->coi_active_list)))
    {
        TAILQ_REMOVE(&iter->coi_active_list, conn, cn_next_out);
        conn->cn_flags &= ~LSCONN_COI_ACTIVE;
        lsquic_mh_insert(iter->coi_heap, conn, conn->cn_last_sent);
    }
    while ((conn = TAILQ_FIRST(&iter->coi_inactive_list)))
    {
        TAILQ_REMOVE(&iter->coi_inactive_list, conn, cn_next_out);
        conn->cn_flags &= ~LSCONN_COI_INACTIVE;
        (void) engine_decref_conn(engine, conn, LSCONN_HAS_OUTGOING);
    }
}

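/* To summarize the iterator above: coi_next() first drains the conns_out
 * min-heap (ordered by cn_last_sent) onto the active list and then cycles
 * through the active list, so each connection contributes one packet per
 * pass.  Connections with nothing left to send are moved to the inactive
 * list by coi_deactivate().  When batching is done, coi_reheap() pushes
 * active connections back onto the heap (they still have packets to send)
 * and drops the outgoing reference of the inactive ones.
 */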

static unsigned
send_batch (lsquic_engine_t *engine, struct conns_out_iter *conns_iter,
            struct out_batch *batch, unsigned n_to_send)
{
    int n_sent, i;
    lsquic_time_t now;

    /* Set sent time before the write to avoid underestimating RTT */
    now = lsquic_time_now();
    for (i = 0; i < (int) n_to_send; ++i)
        batch->packets[i]->po_sent = now;
    n_sent = engine->packets_out(engine->packets_out_ctx, batch->outs,
                                 n_to_send);
    if (n_sent >= 0)
        LSQ_DEBUG("packets out returned %d (out of %u)", n_sent, n_to_send);
    else
    {
        LSQ_DEBUG("packets out returned an error: %s", strerror(errno));
        n_sent = 0;
    }
    if (n_sent > 0)
        engine->last_sent = now + n_sent;
    for (i = 0; i < n_sent; ++i)
    {
        eng_hist_inc(&engine->history, now, sl_packets_out);
        EV_LOG_PACKET_SENT(batch->conns[i]->cn_cid, batch->packets[i]);
        batch->conns[i]->cn_if->ci_packet_sent(batch->conns[i],
                                               batch->packets[i]);
        /* `i' is added to maintain relative order */
        batch->conns[i]->cn_last_sent = now + i;
        /* Release packet out buffer as soon as the packet is sent
         * successfully.  If not successfully sent, we hold on to
         * this buffer until the packet sending is attempted again
         * or until it times out and is regenerated.
         */
        if (batch->packets[i]->po_flags & PO_ENCRYPTED)
        {
            batch->packets[i]->po_flags &= ~PO_ENCRYPTED;
            engine->pub.enp_pmi->pmi_release(engine->pub.enp_pmi_ctx,
                                             batch->packets[i]->po_enc_data);
            batch->packets[i]->po_enc_data = NULL;  /* JIC */
        }
    }
    if (LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT))
        for ( ; i < (int) n_to_send; ++i)
            EV_LOG_PACKET_NOT_SENT(batch->conns[i]->cn_cid, batch->packets[i]);
    /* Return packets to the connection in reverse order so that the packet
     * ordering is maintained.
     */
    for (i = (int) n_to_send - 1; i >= n_sent; --i)
    {
        batch->conns[i]->cn_if->ci_packet_not_sent(batch->conns[i],
                                                   batch->packets[i]);
        if (!(batch->conns[i]->cn_flags & (LSCONN_COI_ACTIVE|LSCONN_EVANESCENT)))
            coi_reactivate(conns_iter, batch->conns[i]);
    }
    return n_sent;
}


/* Return 1 if went past deadline, 0 otherwise */
static int
check_deadline (lsquic_engine_t *engine)
{
    if (engine->pub.enp_settings.es_proc_time_thresh &&
                                lsquic_time_now() > engine->deadline)
    {
        LSQ_INFO("went past threshold of %u usec, stop sending",
                            engine->pub.enp_settings.es_proc_time_thresh);
        engine->flags |= ENG_PAST_DEADLINE;
        return 1;
    }
    else
        return 0;
}


static void
send_packets_out (struct lsquic_engine *engine,
                  struct conns_stailq *closed_conns)
{
    unsigned n, w, n_sent, n_batches_sent;
    lsquic_packet_out_t *packet_out;
    lsquic_conn_t *conn;
    struct out_batch *const batch = &engine->out_batch;
    struct conns_out_iter conns_iter;
    int shrink, deadline_exceeded;

    coi_init(&conns_iter, engine);
    n_batches_sent = 0;
    n_sent = 0, n = 0;
    shrink = 0;
    deadline_exceeded = 0;

    while ((conn = coi_next(&conns_iter)))
    {
        packet_out = conn->cn_if->ci_next_packet_to_send(conn);
        if (!packet_out) {
            LSQ_DEBUG("batched all outgoing packets for conn %"PRIu64,
                                                            conn->cn_cid);
            coi_deactivate(&conns_iter, conn);
            continue;
        }
        if (!(packet_out->po_flags & (PO_ENCRYPTED|PO_NOENCRYPT)))
        {
            switch (encrypt_packet(engine, conn, packet_out))
            {
            case ENCPA_NOMEM:
                /* Send what we have and wait for a more opportune moment */
                conn->cn_if->ci_packet_not_sent(conn, packet_out);
                goto end_for;
            case ENCPA_BADCRYPT:
                /* This is pretty bad: close connection immediately */
                conn->cn_if->ci_packet_not_sent(conn, packet_out);
                LSQ_INFO("conn %"PRIu64" has unsendable packets", conn->cn_cid);
                if (!(conn->cn_flags & LSCONN_EVANESCENT))
                {
                    if (!(conn->cn_flags & LSCONN_CLOSING))
                    {
                        STAILQ_INSERT_TAIL(closed_conns, conn, cn_next_closed_conn);
                        engine_incref_conn(conn, LSCONN_CLOSING);
                        if (conn->cn_flags & LSCONN_HASHED)
                            remove_conn_from_hash(engine, conn);
                    }
                    coi_remove(&conns_iter, conn);
                }
                continue;
            case ENCPA_OK:
                break;
            }
        }
        LSQ_DEBUG("batched packet %"PRIu64" for connection %"PRIu64,
                                        packet_out->po_packno, conn->cn_cid);
        assert(conn->cn_flags & LSCONN_HAS_PEER_SA);
        if (packet_out->po_flags & PO_ENCRYPTED)
        {
            batch->outs[n].buf = packet_out->po_enc_data;
            batch->outs[n].sz  = packet_out->po_enc_data_sz;
        }
        else
        {
            batch->outs[n].buf = packet_out->po_data;
            batch->outs[n].sz  = packet_out->po_data_sz;
        }
        batch->outs   [n].peer_ctx = conn->cn_peer_ctx;
        batch->outs   [n].local_sa = (struct sockaddr *) conn->cn_local_addr;
        batch->outs   [n].dest_sa  = (struct sockaddr *) conn->cn_peer_addr;
        batch->conns  [n] = conn;
        batch->packets[n] = packet_out;
        ++n;
        if (n == engine->batch_size)
        {
            n = 0;
            w = send_batch(engine, &conns_iter, batch, engine->batch_size);
            ++n_batches_sent;
            n_sent += w;
            if (w < engine->batch_size)
            {
                shrink = 1;
                break;
            }
            deadline_exceeded = check_deadline(engine);
            if (deadline_exceeded)
                break;
            grow_batch_size(engine);
        }
    }
  end_for:

    if (n > 0) {
        w = send_batch(engine, &conns_iter, batch, n);
        n_sent += w;
        shrink = w < n;
        ++n_batches_sent;
        deadline_exceeded = check_deadline(engine);
    }

    if (shrink)
        shrink_batch_size(engine);
    else if (n_batches_sent > 1 && !deadline_exceeded)
        grow_batch_size(engine);

    coi_reheap(&conns_iter, engine);

    LSQ_DEBUG("%s: sent %u packet%.*s", __func__, n_sent, n_sent != 1, "s");
}

int
lsquic_engine_has_unsent_packets (lsquic_engine_t *engine)
{
    return lsquic_mh_count(&engine->conns_out) > 0;
}


static void
reset_deadline (lsquic_engine_t *engine, lsquic_time_t now)
{
    engine->deadline = now + engine->pub.enp_settings.es_proc_time_thresh;
    engine->flags &= ~ENG_PAST_DEADLINE;
}


/* TODO: this is a user-facing function, account for load */
void
lsquic_engine_send_unsent_packets (lsquic_engine_t *engine)
{
    lsquic_conn_t *conn;
    struct conns_stailq closed_conns;

    STAILQ_INIT(&closed_conns);
    reset_deadline(engine, lsquic_time_now());

    send_packets_out(engine, &closed_conns);

    while ((conn = STAILQ_FIRST(&closed_conns))) {
        STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn);
        (void) engine_decref_conn(engine, conn, LSCONN_CLOSING);
    }

}

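/* process_connections() below is the heart of the engine: it ticks every
 * connection produced by the `next_conn' iterator and routes each one
 * according to the tick result.  TICK_SEND puts the connection on the
 * outgoing heap; TICK_CLOSE puts it on the transient closed list (and
 * removes it from the hash); connections that are not closing go onto the
 * ticked list, from which they are re-inserted into the Tickable Queue or
 * the Advisory Tick Time queue once sending is done.
 */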
static void
process_connections (lsquic_engine_t *engine, conn_iter_f next_conn,
                     lsquic_time_t now)
{
    lsquic_conn_t *conn;
    enum tick_st tick_st;
    unsigned i;
    lsquic_time_t next_tick_time;
    struct conns_stailq closed_conns, ticked_conns;

    eng_hist_tick(&engine->history, now);

    STAILQ_INIT(&closed_conns);
    STAILQ_INIT(&ticked_conns);
    reset_deadline(engine, now);

    i = 0;
    while ((conn = next_conn(engine)))
    {
        tick_st = conn->cn_if->ci_tick(conn, now);
        conn->cn_last_ticked = now + i /* Maintain relative order */ ++;
        if (tick_st & TICK_SEND)
        {
            if (!(conn->cn_flags & LSCONN_HAS_OUTGOING))
            {
                lsquic_mh_insert(&engine->conns_out, conn, conn->cn_last_sent);
                engine_incref_conn(conn, LSCONN_HAS_OUTGOING);
            }
        }
        if (tick_st & TICK_CLOSE)
        {
            STAILQ_INSERT_TAIL(&closed_conns, conn, cn_next_closed_conn);
            engine_incref_conn(conn, LSCONN_CLOSING);
            if (conn->cn_flags & LSCONN_HASHED)
                remove_conn_from_hash(engine, conn);
        }
        else
            STAILQ_INSERT_TAIL(&ticked_conns, conn, cn_next_ticked);
    }

    if (lsquic_engine_has_unsent_packets(engine))
        send_packets_out(engine, &closed_conns);

    while ((conn = STAILQ_FIRST(&closed_conns))) {
        STAILQ_REMOVE_HEAD(&closed_conns, cn_next_closed_conn);
        (void) engine_decref_conn(engine, conn, LSCONN_CLOSING);
    }

    /* TODO Heapification can be optimized by switching to the Floyd method:
     *      https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
     */
    while ((conn = STAILQ_FIRST(&ticked_conns)))
    {
        STAILQ_REMOVE_HEAD(&ticked_conns, cn_next_ticked);
        if (!(conn->cn_flags & LSCONN_TICKABLE)
            && conn->cn_if->ci_is_tickable(conn))
        {
            lsquic_mh_insert(&engine->conns_tickable, conn,
                                                    conn->cn_last_ticked);
            engine_incref_conn(conn, LSCONN_TICKABLE);
        }
        else if (!(conn->cn_flags & LSCONN_ATTQ))
        {
            next_tick_time = conn->cn_if->ci_next_tick_time(conn);
            if (next_tick_time)
            {
                if (0 == attq_add(engine->attq, conn, next_tick_time))
                    engine_incref_conn(conn, LSCONN_ATTQ);
            }
            else
                assert(0);
        }
    }

}

/* Return 0 if packet is being processed by a real connection, 1 if the
 * packet was processed, but not by a connection, and -1 on error.
 */
int
lsquic_engine_packet_in (lsquic_engine_t *engine,
    const unsigned char *packet_in_data, size_t packet_in_size,
    const struct sockaddr *sa_local, const struct sockaddr *sa_peer,
    void *peer_ctx)
{
    struct packin_parse_state ppstate;
    lsquic_packet_in_t *packet_in;

    if (packet_in_size > QUIC_MAX_PACKET_SZ)
    {
        LSQ_DEBUG("Cannot handle packet_in_size(%zd) > %d",
                                        packet_in_size, QUIC_MAX_PACKET_SZ);
        errno = E2BIG;
        return -1;
    }

    packet_in = lsquic_mm_get_packet_in(&engine->pub.enp_mm);
    if (!packet_in)
        return -1;

    /* Library does not modify packet_in_data, it is not referenced after
     * this function returns, and subsequent release of pi_data is guarded
     * by PI_OWN_DATA flag.
     */
    packet_in->pi_data = (unsigned char *) packet_in_data;
    if (0 != parse_packet_in_begin(packet_in, packet_in_size,
                                    engine->flags & ENG_SERVER, &ppstate))
    {
        LSQ_DEBUG("Cannot parse incoming packet's header");
        lsquic_mm_put_packet_in(&engine->pub.enp_mm, packet_in);
        errno = EINVAL;
        return -1;
    }

    packet_in->pi_received = lsquic_time_now();
    eng_hist_inc(&engine->history, packet_in->pi_received, sl_packets_in);
    return process_packet_in(engine, packet_in, &ppstate, sa_local, sa_peer,
                             peer_ctx);
}


#if __GNUC__ && !defined(NDEBUG)
__attribute__((weak))
#endif
unsigned
lsquic_engine_quic_versions (const lsquic_engine_t *engine)
{
    return engine->pub.enp_settings.es_versions;
}


int
lsquic_engine_earliest_adv_tick (lsquic_engine_t *engine, int *diff)
{
    const lsquic_time_t *next_time;
    lsquic_time_t now;

    if (((engine->flags & ENG_PAST_DEADLINE)
            && lsquic_mh_count(&engine->conns_out))
        || lsquic_mh_count(&engine->conns_tickable))
    {
        *diff = 0;
        return 1;
    }

    next_time = attq_next_time(engine->attq);
    if (!next_time)
        return 0;

    now = lsquic_time_now();
    *diff = (int) ((int64_t) *next_time - (int64_t) now);
    return 1;
}


unsigned
lsquic_engine_count_attq (lsquic_engine_t *engine, int from_now)
{
    lsquic_time_t now;
    now = lsquic_time_now();
    if (from_now < 0)
        now -= from_now;
    else
        now += from_now;
    return attq_count_before(engine->attq, now);
}

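/* Illustrative sketch (not part of the engine): a single-threaded driver
 * typically alternates between feeding received UDP datagrams to the
 * engine and letting it process connections, using the advisory tick time
 * to arm its next timer.  The loop structure and names below are made up.
 *
 *     int diff;
 *     while (running)
 *     {
 *         // for each datagram read from the UDP socket:
 *         //     lsquic_engine_packet_in(engine, buf, nread,
 *         //                             local_sa, peer_sa, peer_ctx);
 *         lsquic_engine_process_conns(engine);
 *         if (lsquic_engine_has_unsent_packets(engine))
 *             lsquic_engine_send_unsent_packets(engine);
 *         if (lsquic_engine_earliest_adv_tick(engine, &diff))
 *             ;   // arm a timer to fire in `diff' microseconds
 *         // otherwise, wait for the next incoming packet
 *     }
 */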