lsquic_pr_queue.c revision b1a7c3f9
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc. See LICENSE. */
/*
 * lsquic_pr_queue.c -- packet request queue.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#ifndef WIN32
#include <netinet/in.h>
#include <sys/socket.h>
#endif

#include <openssl/aead.h>
#include <openssl/rand.h>

#include "lsquic.h"
#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_gquic.h"
#include "lsquic_packet_out.h"
#include "lsquic_packet_in.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_parse.h"
#include "lsquic_malo.h"
#include "lsquic_pr_queue.h"
#include "lsquic_parse_common.h"
#include "lsquic_tokgen.h"
#include "lsquic_version.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_sizes.h"
#include "lsquic_handshake.h"
#include "lsquic_xxhash.h"
#include "lsquic_crand.h"

#define LSQUIC_LOGGER_MODULE LSQLM_PRQ
#include "lsquic_logger.h"

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))


static const struct conn_iface evanescent_conn_iface;


struct packet_req
{
    struct lsquic_hash_elem     pr_hash_el;
    lsquic_cid_t                pr_scid;
    lsquic_cid_t                pr_dcid;
    enum packet_req_type        pr_type;
    enum pr_flags {
        PR_GQUIC    = 1 << 0,
    }                           pr_flags;
    enum lsquic_version         pr_version;
    unsigned                    pr_rst_sz;
    struct network_path         pr_path;
};


/* An evanescent connection is a transient, single-use connection object:
 * it exists only to send one packet (version negotiation or stateless/public
 * reset) generated from a packet request.
 */
struct evanescent_conn
{
    struct lsquic_conn          evc_conn;
    struct packet_req          *evc_req;
    struct pr_queue            *evc_queue;
    struct lsquic_packet_out    evc_packet_out;
    struct conn_cid_elem        evc_cces[1];
    enum {
        EVC_DROP    = 1 << 0,
    }                           evc_flags;
    unsigned char               evc_buf[0];
};


/* [draft-ietf-quic-transport-22], Section 17.2.1 */
#define IQUIC_VERNEG_SIZE (1 /* Type */ + 4 /* Version (zero tag) */ \
    + 1 /* DCIL */ + MAX_CID_LEN + 1 /* SCIL */ + MAX_CID_LEN + \
    4 * N_LSQVER)


struct pr_queue
{
    TAILQ_HEAD(, lsquic_conn)   prq_free_conns,
                                prq_returned_conns;
    struct malo                *prq_reqs_pool;
    const struct lsquic_engine_public
                               *prq_enpub;
    struct lsquic_hash         *prq_reqs_hash;
    unsigned                    prq_max_reqs;
    unsigned                    prq_nreqs;
    unsigned                    prq_max_conns;
    unsigned                    prq_nconns;
    unsigned                    prq_verneg_g_sz;    /* Size of prq_verneg_g_buf */
    unsigned                    prq_pubres_g_sz;    /* Size of prq_pubres_g_buf */

    /* GQUIC version negotiation and stateless reset packets are generated
     * once, when the Packet Request Queue is created.  For each request,
     * these buffers are simply copied and the connection ID is replaced.
     *
     * Since IETF QUIC uses variable-length connection IDs, we have to
     * generate packets every time.
     */
    unsigned char               prq_pubres_g_buf[GQUIC_RESET_SZ];
    unsigned char               prq_verneg_g_buf[1 + GQUIC_CID_LEN
                                                        + N_LSQVER * 4];
};


static int
comp_reqs (const void *s1, const void *s2, size_t n)
{
    const struct packet_req *a, *b;

    a = s1;
    b = s2;
    if (a->pr_type == b->pr_type && LSQUIC_CIDS_EQ(&a->pr_dcid, &b->pr_dcid))
        return 0;
    else
        return -1;
}


static unsigned
hash_req (const void *p, size_t len, unsigned seed)
{
    const struct packet_req *req;

    req = p;
    return XXH32(req->pr_dcid.idbuf, req->pr_dcid.len, seed);
}


struct pr_queue *
lsquic_prq_create (unsigned max_elems, unsigned max_conns,
                    const struct lsquic_engine_public *enpub)
{
    const struct parse_funcs *pf;
    struct pr_queue *prq;
    struct malo *malo;
    struct lsquic_hash *hash;
    unsigned verneg_g_sz;
    ssize_t prst_g_sz;
    int len;

    malo = lsquic_malo_create(sizeof(struct packet_req));
    if (!malo)
    {
        LSQ_WARN("malo_create failed: %s", strerror(errno));
        goto err0;
    }

    hash = lsquic_hash_create_ext(comp_reqs, hash_req);
    if (!hash)
    {
        LSQ_WARN("cannot create hash");
        goto err1;
    }

    prq = malloc(sizeof(*prq));
    if (!prq)
    {
        LSQ_WARN("malloc failed: %s", strerror(errno));
        goto err2;
    }

    const lsquic_cid_t cid = { .len = 8, };
    pf = select_pf_by_ver(LSQVER_043);
    len = lsquic_gquic_gen_ver_nego_pkt(prq->prq_verneg_g_buf,
                    sizeof(prq->prq_verneg_g_buf), &cid,
                    enpub->enp_settings.es_versions);
    assert(len > 0);
    if (len <= 0)
    {
        LSQ_ERROR("cannot generate version negotiation packet");
        goto err3;
    }
    verneg_g_sz = (unsigned) len;

    prst_g_sz = pf->pf_generate_simple_prst(0 /* This is just placeholder */,
                    prq->prq_pubres_g_buf, sizeof(prq->prq_pubres_g_buf));
    if (prst_g_sz < 0)
    {
        LSQ_ERROR("cannot generate public reset packet");
        goto err3;
    }

    TAILQ_INIT(&prq->prq_free_conns);
    TAILQ_INIT(&prq->prq_returned_conns);
    prq->prq_reqs_hash = hash;
    prq->prq_reqs_pool = malo;
    prq->prq_max_reqs = max_elems;
    prq->prq_nreqs = 0;
    prq->prq_max_conns = max_conns;
    prq->prq_nconns = 0;
    prq->prq_verneg_g_sz = verneg_g_sz;
    prq->prq_pubres_g_sz = (unsigned) prst_g_sz;
    prq->prq_enpub = enpub;

    LSQ_INFO("initialized queue of size %d", max_elems);

    return prq;

  err3:
    free(prq);
  err2:
    lsquic_hash_destroy(hash);
  err1:
    lsquic_malo_destroy(malo);
  err0:
    return NULL;
}


void
lsquic_prq_destroy (struct pr_queue *prq)
{
    struct lsquic_conn *conn;

    LSQ_INFO("destroy");
    while ((conn = TAILQ_FIRST(&prq->prq_free_conns)))
    {
        TAILQ_REMOVE(&prq->prq_free_conns, conn, cn_next_pr);
        free(conn);
    }
    lsquic_hash_destroy(prq->prq_reqs_hash);
    lsquic_malo_destroy(prq->prq_reqs_pool);
    free(prq);
}


static struct packet_req *
get_req (struct pr_queue *prq)
{
    struct packet_req *req;
    if (prq->prq_nreqs < prq->prq_max_reqs)
    {
        req = lsquic_malo_get(prq->prq_reqs_pool);
        if (req)
            ++prq->prq_nreqs;
        else
            LSQ_WARN("malo_get failed: %s", strerror(errno));
        return req;
    }
    else
        return NULL;
}


static void
put_req (struct pr_queue *prq, struct packet_req *req)
{
    lsquic_malo_put(req);
    --prq->prq_nreqs;
}


static int
lsquic_prq_new_req_ext (struct pr_queue *prq, enum packet_req_type type,
    unsigned flags, enum lsquic_version version, unsigned short data_sz,
    const lsquic_cid_t *dcid, const lsquic_cid_t *scid, void *peer_ctx,
    const struct sockaddr *local_addr, const struct sockaddr *peer_addr)
{
    struct packet_req *req;
    unsigned max, size, rand;

    if (type == PACKET_REQ_PUBRES && !(flags & PR_GQUIC))
    {
        if (data_sz <= IQUIC_MIN_SRST_SIZE)
        {
            LSQ_DEBUGC("not scheduling public reset: incoming packet for CID "
                "%"CID_FMT" too small: %hu bytes", CID_BITS(dcid), data_sz);
            return -1;
        }
        /* Use a random stateless reset size */
        max = MIN(IQUIC_MAX_SRST_SIZE, data_sz - 1u);
        if (max > IQUIC_MIN_SRST_SIZE)
        {
            rand = lsquic_crand_get_byte(prq->prq_enpub->enp_crand);
            size = IQUIC_MIN_SRST_SIZE + rand % (max - IQUIC_MIN_SRST_SIZE);
        }
        else
            size = IQUIC_MIN_SRST_SIZE;
        LSQ_DEBUGC("selected %u-byte reset size for CID %"CID_FMT
            " (range is [%u, %u])", size, CID_BITS(dcid),
            IQUIC_MIN_SRST_SIZE, max);
    }
    else
        size = 0;

    req = get_req(prq);
    if (!req)
    {
        LSQ_DEBUG("out of reqs: cannot allocate another one");
        return -1;
    }

    req->pr_type = type;
    req->pr_dcid = *dcid;
    if (lsquic_hash_find(prq->prq_reqs_hash, req, sizeof(*req)))
    {
        LSQ_DEBUG("request for this DCID and type already exists");
        put_req(prq, req);
        return -1;
    }

    req->pr_hash_el.qhe_flags = 0;
    if (!lsquic_hash_insert(prq->prq_reqs_hash, req, sizeof(*req),
                                                    req, &req->pr_hash_el))
    {
        LSQ_DEBUG("could not insert req into hash");
        put_req(prq, req);
        return -1;
    }

    req->pr_flags = flags;
    req->pr_rst_sz = size;
    req->pr_version = version;
    req->pr_scid = *scid;
    req->pr_path.np_peer_ctx = peer_ctx;
    memcpy(req->pr_path.np_local_addr, local_addr,
                                        sizeof(req->pr_path.np_local_addr));
    memcpy(NP_PEER_SA(&req->pr_path), peer_addr,
                                        sizeof(req->pr_path.np_peer_addr));

    LSQ_DEBUGC("scheduled %s packet for connection %"CID_FMT,
                        lsquic_preqt2str[type], CID_BITS(&req->pr_dcid));
    return 0;
}


int
lsquic_prq_new_req (struct pr_queue *prq, enum packet_req_type type,
        const struct lsquic_packet_in *packet_in, void *peer_ctx,
        const struct sockaddr *local_addr, const struct sockaddr *peer_addr)
{
    lsquic_ver_tag_t ver_tag;
    enum lsquic_version version;
    enum pr_flags flags;
    lsquic_cid_t scid;

    if (packet_in->pi_flags & PI_GQUIC)
        flags = PR_GQUIC;
    else
        flags = 0;

    if (packet_in->pi_quic_ver)
    {
        memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver,
                                                        sizeof(ver_tag));
        version = lsquic_tag2ver(ver_tag);
    }
    else    /* Got to set it to something sensible... */
        version = LSQVER_ID27;

    lsquic_scid_from_packet_in(packet_in, &scid);
    return lsquic_prq_new_req_ext(prq, type, flags, version,
                packet_in->pi_data_sz, &packet_in->pi_dcid, &scid,
                peer_ctx, local_addr, peer_addr);
}


static size_t
max_bufsz (const struct pr_queue *prq)
{
    return  MAX(MAX(MAX(IQUIC_VERNEG_SIZE,
                        IQUIC_MIN_SRST_SIZE),
                        sizeof(prq->prq_verneg_g_buf)),
                        sizeof(prq->prq_pubres_g_buf));
}


static struct evanescent_conn *
get_evconn (struct pr_queue *prq)
{
    struct evanescent_conn *evconn;
    struct lsquic_conn *lconn;
    struct lsquic_packet_out *packet_out;
    size_t bufsz;

    if (prq->prq_nconns >= prq->prq_max_conns)
    {   /* This deserves a warning */
        LSQ_WARN("tried to get connection past limit of %u",
                                                        prq->prq_max_conns);
        return NULL;
    }

    lconn = TAILQ_FIRST(&prq->prq_free_conns);
    if (lconn)
    {
        TAILQ_REMOVE(&prq->prq_free_conns, lconn, cn_next_pr);
        evconn = (struct evanescent_conn *) lconn;
        evconn->evc_flags = 0;
        return evconn;
    }

    bufsz = max_bufsz(prq);
    evconn = calloc(1, sizeof(*evconn) + bufsz);
    if (!evconn)
    {
        LSQ_WARN("calloc failed: %s", strerror(errno));
        return NULL;
    }

    /* These values stay the same between connection usages: */
    evconn->evc_queue = prq;
    lconn = &evconn->evc_conn;
    lconn->cn_cces = evconn->evc_cces;
    lconn->cn_cces_mask = 1;
    lconn->cn_n_cces = sizeof(evconn->evc_cces) / sizeof(evconn->evc_cces[0]);
    lconn->cn_if = &evanescent_conn_iface;
    lconn->cn_flags = LSCONN_EVANESCENT;
    packet_out = &evconn->evc_packet_out;
    packet_out->po_flags = PO_NOENCRYPT;
    packet_out->po_data = evconn->evc_buf;

    return evconn;
}


struct lsquic_conn *
lsquic_prq_next_conn (struct pr_queue *prq)
{
    struct evanescent_conn *evconn;
    struct lsquic_conn *lconn;
    struct lsquic_hash_elem *el;
    struct packet_req *req;
    struct lsquic_packet_out *packet_out;
    int (*gen_verneg) (unsigned char *, size_t, const lsquic_cid_t *,
                                    const lsquic_cid_t *, unsigned, uint8_t);
    int len;

    lconn = TAILQ_FIRST(&prq->prq_returned_conns);
    if (lconn)
    {
        TAILQ_REMOVE(&prq->prq_returned_conns, lconn, cn_next_pr);
        return lconn;
    }

    el = lsquic_hash_first(prq->prq_reqs_hash);
    if (!el)            /* Nothing is queued */
        return NULL;

    evconn = get_evconn(prq);
    if (!evconn)        /* Reached limit or malloc failed */
        return NULL;

    req = lsquic_hashelem_getdata(el);
    packet_out = &evconn->evc_packet_out;
    /* The switch key packs the request type into the high bits and the
     * request flags into the low bits:
     */
    switch ((req->pr_type << 29) | req->pr_flags)
    {
    case (PACKET_REQ_VERNEG << 29) | PR_GQUIC:
        packet_out->po_data_sz = prq->prq_verneg_g_sz;
        packet_out->po_flags |= PO_VERNEG;
        memcpy(packet_out->po_data, prq->prq_verneg_g_buf,
                                                    prq->prq_verneg_g_sz);
        memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN);
        break;
    case (PACKET_REQ_PUBRES << 29) | PR_GQUIC:
        packet_out->po_flags &= ~PO_VERNEG;
        packet_out->po_data_sz = prq->prq_pubres_g_sz;
        memcpy(packet_out->po_data, prq->prq_pubres_g_buf,
                                                    prq->prq_pubres_g_sz);
        memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN);
        break;
    case (PACKET_REQ_VERNEG << 29) | 0:
        packet_out->po_flags |= PO_VERNEG;
        if (req->pr_version == LSQVER_046)
            gen_verneg = lsquic_Q046_gen_ver_nego_pkt;
        else
            gen_verneg = lsquic_ietf_v1_gen_ver_nego_pkt;
        len = gen_verneg(packet_out->po_data, max_bufsz(prq),
                    /* Flip SCID/DCID here: */ &req->pr_dcid, &req->pr_scid,
                    prq->prq_enpub->enp_settings.es_versions,
                    lsquic_crand_get_byte(prq->prq_enpub->enp_crand));
        if (len > 0)
            packet_out->po_data_sz = len;
        else
            packet_out->po_data_sz = 0;
        break;
    default:
        packet_out->po_flags &= ~PO_VERNEG;
        packet_out->po_data_sz = req->pr_rst_sz;
        RAND_bytes(packet_out->po_data, req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ);
        packet_out->po_data[0] &= ~0x80;
        packet_out->po_data[0] |=  0x40;
        lsquic_tg_generate_sreset(prq->prq_enpub->enp_tokgen, &req->pr_dcid,
                packet_out->po_data + req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ);
        break;
    }

    lsquic_hash_erase(prq->prq_reqs_hash, el);
    evconn->evc_req = req;

    lconn = &evconn->evc_conn;
    evconn->evc_cces[0].cce_cid = req->pr_dcid;
    packet_out->po_path = &req->pr_path;

    ++prq->prq_nconns;
    return lconn;
}


int
lsquic_prq_have_pending (const struct pr_queue *prq)
{
    return lsquic_hash_count(prq->prq_reqs_hash) > 0;
}


static struct lsquic_packet_out *
evanescent_conn_ci_next_packet_to_send (struct lsquic_conn *lconn,
                                        const struct to_coal *to_coal_UNUSED)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    assert(!to_coal_UNUSED);
    return &evconn->evc_packet_out;
}


static void
prq_free_conn (struct pr_queue *prq, struct lsquic_conn *lconn)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;

    TAILQ_INSERT_HEAD(&prq->prq_free_conns, lconn, cn_next_pr);
    put_req(prq, evconn->evc_req);
    --prq->prq_nconns;
}


static void
evanescent_conn_ci_packet_sent (struct lsquic_conn *lconn,
                                struct lsquic_packet_out *packet_out)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    struct pr_queue *const prq = evconn->evc_queue;

    assert(packet_out == &evconn->evc_packet_out);
    assert(prq->prq_nconns > 0);

    LSQ_DEBUGC("sent %s packet for connection %"CID_FMT"; free resources",
        lsquic_preqt2str[ evconn->evc_req->pr_type ],
        CID_BITS(&evconn->evc_req->pr_dcid));
    prq_free_conn(prq, lconn);
}


static void
evanescent_conn_ci_packet_not_sent (struct lsquic_conn *lconn,
                                    struct lsquic_packet_out *packet_out)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    struct pr_queue *const prq = evconn->evc_queue;

    assert(packet_out == &evconn->evc_packet_out);
    assert(prq->prq_nconns > 0);

    if (evconn->evc_flags & EVC_DROP)
    {
        LSQ_DEBUGC("packet not sent; drop connection %"CID_FMT,
                                    CID_BITS(&evconn->evc_req->pr_dcid));
        prq_free_conn(prq, lconn);
    }
    else
    {
        LSQ_DEBUG("packet not sent; put connection onto used list");
        TAILQ_INSERT_HEAD(&prq->prq_returned_conns, lconn, cn_next_pr);
    }
}


static enum tick_st
evanescent_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    assert(0);
    return TICK_CLOSE;
}


static void
evanescent_conn_ci_destroy (struct lsquic_conn *lconn)
{
    assert(0);
}


static struct lsquic_engine *
evanescent_conn_ci_get_engine (struct lsquic_conn *lconn)
{
    assert(0);
    return NULL;
}


static void
evanescent_conn_ci_hsk_done (struct lsquic_conn *lconn,
                                            enum lsquic_hsk_status status)
{
    assert(0);
}


static void
evanescent_conn_ci_packet_in (struct lsquic_conn *lconn,
                              struct lsquic_packet_in *packet_in)
{
    assert(0);
}


static void
evanescent_conn_ci_client_call_on_new (struct lsquic_conn *lconn)
{
    assert(0);
}


static struct network_path *
evanescent_conn_ci_get_path (struct lsquic_conn *lconn,
                                                const struct sockaddr *sa)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;

    return &evconn->evc_req->pr_path;
}


static unsigned char
evanescent_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    assert(0);
    return 0;
}


static const struct conn_iface evanescent_conn_iface = {
    .ci_client_call_on_new  = evanescent_conn_ci_client_call_on_new,
    .ci_destroy             = evanescent_conn_ci_destroy,
    .ci_get_engine          = evanescent_conn_ci_get_engine,
    .ci_get_path            = evanescent_conn_ci_get_path,
    .ci_hsk_done            = evanescent_conn_ci_hsk_done,
    .ci_next_packet_to_send = evanescent_conn_ci_next_packet_to_send,
    .ci_packet_in           = evanescent_conn_ci_packet_in,
    .ci_packet_not_sent     = evanescent_conn_ci_packet_not_sent,
    .ci_packet_sent         = evanescent_conn_ci_packet_sent,
    .ci_record_addrs        = evanescent_conn_ci_record_addrs,
    .ci_tick                = evanescent_conn_ci_tick,
};


const char *const lsquic_preqt2str[] =
{
    [PACKET_REQ_VERNEG] = "version negotiation",
    [PACKET_REQ_PUBRES] = "stateless reset",
};


void
lsquic_prq_drop (struct lsquic_conn *lconn)
{
    struct evanescent_conn *const evconn = (void *) lconn;

    evconn->evc_flags |= EVC_DROP;
    LSQ_DEBUGC("mark connection %"CID_FMT" for dropping",
                                    CID_BITS(&evconn->evc_req->pr_dcid));
}