lsquic_pr_queue.c revision 03e6b668
1/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_pr_queue.c -- packet request queue. 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <netinet/in.h> 10#include <stdlib.h> 11#include <string.h> 12#include <sys/queue.h> 13#include <sys/socket.h> 14 15#include <openssl/aead.h> 16#include <openssl/rand.h> 17 18#include "lsquic.h" 19#include "lsquic_types.h" 20#include "lsquic_int_types.h" 21#include "lsquic_packet_common.h" 22#include "lsquic_packet_gquic.h" 23#include "lsquic_packet_out.h" 24#include "lsquic_packet_in.h" 25#include "lsquic_hash.h" 26#include "lsquic_conn.h" 27#include "lsquic_parse.h" 28#include "lsquic_malo.h" 29#include "lsquic_pr_queue.h" 30#include "lsquic_parse_common.h" 31#include "lsquic_tokgen.h" 32#include "lsquic_version.h" 33#include "lsquic_mm.h" 34#include "lsquic_engine_public.h" 35#include "lsquic_sizes.h" 36#include "lsquic_handshake.h" 37#include "lsquic_xxhash.h" 38 39#define LSQUIC_LOGGER_MODULE LSQLM_PRQ 40#include "lsquic_logger.h" 41 42#define MAX(a, b) ((a) > (b) ? (a) : (b)) 43#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) 44 45 46static const struct conn_iface evanescent_conn_iface; 47 48 49struct packet_req 50{ 51 struct lsquic_hash_elem pr_hash_el; 52 lsquic_cid_t pr_scid; 53 lsquic_cid_t pr_dcid; 54 enum packet_req_type pr_type; 55 enum pr_flags { 56 PR_GQUIC = 1 << 0, 57 } pr_flags; 58 enum lsquic_version pr_version; 59 unsigned pr_rst_sz; 60 struct network_path pr_path; 61}; 62 63 64struct evanescent_conn 65{ 66 struct lsquic_conn evc_conn; 67 struct packet_req *evc_req; 68 struct pr_queue *evc_queue; 69 struct lsquic_packet_out evc_packet_out; 70 struct conn_cid_elem evc_cces[1]; 71 unsigned char evc_buf[0]; 72}; 73 74 75/* [draft-ietf-quic-transport-22], Section 17.2.1 */ 76#define IQUIC_VERNEG_SIZE (1 /* Type */ + 4 /* Version (zero tag) */ \ 77 + 1 /* DCIL */ + MAX_CID_LEN + 1 /* SCIL */ + MAX_CID_LEN + \ 78 4 * N_LSQVER) 79 80 81struct pr_queue 82{ 83 TAILQ_HEAD(, lsquic_conn) prq_free_conns, 84 prq_returned_conns; 85 struct malo *prq_reqs_pool; 86 const struct lsquic_engine_public 87 *prq_enpub; 88 struct lsquic_hash *prq_reqs_hash; 89 unsigned prq_max_reqs; 90 unsigned prq_nreqs; 91 unsigned prq_max_conns; 92 unsigned prq_nconns; 93 unsigned prq_verneg_g_sz; /* Size of prq_verneg_g_buf */ 94 unsigned prq_pubres_g_sz; /* Size of prq_pubres_g_buf */ 95 96 /* GQUIC version negotiation and stateless reset packets are generated 97 * once, when the Packet Request Queue is created. For each request, 98 * these buffers are simply copied and the connection ID is replaced. 99 * 100 * Since IETF QUIC uses variable-length connections IDs, we have to 101 * generate packets every time. 
102 */ 103 unsigned char prq_pubres_g_buf[GQUIC_RESET_SZ]; 104 unsigned char prq_verneg_g_buf[1 + GQUIC_CID_LEN 105 + N_LSQVER * 4]; 106 /* We generate random nybbles in batches */ 107#define NYBBLE_COUNT_BITS 4 108#define NYBBLE_COUNT (1 << NYBBLE_COUNT_BITS) 109#define NYBBLE_MASK (NYBBLE_COUNT - 1) 110 unsigned prq_rand_nybble_off; 111 uint8_t prq_rand_nybble_buf[NYBBLE_COUNT * 2]; 112}; 113 114 115static uint8_t 116get_rand_byte (struct pr_queue *); 117 118 119static int 120comp_reqs (const void *s1, const void *s2, size_t n) 121{ 122 const struct packet_req *a, *b; 123 124 a = s1; 125 b = s2; 126 if (a->pr_type == b->pr_type && LSQUIC_CIDS_EQ(&a->pr_dcid, &b->pr_dcid)) 127 return 0; 128 else 129 return -1; 130} 131 132 133static unsigned 134hash_req (const void *p, size_t len, unsigned seed) 135{ 136 const struct packet_req *req; 137 138 req = p; 139 return XXH32(req->pr_dcid.idbuf, req->pr_dcid.len, seed); 140} 141 142 143struct pr_queue * 144prq_create (unsigned max_elems, unsigned max_conns, 145 const struct lsquic_engine_public *enpub) 146{ 147 const struct parse_funcs *pf; 148 struct pr_queue *prq; 149 struct malo *malo; 150 struct lsquic_hash *hash; 151 unsigned verneg_g_sz; 152 ssize_t prst_g_sz; 153 int len; 154 155 malo = lsquic_malo_create(sizeof(struct packet_req)); 156 if (!malo) 157 { 158 LSQ_WARN("malo_create failed: %s", strerror(errno)); 159 goto err0; 160 } 161 162 163 hash = lsquic_hash_create_ext(comp_reqs, hash_req); 164 if (!hash) 165 { 166 LSQ_WARN("cannot create hash"); 167 goto err1; 168 } 169 170 prq = malloc(sizeof(*prq)); 171 if (!prq) 172 { 173 LSQ_WARN("malloc failed: %s", strerror(errno)); 174 goto err2; 175 } 176 177 const lsquic_cid_t cid = { .len = 8, }; 178 pf = select_pf_by_ver(LSQVER_039); 179 len = lsquic_gquic_gen_ver_nego_pkt(prq->prq_verneg_g_buf, 180 sizeof(prq->prq_verneg_g_buf), &cid, 181 enpub->enp_settings.es_versions); 182 assert(len > 0); 183 if (len <= 0) 184 { 185 LSQ_ERROR("cannot generate version negotiation 
packet"); 186 goto err3; 187 } 188 verneg_g_sz = (unsigned) len; 189 190 prst_g_sz = pf->pf_generate_simple_prst(0 /* This is just placeholder */, 191 prq->prq_pubres_g_buf, sizeof(prq->prq_pubres_g_buf)); 192 if (prst_g_sz < 0) 193 { 194 LSQ_ERROR("cannot generate public reset packet"); 195 goto err3; 196 } 197 198 TAILQ_INIT(&prq->prq_free_conns); 199 TAILQ_INIT(&prq->prq_returned_conns); 200 prq->prq_reqs_hash = hash; 201 prq->prq_reqs_pool = malo; 202 prq->prq_max_reqs = max_elems; 203 prq->prq_nreqs = 0; 204 prq->prq_max_conns = max_conns; 205 prq->prq_nconns = 0; 206 prq->prq_verneg_g_sz = verneg_g_sz; 207 prq->prq_pubres_g_sz = (unsigned) prst_g_sz; 208 prq->prq_enpub = enpub; 209 prq->prq_rand_nybble_off = 0; 210 211 LSQ_INFO("initialized queue of size %d", max_elems); 212 213 return prq; 214 215 err3: 216 free(prq); 217 err2: 218 lsquic_hash_destroy(hash); 219 err1: 220 lsquic_malo_destroy(malo); 221 err0: 222 return NULL; 223} 224 225 226void 227prq_destroy (struct pr_queue *prq) 228{ 229 struct lsquic_conn *conn; 230 231 LSQ_INFO("destroy"); 232 while ((conn = TAILQ_FIRST(&prq->prq_free_conns))) 233 { 234 TAILQ_REMOVE(&prq->prq_free_conns, conn, cn_next_pr); 235 free(conn); 236 } 237 lsquic_hash_destroy(prq->prq_reqs_hash); 238 lsquic_malo_destroy(prq->prq_reqs_pool); 239 free(prq); 240} 241 242 243static struct packet_req * 244get_req (struct pr_queue *prq) 245{ 246 struct packet_req *req; 247 if (prq->prq_nreqs < prq->prq_max_reqs) 248 { 249 req = lsquic_malo_get(prq->prq_reqs_pool); 250 if (req) 251 ++prq->prq_nreqs; 252 else 253 LSQ_WARN("malo_get failed: %s", strerror(errno)); 254 return req; 255 } 256 else 257 return NULL; 258} 259 260 261static void 262put_req (struct pr_queue *prq, struct packet_req *req) 263{ 264 lsquic_malo_put(req); 265 --prq->prq_nreqs; 266} 267 268 269int 270lsquic_prq_new_req (struct pr_queue *prq, enum packet_req_type type, 271 unsigned flags, enum lsquic_version version, unsigned short data_sz, 272 const lsquic_cid_t 
*dcid, const lsquic_cid_t *scid, void *peer_ctx, 273 const struct sockaddr *local_addr, const struct sockaddr *peer_addr) 274{ 275 struct packet_req *req; 276 unsigned max, size, rand; 277 278 if (type == PACKET_REQ_PUBRES && !(flags & PR_GQUIC)) 279 { 280 if (data_sz <= IQUIC_MIN_SRST_SIZE) 281 { 282 LSQ_DEBUGC("not scheduling public reset: incoming packet for CID " 283 "%"CID_FMT" too small: %hu bytes", CID_BITS(dcid), data_sz); 284 return -1; 285 } 286 /* Use a random stateless reset size */ 287 max = MIN(IQUIC_MAX_SRST_SIZE, data_sz - 1u); 288 if (max > IQUIC_MIN_SRST_SIZE) 289 { 290 rand = get_rand_byte(prq); 291 size = IQUIC_MIN_SRST_SIZE + rand % (max - IQUIC_MIN_SRST_SIZE); 292 } 293 else 294 size = IQUIC_MIN_SRST_SIZE; 295 LSQ_DEBUGC("selected %u-byte reset size for CID %"CID_FMT 296 " (range is [%u, %u])", size, CID_BITS(dcid), 297 IQUIC_MIN_SRST_SIZE, max); 298 } 299 else 300 size = 0; 301 302 req = get_req(prq); 303 if (!req) 304 { 305 LSQ_DEBUG("out of reqs: cannot allocated another one"); 306 return -1; 307 } 308 309 req->pr_type = type; 310 req->pr_dcid = *dcid; 311 if (lsquic_hash_find(prq->prq_reqs_hash, req, sizeof(req))) 312 { 313 LSQ_DEBUG("request for this DCID and type already exists"); 314 put_req(prq, req); 315 return -1; 316 } 317 318 req->pr_hash_el.qhe_flags = 0; 319 if (!lsquic_hash_insert(prq->prq_reqs_hash, req, sizeof(req), 320 req, &req->pr_hash_el)) 321 { 322 LSQ_DEBUG("could not insert req into hash"); 323 put_req(prq, req); 324 return -1; 325 } 326 327 req->pr_flags = flags; 328 req->pr_rst_sz = size; 329 req->pr_version = version; 330 req->pr_scid = *scid; 331 req->pr_path.np_peer_ctx = peer_ctx; 332 memcpy(NP_LOCAL_SA(&req->pr_path), local_addr, 333 sizeof(req->pr_path.np_local_addr)); 334 memcpy(NP_PEER_SA(&req->pr_path), peer_addr, 335 sizeof(req->pr_path.np_peer_addr)); 336 337 LSQ_DEBUGC("scheduled %s packet for connection %"CID_FMT, 338 lsquic_preqt2str[type], CID_BITS(&req->pr_dcid)); 339 return 0; 340} 341 342 343int 
344prq_new_req (struct pr_queue *prq, enum packet_req_type type, 345 const struct lsquic_packet_in *packet_in, void *peer_ctx, 346 const struct sockaddr *local_addr, const struct sockaddr *peer_addr) 347{ 348 lsquic_ver_tag_t ver_tag; 349 enum lsquic_version version; 350 enum pr_flags flags; 351 lsquic_cid_t scid; 352 353 if (packet_in->pi_flags & PI_GQUIC) 354 flags = PR_GQUIC; 355 else 356 flags = 0; 357 358 if (packet_in->pi_quic_ver) 359 { 360 memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver, 361 sizeof(ver_tag)); 362 version = lsquic_tag2ver(ver_tag); 363 } 364 else /* Got to set it to something sensible... */ 365 version = LSQVER_ID24; 366 367 lsquic_scid_from_packet_in(packet_in, &scid); 368 return lsquic_prq_new_req(prq, type, flags, version, packet_in->pi_data_sz, 369 &packet_in->pi_dcid, &scid, peer_ctx, local_addr, peer_addr); 370} 371 372 373static size_t 374max_bufsz (const struct pr_queue *prq) 375{ 376 return MAX(MAX(MAX(IQUIC_VERNEG_SIZE, 377 IQUIC_MIN_SRST_SIZE), 378 sizeof(prq->prq_verneg_g_buf)), 379 sizeof(prq->prq_pubres_g_buf)); 380} 381 382 383static struct evanescent_conn * 384get_evconn (struct pr_queue *prq) 385{ 386 struct evanescent_conn *evconn; 387 struct lsquic_conn *lconn; 388 struct lsquic_packet_out *packet_out; 389 size_t bufsz; 390 391 if (prq->prq_nconns >= prq->prq_max_conns) 392 { /* This deserves a warning */ 393 LSQ_WARN("tried to get connection past limit of %u", prq->prq_max_conns); 394 return NULL; 395 } 396 397 lconn = TAILQ_FIRST(&prq->prq_free_conns); 398 if (lconn) 399 { 400 TAILQ_REMOVE(&prq->prq_free_conns, lconn, cn_next_pr); 401 return (struct evanescent_conn *) lconn; 402 } 403 404 bufsz = max_bufsz(prq); 405 evconn = calloc(1, sizeof(*evconn) + bufsz); 406 if (!evconn) 407 { 408 LSQ_WARN("calloc failed: %s", strerror(errno)); 409 return NULL; 410 } 411 412 /* These values stay the same between connection usages: */ 413 evconn->evc_queue = prq; 414 lconn = &evconn->evc_conn; 415 lconn->cn_cces = 
evconn->evc_cces; 416 lconn->cn_cces_mask = 1; 417 lconn->cn_n_cces = sizeof(evconn->evc_cces) / sizeof(evconn->evc_cces[0]); 418 lconn->cn_if = &evanescent_conn_iface; 419 lconn->cn_flags = LSCONN_EVANESCENT; 420 packet_out = &evconn->evc_packet_out; 421 packet_out->po_flags = PO_NOENCRYPT; 422 packet_out->po_data = evconn->evc_buf; 423 424 return evconn; 425} 426 427 428static uint8_t 429get_rand_nybble (struct pr_queue *prq) 430{ 431 uint8_t byte; 432 433 if (prq->prq_rand_nybble_off == 0) 434 RAND_bytes(prq->prq_rand_nybble_buf, sizeof(prq->prq_rand_nybble_buf)); 435 436 byte = prq->prq_rand_nybble_buf[prq->prq_rand_nybble_off / 2]; 437 if (prq->prq_rand_nybble_off & 1) 438 byte >>= 4; 439 else 440 byte &= 0xF; 441 prq->prq_rand_nybble_off = (prq->prq_rand_nybble_off + 1) & NYBBLE_MASK; 442 return byte; 443} 444 445 446static uint8_t 447get_rand_byte (struct pr_queue *prq) 448{ 449 return (get_rand_nybble(prq) << 4) | get_rand_nybble(prq); 450} 451 452 453struct lsquic_conn * 454prq_next_conn (struct pr_queue *prq) 455{ 456 struct evanescent_conn *evconn; 457 struct lsquic_conn *lconn; 458 struct lsquic_hash_elem *el; 459 struct packet_req *req; 460 struct lsquic_packet_out *packet_out; 461 int (*gen_verneg) (unsigned char *, size_t, const lsquic_cid_t *, 462 const lsquic_cid_t *, unsigned, uint8_t); 463 int len; 464 465 lconn = TAILQ_FIRST(&prq->prq_returned_conns); 466 if (lconn) 467 { 468 TAILQ_REMOVE(&prq->prq_returned_conns, lconn, cn_next_pr); 469 return lconn; 470 } 471 472 el = lsquic_hash_first(prq->prq_reqs_hash); 473 if (!el) /* Nothing is queued */ 474 return NULL; 475 476 evconn = get_evconn(prq); 477 if (!evconn) /* Reached limit or malloc failed */ 478 return NULL; 479 480 req = lsquic_hashelem_getdata(el); 481 packet_out = &evconn->evc_packet_out; 482 switch ((req->pr_type << 29) | req->pr_flags) 483 { 484 case (PACKET_REQ_VERNEG << 29) | PR_GQUIC: 485 packet_out->po_data_sz = prq->prq_verneg_g_sz; 486 packet_out->po_flags |= PO_VERNEG; 487 
memcpy(packet_out->po_data, prq->prq_verneg_g_buf, 488 prq->prq_verneg_g_sz); 489 memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN); 490 break; 491 case (PACKET_REQ_PUBRES << 29) | PR_GQUIC: 492 packet_out->po_flags &= ~PO_VERNEG; 493 packet_out->po_data_sz = prq->prq_pubres_g_sz; 494 memcpy(packet_out->po_data, prq->prq_pubres_g_buf, 495 prq->prq_pubres_g_sz); 496 memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN); 497 break; 498 case (PACKET_REQ_VERNEG << 29) | 0: 499 packet_out->po_flags |= PO_VERNEG; 500 if (req->pr_version == LSQVER_046) 501 gen_verneg = lsquic_Q046_gen_ver_nego_pkt; 502 else 503 gen_verneg = lsquic_ietf_v1_gen_ver_nego_pkt; 504 len = gen_verneg(packet_out->po_data, max_bufsz(prq), 505 /* Flip SCID/DCID here: */ &req->pr_dcid, &req->pr_scid, 506 prq->prq_enpub->enp_settings.es_versions, 507 get_rand_byte(prq)); 508 if (len > 0) 509 packet_out->po_data_sz = len; 510 else 511 packet_out->po_data_sz = 0; 512 break; 513 default: 514 packet_out->po_flags &= ~PO_VERNEG; 515 packet_out->po_data_sz = req->pr_rst_sz; 516 RAND_bytes(packet_out->po_data, req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ); 517 packet_out->po_data[0] &= ~0x80; 518 packet_out->po_data[0] |= 0x40; 519 lsquic_tg_generate_sreset(prq->prq_enpub->enp_tokgen, &req->pr_dcid, 520 packet_out->po_data + req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ); 521 break; 522 } 523 524 lsquic_hash_erase(prq->prq_reqs_hash, el); 525 evconn->evc_req = req; 526 527 lconn= &evconn->evc_conn; 528 evconn->evc_cces[0].cce_cid = req->pr_dcid; 529 packet_out->po_path = &req->pr_path; 530 531 ++prq->prq_nconns; 532 return lconn; 533} 534 535 536int 537prq_have_pending (const struct pr_queue *prq) 538{ 539 return lsquic_hash_count(prq->prq_reqs_hash) > 0; 540} 541 542 543static struct lsquic_packet_out * 544evanescent_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, size_t size) 545{ 546 struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn; 547 assert(size == 0); 
548 return &evconn->evc_packet_out; 549} 550 551 552static void 553evanescent_conn_ci_packet_sent (struct lsquic_conn *lconn, 554 struct lsquic_packet_out *packet_out) 555{ 556 struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn; 557 struct pr_queue *const prq = evconn->evc_queue; 558 559 assert(packet_out == &evconn->evc_packet_out); 560 assert(prq->prq_nconns > 0); 561 562 LSQ_DEBUGC("sent %s packet for connection %"CID_FMT"; free resources", 563 lsquic_preqt2str[ evconn->evc_req->pr_type ], 564 CID_BITS(&evconn->evc_req->pr_dcid)); 565 TAILQ_INSERT_HEAD(&prq->prq_free_conns, lconn, cn_next_pr); 566 put_req(prq, evconn->evc_req); 567 --prq->prq_nconns; 568} 569 570 571static void 572evanescent_conn_ci_packet_not_sent (struct lsquic_conn *lconn, 573 struct lsquic_packet_out *packet_out) 574{ 575 struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn; 576 struct pr_queue *const prq = evconn->evc_queue; 577 578 assert(packet_out == &evconn->evc_packet_out); 579 assert(prq->prq_nconns > 0); 580 581 LSQ_DEBUG("packet not sent; put connection onto used list"); 582 TAILQ_INSERT_HEAD(&prq->prq_returned_conns, lconn, cn_next_pr); 583} 584 585 586static enum tick_st 587evanescent_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now) 588{ 589 assert(0); 590 return TICK_CLOSE; 591} 592 593 594static void 595evanescent_conn_ci_destroy (struct lsquic_conn *lconn) 596{ 597 assert(0); 598} 599 600 601static struct lsquic_engine * 602evanescent_conn_ci_get_engine (struct lsquic_conn *lconn) 603{ 604 assert(0); 605 return NULL; 606} 607 608 609static void 610evanescent_conn_ci_hsk_done (struct lsquic_conn *lconn, 611 enum lsquic_hsk_status status) 612{ 613 assert(0); 614} 615 616 617static void 618evanescent_conn_ci_packet_in (struct lsquic_conn *lconn, 619 struct lsquic_packet_in *packet_in) 620{ 621 assert(0); 622} 623 624 625static void 626evanescent_conn_ci_client_call_on_new (struct lsquic_conn *lconn) 627{ 628 assert(0); 629} 630 
/* Return the network path of the pending request; `sa' is ignored since
 * an evanescent connection has exactly one path.
 */
static struct network_path *
evanescent_conn_ci_get_path (struct lsquic_conn *lconn,
                                                    const struct sockaddr *sa)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;

    return &evconn->evc_req->pr_path;
}


/* Must never be called on an evanescent connection. */
static unsigned char
evanescent_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    assert(0);
    return 0;
}


/* Method table for evanescent connections.  Only next_packet_to_send,
 * packet_sent, packet_not_sent, and get_path are expected to run; the
 * rest assert.
 */
static const struct conn_iface evanescent_conn_iface = {
    .ci_client_call_on_new  = evanescent_conn_ci_client_call_on_new,
    .ci_destroy             = evanescent_conn_ci_destroy,
    .ci_get_engine          = evanescent_conn_ci_get_engine,
    .ci_get_path            = evanescent_conn_ci_get_path,
    .ci_hsk_done            = evanescent_conn_ci_hsk_done,
    .ci_next_packet_to_send = evanescent_conn_ci_next_packet_to_send,
    .ci_packet_in           = evanescent_conn_ci_packet_in,
    .ci_packet_not_sent     = evanescent_conn_ci_packet_not_sent,
    .ci_packet_sent         = evanescent_conn_ci_packet_sent,
    .ci_record_addrs        = evanescent_conn_ci_record_addrs,
    .ci_tick                = evanescent_conn_ci_tick,
};


/* Human-readable names for packet request types, used in log messages. */
const char *const lsquic_preqt2str[] =
{
    [PACKET_REQ_VERNEG] = "version negotiation",
    [PACKET_REQ_PUBRES] = "stateless reset",
};