lsquic_pr_queue.c revision 1c9cee3e
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_pr_queue.c -- packet request queue.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <openssl/aead.h>
#include <openssl/rand.h>

#include "lsquic.h"
#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_gquic.h"
#include "lsquic_packet_out.h"
#include "lsquic_packet_in.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_parse.h"
#include "lsquic_malo.h"
#include "lsquic_pr_queue.h"
#include "lsquic_parse_common.h"
#include "lsquic_tokgen.h"
#include "lsquic_version.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_sizes.h"
#include "lsquic_handshake.h"
#include "lsquic_xxhash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_PRQ
#include "lsquic_logger.h"

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))


static const struct conn_iface evanescent_conn_iface;


struct packet_req
{
    struct lsquic_hash_elem     pr_hash_el;
    lsquic_cid_t                pr_scid;
    lsquic_cid_t                pr_dcid;
    enum packet_req_type        pr_type;
    enum pr_flags {
        PR_GQUIC    = 1 << 0,
    }                           pr_flags;
    enum lsquic_version         pr_version;
    unsigned                    pr_rst_sz;
    struct network_path         pr_path;
};


struct evanescent_conn
{
    struct lsquic_conn          evc_conn;
    struct packet_req          *evc_req;
    struct pr_queue            *evc_queue;
    struct lsquic_packet_out    evc_packet_out;
    struct conn_cid_elem        evc_cces[1];
    unsigned char               evc_buf[0];
};


/* [draft-ietf-quic-transport-22], Section 17.2.1 */
#define IQUIC_VERNEG_SIZE (1 /* Type */ + 4 /* Version (zero tag) */ \
    + 1 /* DCIL */ + MAX_CID_LEN + 1 /* SCIL */ + MAX_CID_LEN + \
    4 * N_LSQVER)


struct pr_queue
{
    TAILQ_HEAD(, lsquic_conn)   prq_free_conns,
                                prq_returned_conns;
    struct malo                *prq_reqs_pool;
    const struct lsquic_engine_public
                               *prq_enpub;
    struct lsquic_hash         *prq_reqs_hash;
    unsigned                    prq_max_reqs;
    unsigned                    prq_nreqs;
    unsigned                    prq_max_conns;
    unsigned                    prq_nconns;
    unsigned                    prq_verneg_g_sz;    /* Size of prq_verneg_g_buf */
    unsigned                    prq_pubres_g_sz;    /* Size of prq_pubres_g_buf */

    /* GQUIC version negotiation and stateless reset packets are generated
     * once, when the Packet Request Queue is created.  For each request,
     * these buffers are simply copied and the connection ID is replaced.
     *
     * Since IETF QUIC uses variable-length connection IDs, we have to
     * generate packets every time.
     */
    unsigned char               prq_pubres_g_buf[GQUIC_RESET_SZ];
    unsigned char               prq_verneg_g_buf[1 + GQUIC_CID_LEN
                                                        + N_LSQVER * 4];
    /* We generate random nybbles in batches */
#define NYBBLE_COUNT_BITS 4
#define NYBBLE_COUNT (1 << NYBBLE_COUNT_BITS)
#define NYBBLE_MASK (NYBBLE_COUNT - 1)
    unsigned                    prq_rand_nybble_off;
    uint8_t                     prq_rand_nybble_buf[NYBBLE_COUNT * 2];
};


static uint8_t
get_rand_byte (struct pr_queue *);


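/* Requests are keyed by (pr_type, pr_dcid): comp_reqs() below considers two
 * requests equal when both the request type and the destination CID match,
 * while hash_req() hashes the DCID alone.  The size arguments supplied by
 * the hash table are ignored -- comparison and hashing operate directly on
 * the packet_req structures.
 */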
static int
comp_reqs (const void *s1, const void *s2, size_t n)
{
    const struct packet_req *a, *b;

    a = s1;
    b = s2;
    if (a->pr_type == b->pr_type && LSQUIC_CIDS_EQ(&a->pr_dcid, &b->pr_dcid))
        return 0;
    else
        return -1;
}


static unsigned
hash_req (const void *p, size_t len, unsigned seed)
{
    const struct packet_req *req;

    req = p;
    return XXH32(req->pr_dcid.idbuf, req->pr_dcid.len, seed);
}


struct pr_queue *
prq_create (unsigned max_elems, unsigned max_conns,
                                const struct lsquic_engine_public *enpub)
{
    const struct parse_funcs *pf;
    struct pr_queue *prq;
    struct malo *malo;
    struct lsquic_hash *hash;
    unsigned verneg_g_sz;
    ssize_t prst_g_sz;
    int len;

    malo = lsquic_malo_create(sizeof(struct packet_req));
    if (!malo)
    {
        LSQ_WARN("malo_create failed: %s", strerror(errno));
        goto err0;
    }

    hash = lsquic_hash_create_ext(comp_reqs, hash_req);
    if (!hash)
    {
        LSQ_WARN("cannot create hash");
        goto err1;
    }

    prq = malloc(sizeof(*prq));
    if (!prq)
    {
        LSQ_WARN("malloc failed: %s", strerror(errno));
        goto err2;
    }

    const lsquic_cid_t cid = { .len = 8, };
    pf = select_pf_by_ver(LSQVER_039);
    len = lsquic_gquic_gen_ver_nego_pkt(prq->prq_verneg_g_buf,
                    sizeof(prq->prq_verneg_g_buf), &cid,
                    enpub->enp_settings.es_versions);
    assert(len > 0);
    if (len <= 0)
    {
        LSQ_ERROR("cannot generate version negotiation packet");
        goto err3;
    }
    verneg_g_sz = (unsigned) len;

    prst_g_sz = pf->pf_generate_simple_prst(0 /* This is just placeholder */,
                    prq->prq_pubres_g_buf, sizeof(prq->prq_pubres_g_buf));
    if (prst_g_sz < 0)
    {
        LSQ_ERROR("cannot generate public reset packet");
        goto err3;
    }

    TAILQ_INIT(&prq->prq_free_conns);
    TAILQ_INIT(&prq->prq_returned_conns);
    prq->prq_reqs_hash = hash;
    prq->prq_reqs_pool = malo;
    prq->prq_max_reqs = max_elems;
    prq->prq_nreqs = 0;
    prq->prq_max_conns = max_conns;
    prq->prq_nconns = 0;
    prq->prq_verneg_g_sz = verneg_g_sz;
    prq->prq_pubres_g_sz = (unsigned) prst_g_sz;
    prq->prq_enpub = enpub;
    prq->prq_rand_nybble_off = 0;

    LSQ_INFO("initialized queue of size %d", max_elems);

    return prq;

  err3:
    free(prq);
  err2:
    lsquic_hash_destroy(hash);
  err1:
    lsquic_malo_destroy(malo);
  err0:
    return NULL;
}


void
prq_destroy (struct pr_queue *prq)
{
    struct lsquic_conn *conn;

    LSQ_INFO("destroy");
    while ((conn = TAILQ_FIRST(&prq->prq_free_conns)))
    {
        TAILQ_REMOVE(&prq->prq_free_conns, conn, cn_next_pr);
        free(conn);
    }
    lsquic_hash_destroy(prq->prq_reqs_hash);
    lsquic_malo_destroy(prq->prq_reqs_pool);
    free(prq);
}


static struct packet_req *
get_req (struct pr_queue *prq)
{
    struct packet_req *req;
    if (prq->prq_nreqs < prq->prq_max_reqs)
    {
        req = lsquic_malo_get(prq->prq_reqs_pool);
        if (req)
            ++prq->prq_nreqs;
        else
            LSQ_WARN("malo_get failed: %s", strerror(errno));
        return req;
    }
    else
        return NULL;
}


static void
put_req (struct pr_queue *prq, struct packet_req *req)
{
    lsquic_malo_put(req);
    --prq->prq_nreqs;
}


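/* Schedule a single reply packet -- version negotiation or stateless
 * reset -- for an incoming packet that cannot be dispatched to a
 * connection.  Returns 0 on success; returns -1 if the incoming packet is
 * too small to warrant a stateless reset, if the request limit has been
 * reached, or if a request with the same type and DCID is already queued.
 * IETF stateless resets get a randomized size that is always smaller than
 * the incoming packet.
 */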
int
prq_new_req (struct pr_queue *prq, enum packet_req_type type,
         const struct lsquic_packet_in *packet_in, void *peer_ctx,
         const struct sockaddr *local_addr, const struct sockaddr *peer_addr)
{
    struct packet_req *req;
    lsquic_ver_tag_t ver_tag;
    enum lsquic_version version;
    enum pr_flags flags;
    unsigned max, size, rand;

    if (packet_in->pi_flags & PI_GQUIC)
        flags = PR_GQUIC;
    else
        flags = 0;

    if (packet_in->pi_quic_ver)
    {
        memcpy(&ver_tag, packet_in->pi_data + packet_in->pi_quic_ver,
                                                            sizeof(ver_tag));
        version = lsquic_tag2ver(ver_tag);
    }
    else /* Got to set it to something sensible... */
        version = LSQVER_ID23;

    if (type == PACKET_REQ_PUBRES && !(flags & PR_GQUIC))
    {
        if (packet_in->pi_data_sz <= IQUIC_MIN_SRST_SIZE)
        {
            LSQ_DEBUGC("not scheduling public reset: incoming packet for CID "
                "%"CID_FMT" too small: %hu bytes",
                CID_BITS(&packet_in->pi_dcid), packet_in->pi_data_sz);
            return -1;
        }
        /* Use a random stateless reset size */
        max = MIN(IQUIC_MAX_SRST_SIZE, packet_in->pi_data_sz - 1u);
        if (max > IQUIC_MIN_SRST_SIZE)
        {
            rand = get_rand_byte(prq);
            size = IQUIC_MIN_SRST_SIZE + rand % (max - IQUIC_MIN_SRST_SIZE);
        }
        else
            size = IQUIC_MIN_SRST_SIZE;
        LSQ_DEBUGC("selected %u-byte reset size for CID %"CID_FMT
            " (range is [%u, %u])", size, CID_BITS(&packet_in->pi_dcid),
            IQUIC_MIN_SRST_SIZE, max);
    }
    else
        size = 0;

    req = get_req(prq);
    if (!req)
    {
        LSQ_DEBUG("out of reqs: cannot allocate another one");
        return -1;
    }

    req->pr_type = type;
    req->pr_dcid = packet_in->pi_dcid;
    if (lsquic_hash_find(prq->prq_reqs_hash, req, sizeof(req)))
    {
        LSQ_DEBUG("request for this DCID and type already exists");
        put_req(prq, req);
        return -1;
    }

    req->pr_hash_el.qhe_flags = 0;
    if (!lsquic_hash_insert(prq->prq_reqs_hash, req, sizeof(req),
                                                    req, &req->pr_hash_el))
    {
        LSQ_DEBUG("could not insert req into hash");
        put_req(prq, req);
        return -1;
    }

    req->pr_flags = flags;
    req->pr_rst_sz = size;
    req->pr_version = version;
    lsquic_scid_from_packet_in(packet_in, &req->pr_scid);
    req->pr_path.np_peer_ctx = peer_ctx;
    memcpy(NP_LOCAL_SA(&req->pr_path), local_addr,
                                    sizeof(req->pr_path.np_local_addr));
    memcpy(NP_PEER_SA(&req->pr_path), peer_addr,
                                    sizeof(req->pr_path.np_peer_addr));

    LSQ_DEBUGC("scheduled %s packet for connection %"CID_FMT,
                        lsquic_preqt2str[type], CID_BITS(&req->pr_dcid));
    return 0;
}


static size_t
max_bufsz (const struct pr_queue *prq)
{
    return  MAX(MAX(MAX(IQUIC_VERNEG_SIZE,
                        IQUIC_MIN_SRST_SIZE),
                        sizeof(prq->prq_verneg_g_buf)),
                        sizeof(prq->prq_pubres_g_buf));
}


static struct evanescent_conn *
get_evconn (struct pr_queue *prq)
{
    struct evanescent_conn *evconn;
    struct lsquic_conn *lconn;
    struct lsquic_packet_out *packet_out;
    size_t bufsz;

    if (prq->prq_nconns >= prq->prq_max_conns)
    {   /* This deserves a warning */
        LSQ_WARN("tried to get connection past limit of %u",
                                                        prq->prq_max_conns);
        return NULL;
    }

    lconn = TAILQ_FIRST(&prq->prq_free_conns);
    if (lconn)
    {
        TAILQ_REMOVE(&prq->prq_free_conns, lconn, cn_next_pr);
        return (struct evanescent_conn *) lconn;
    }

    bufsz = max_bufsz(prq);
    evconn = calloc(1, sizeof(*evconn) + bufsz);
    if (!evconn)
    {
        LSQ_WARN("calloc failed: %s", strerror(errno));
        return NULL;
    }

    /* These values stay the same between connection usages: */
    evconn->evc_queue = prq;
    lconn = &evconn->evc_conn;
    lconn->cn_cces = evconn->evc_cces;
    lconn->cn_cces_mask = 1;
    lconn->cn_n_cces = sizeof(evconn->evc_cces) / sizeof(evconn->evc_cces[0]);
    lconn->cn_if = &evanescent_conn_iface;
    lconn->cn_flags = LSCONN_EVANESCENT;
    packet_out = &evconn->evc_packet_out;
    packet_out->po_flags = PO_NOENCRYPT;
    packet_out->po_data = evconn->evc_buf;

    return evconn;
}


static uint8_t
get_rand_nybble (struct pr_queue *prq)
{
    uint8_t byte;

    if (prq->prq_rand_nybble_off == 0)
        RAND_bytes(prq->prq_rand_nybble_buf, sizeof(prq->prq_rand_nybble_buf));

    byte = prq->prq_rand_nybble_buf[prq->prq_rand_nybble_off / 2];
    if (prq->prq_rand_nybble_off & 1)
        byte >>= 4;
    else
        byte &= 0xF;
    prq->prq_rand_nybble_off = (prq->prq_rand_nybble_off + 1) & NYBBLE_MASK;
    return byte;
}


static uint8_t
get_rand_byte (struct pr_queue *prq)
{
    return (get_rand_nybble(prq) << 4) | get_rand_nybble(prq);
}


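/* Return the next evanescent connection with a packet ready to be sent, or
 * NULL if nothing is queued or the connection limit has been reached.
 * Connections returned to the queue via ci_packet_not_sent() are handed out
 * first.  The switch below dispatches on the combination of request type
 * and the GQUIC flag: GQUIC version negotiation and public reset packets
 * are copied from the buffers pre-generated in prq_create() with the CID
 * patched in, IETF version negotiation packets are generated per request,
 * and the default case builds an IETF stateless reset -- random bytes
 * followed by the stateless reset token for the DCID.
 */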
struct lsquic_conn *
prq_next_conn (struct pr_queue *prq)
{
    struct evanescent_conn *evconn;
    struct lsquic_conn *lconn;
    struct lsquic_hash_elem *el;
    struct packet_req *req;
    struct lsquic_packet_out *packet_out;
    int (*gen_verneg) (unsigned char *, size_t, const lsquic_cid_t *,
                                    const lsquic_cid_t *, unsigned, uint8_t);
    int len;

    lconn = TAILQ_FIRST(&prq->prq_returned_conns);
    if (lconn)
    {
        TAILQ_REMOVE(&prq->prq_returned_conns, lconn, cn_next_pr);
        return lconn;
    }

    el = lsquic_hash_first(prq->prq_reqs_hash);
    if (!el)            /* Nothing is queued */
        return NULL;

    evconn = get_evconn(prq);
    if (!evconn)        /* Reached limit or malloc failed */
        return NULL;

    req = lsquic_hashelem_getdata(el);
    packet_out = &evconn->evc_packet_out;
    switch ((req->pr_type << 29) | req->pr_flags)
    {
    case (PACKET_REQ_VERNEG << 29) | PR_GQUIC:
        packet_out->po_data_sz = prq->prq_verneg_g_sz;
        packet_out->po_flags |= PO_VERNEG;
        memcpy(packet_out->po_data, prq->prq_verneg_g_buf,
                                                    prq->prq_verneg_g_sz);
        memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN);
        break;
    case (PACKET_REQ_PUBRES << 29) | PR_GQUIC:
        packet_out->po_flags &= ~PO_VERNEG;
        packet_out->po_data_sz = prq->prq_pubres_g_sz;
        memcpy(packet_out->po_data, prq->prq_pubres_g_buf,
                                                    prq->prq_pubres_g_sz);
        memcpy(packet_out->po_data + 1, req->pr_dcid.idbuf, GQUIC_CID_LEN);
        break;
    case (PACKET_REQ_VERNEG << 29) | 0:
        packet_out->po_flags |= PO_VERNEG;
        if (req->pr_version == LSQVER_046)
            gen_verneg = lsquic_Q046_gen_ver_nego_pkt;
        else
            gen_verneg = lsquic_ietf_v1_gen_ver_nego_pkt;
        len = gen_verneg(packet_out->po_data, max_bufsz(prq),
                    /* Flip SCID/DCID here: */ &req->pr_dcid, &req->pr_scid,
                    prq->prq_enpub->enp_settings.es_versions,
                    get_rand_byte(prq));
        if (len > 0)
            packet_out->po_data_sz = len;
        else
            packet_out->po_data_sz = 0;
        break;
    default:
        packet_out->po_flags &= ~PO_VERNEG;
        packet_out->po_data_sz = req->pr_rst_sz;
        RAND_bytes(packet_out->po_data,
                                    req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ);
        packet_out->po_data[0] &= ~0x80;
        packet_out->po_data[0] |=  0x40;
        lsquic_tg_generate_sreset(prq->prq_enpub->enp_tokgen, &req->pr_dcid,
            packet_out->po_data + req->pr_rst_sz - IQUIC_SRESET_TOKEN_SZ);
        break;
    }

    lsquic_hash_erase(prq->prq_reqs_hash, el);
    evconn->evc_req = req;

    lconn = &evconn->evc_conn;
    evconn->evc_cces[0].cce_cid = req->pr_dcid;
    packet_out->po_path = &req->pr_path;

    ++prq->prq_nconns;
    return lconn;
}


int
prq_have_pending (const struct pr_queue *prq)
{
    return lsquic_hash_count(prq->prq_reqs_hash) > 0;
}


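/* The callbacks below implement the minimal conn_iface needed to push a
 * single prepared packet out through the engine.  ci_next_packet_to_send()
 * hands out that packet; ci_packet_sent() releases the request and parks
 * the connection on the free list for reuse; ci_packet_not_sent() puts it
 * on the returned list so that prq_next_conn() hands it out again on the
 * next call.  Apart from ci_get_path(), which returns the path stored with
 * the request, the remaining callbacks should never be invoked for an
 * evanescent connection and simply assert.
 */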
static struct lsquic_packet_out *
evanescent_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, size_t size)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    assert(size == 0);
    return &evconn->evc_packet_out;
}


static void
evanescent_conn_ci_packet_sent (struct lsquic_conn *lconn,
                                        struct lsquic_packet_out *packet_out)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    struct pr_queue *const prq = evconn->evc_queue;

    assert(packet_out == &evconn->evc_packet_out);
    assert(prq->prq_nconns > 0);

    LSQ_DEBUGC("sent %s packet for connection %"CID_FMT"; free resources",
        lsquic_preqt2str[ evconn->evc_req->pr_type ],
        CID_BITS(&evconn->evc_req->pr_dcid));
    TAILQ_INSERT_HEAD(&prq->prq_free_conns, lconn, cn_next_pr);
    put_req(prq, evconn->evc_req);
    --prq->prq_nconns;
}


static void
evanescent_conn_ci_packet_not_sent (struct lsquic_conn *lconn,
                                        struct lsquic_packet_out *packet_out)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;
    struct pr_queue *const prq = evconn->evc_queue;

    assert(packet_out == &evconn->evc_packet_out);
    assert(prq->prq_nconns > 0);

    LSQ_DEBUG("packet not sent; put connection onto used list");
    TAILQ_INSERT_HEAD(&prq->prq_returned_conns, lconn, cn_next_pr);
}


static enum tick_st
evanescent_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    assert(0);
    return TICK_CLOSE;
}


static void
evanescent_conn_ci_destroy (struct lsquic_conn *lconn)
{
    assert(0);
}


static struct lsquic_engine *
evanescent_conn_ci_get_engine (struct lsquic_conn *lconn)
{
    assert(0);
    return NULL;
}


static void
evanescent_conn_ci_hsk_done (struct lsquic_conn *lconn,
                                                enum lsquic_hsk_status status)
{
    assert(0);
}


static void
evanescent_conn_ci_packet_in (struct lsquic_conn *lconn,
                                        struct lsquic_packet_in *packet_in)
{
    assert(0);
}


static void
evanescent_conn_ci_client_call_on_new (struct lsquic_conn *lconn)
{
    assert(0);
}


static struct network_path *
evanescent_conn_ci_get_path (struct lsquic_conn *lconn,
                                                    const struct sockaddr *sa)
{
    struct evanescent_conn *const evconn = (struct evanescent_conn *) lconn;

    return &evconn->evc_req->pr_path;
}


static unsigned char
evanescent_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    assert(0);
    return 0;
}


static const struct conn_iface evanescent_conn_iface = {
    .ci_client_call_on_new  = evanescent_conn_ci_client_call_on_new,
    .ci_destroy             = evanescent_conn_ci_destroy,
    .ci_get_engine          = evanescent_conn_ci_get_engine,
    .ci_get_path            = evanescent_conn_ci_get_path,
    .ci_hsk_done            = evanescent_conn_ci_hsk_done,
    .ci_next_packet_to_send = evanescent_conn_ci_next_packet_to_send,
    .ci_packet_in           = evanescent_conn_ci_packet_in,
    .ci_packet_not_sent     = evanescent_conn_ci_packet_not_sent,
    .ci_packet_sent         = evanescent_conn_ci_packet_sent,
    .ci_record_addrs        = evanescent_conn_ci_record_addrs,
    .ci_tick                = evanescent_conn_ci_tick,
};


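/* Human-readable names for enum packet_req_type values; used in the log
 * messages above.
 */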
const char *const lsquic_preqt2str[] =
{
    [PACKET_REQ_VERNEG] = "version negotiation",
    [PACKET_REQ_PUBRES] = "stateless reset",
};
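

/* Illustrative sketch of how this queue is expected to be driven, based
 * only on the interface in this file; the actual call sites live in the
 * engine code, and send_packet() below is an assumed stand-in for the
 * engine's send step, not a real function:
 *
 *     struct pr_queue *prq = prq_create(max_elems, max_conns, enpub);
 *
 *     // When a packet arrives that cannot be dispatched to a connection:
 *     prq_new_req(prq, PACKET_REQ_PUBRES, packet_in, peer_ctx,
 *                                                     local_sa, peer_sa);
 *
 *     // When the engine is ready to send:
 *     struct lsquic_conn *lconn;
 *     while ((lconn = prq_next_conn(prq)))
 *     {
 *         struct lsquic_packet_out *packet_out
 *                     = lconn->cn_if->ci_next_packet_to_send(lconn, 0);
 *         if (send_packet(packet_out))
 *             lconn->cn_if->ci_packet_sent(lconn, packet_out);
 *         else
 *             lconn->cn_if->ci_packet_not_sent(lconn, packet_out);
 *     }
 *
 *     prq_destroy(prq);
 */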