/* lsquic_mini_conn_ietf.c revision 26e8f082 */
1/* Copyright (c) 2017 - 2021 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_mini_conn_ietf.c -- Mini connection used by the IETF QUIC 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <limits.h> 10#include <stddef.h> 11#include <stdint.h> 12#include <string.h> 13#include <sys/queue.h> 14#include <stdlib.h> 15 16#include "lsquic.h" 17#include "lsquic_int_types.h" 18#include "lsquic_sizes.h" 19#include "lsquic_hash.h" 20#include "lsquic_conn.h" 21#include "lsquic_mm.h" 22#include "lsquic_malo.h" 23#include "lsquic_engine_public.h" 24#include "lsquic_packet_common.h" 25#include "lsquic_packet_in.h" 26#include "lsquic_packet_out.h" 27#include "lsquic_parse.h" 28#include "lsquic_rtt.h" 29#include "lsquic_util.h" 30#include "lsquic_enc_sess.h" 31#include "lsquic_trechist.h" 32#include "lsquic_mini_conn_ietf.h" 33#include "lsquic_ev_log.h" 34#include "lsquic_trans_params.h" 35#include "lsquic_ietf.h" 36#include "lsquic_packet_ietf.h" 37#include "lsquic_attq.h" 38#include "lsquic_alarmset.h" 39#include "lsquic_crand.h" 40 41#define LSQUIC_LOGGER_MODULE LSQLM_MINI_CONN 42#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(&conn->imc_conn) 43#include "lsquic_logger.h" 44 45#define MIN(a, b) ((a) < (b) ? (a) : (b)) 46#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) 47 48static const struct conn_iface mini_conn_ietf_iface; 49 50static unsigned highest_bit_set (unsigned long long); 51 52static int 53imico_can_send (const struct ietf_mini_conn *, size_t); 54 55 56static const enum header_type el2hety[] = 57{ 58 [ENC_LEV_INIT] = HETY_HANDSHAKE, 59 [ENC_LEV_CLEAR] = HETY_INITIAL, 60 [ENC_LEV_FORW] = HETY_NOT_SET, 61 [ENC_LEV_EARLY] = 0, /* Invalid */ 62}; 63 64 65static void 66imico_destroy_packet (struct ietf_mini_conn *conn, 67 struct lsquic_packet_out *packet_out) 68{ 69 lsquic_packet_out_destroy(packet_out, conn->imc_enpub, 70 conn->imc_path.np_peer_ctx); 71} 72 73 74int 75lsquic_mini_conn_ietf_ecn_ok (const struct ietf_mini_conn *conn) 76{ 77 packno_set_t acked; 78 79 /* First flight has only Initial and Handshake packets */ 80 acked = conn->imc_acked_packnos[PNS_INIT] 81 | conn->imc_acked_packnos[PNS_HSK] 82 ; 83 return 0 != (conn->imc_ecn_packnos & acked); 84} 85 86 87#define imico_ecn_ok lsquic_mini_conn_ietf_ecn_ok 88 89 90static enum ecn 91imico_get_ecn (struct ietf_mini_conn *conn) 92{ 93 if (!conn->imc_enpub->enp_settings.es_ecn) 94 return ECN_NOT_ECT; 95 else if (!conn->imc_sent_packnos /* We set ECT0 in first flight */ 96 || imico_ecn_ok(conn)) 97 return ECN_ECT0; 98 else 99 return ECN_NOT_ECT; 100} 101 102 103static struct lsquic_packet_out * 104imico_get_packet_out (struct ietf_mini_conn *conn, 105 enum header_type header_type, size_t need) 106{ 107 struct lsquic_packet_out *packet_out; 108 enum ecn ecn; 109 110 if (need) 111 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 112 if (!(packet_out->po_flags & PO_SENT) 113 && packet_out->po_header_type == header_type 114 && lsquic_packet_out_avail(packet_out) >= need) 115 return packet_out; 116 117 if (conn->imc_next_packno >= MAX_PACKETS) 118 { 119 LSQ_DEBUG("ran out of outgoing packet numbers, won't allocate packet"); 120 return NULL; 121 } 122 123 packet_out = lsquic_packet_out_new(&conn->imc_enpub->enp_mm, NULL, 1, 124 &conn->imc_conn, 
IQUIC_PACKNO_LEN_1, NULL, NULL, &conn->imc_path, 125 header_type); 126 if (!packet_out) 127 { 128 LSQ_WARN("could not allocate packet: %s", strerror(errno)); 129 return NULL; 130 } 131 132 packet_out->po_header_type = header_type; 133 packet_out->po_packno = conn->imc_next_packno++; 134 packet_out->po_flags |= PO_MINI; 135 lsquic_packet_out_set_pns(packet_out, lsquic_hety2pns[header_type]); 136 ecn = imico_get_ecn(conn); 137 packet_out->po_lflags |= ecn << POECN_SHIFT; 138 TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next); 139 packet_out->po_loss_chain = packet_out; 140 return packet_out; 141} 142 143 144static struct ietf_mini_conn * 145cryst_get_conn (const struct mini_crypto_stream *cryst) 146{ 147 return (void *) 148 ((unsigned char *) (cryst - cryst->mcs_enc_level) 149 - offsetof(struct ietf_mini_conn, imc_streams)); 150} 151 152 153struct msg_ctx 154{ 155 const unsigned char *buf; 156 const unsigned char *const end; 157}; 158 159 160static size_t 161read_from_msg_ctx (void *ctx, void *buf, size_t len, int *fin) 162{ 163 struct msg_ctx *msg_ctx = ctx; 164 if (len > (uintptr_t) (msg_ctx->end - msg_ctx->buf)) 165 len = msg_ctx->end - msg_ctx->buf; 166 memcpy(buf, msg_ctx->buf, len); 167 msg_ctx->buf += len; 168 return len; 169} 170 171 172static int 173imico_chlo_has_been_consumed (const struct ietf_mini_conn *conn) 174{ 175 return conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off > 3 176 && conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off >= conn->imc_ch_len; 177} 178 179 180static int 181imico_maybe_process_params (struct ietf_mini_conn *conn) 182{ 183 const struct transport_params *params; 184 185 if (imico_chlo_has_been_consumed(conn) 186 && (conn->imc_flags & (IMC_ENC_SESS_INITED|IMC_HAVE_TP)) 187 == IMC_ENC_SESS_INITED) 188 { 189 params = conn->imc_conn.cn_esf.i->esfi_get_peer_transport_params( 190 conn->imc_conn.cn_enc_session); 191 if (params) 192 { 193 conn->imc_flags |= IMC_HAVE_TP; 194 conn->imc_ack_exp = params->tp_ack_delay_exponent; 195 if 
(params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE)) 196 { 197 if (params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE] 198 < conn->imc_path.np_pack_size) 199 conn->imc_path.np_pack_size = 200 params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]; 201 } 202 LSQ_DEBUG("read transport params, packet size is set to %hu bytes", 203 conn->imc_path.np_pack_size); 204 } 205 else 206 { 207 conn->imc_flags |= IMC_BAD_TRANS_PARAMS; 208 return -1; 209 } 210 } 211 212 return 0; 213} 214 215 216static ssize_t 217imico_stream_write (void *stream, const void *bufp, size_t bufsz) 218{ 219 struct mini_crypto_stream *const cryst = stream; 220 struct ietf_mini_conn *const conn = cryst_get_conn(cryst); 221 struct lsquic_conn *const lconn = &conn->imc_conn; 222 const struct parse_funcs *const pf = lconn->cn_pf; 223 struct msg_ctx msg_ctx = { bufp, (unsigned char *) bufp + bufsz, }; 224 struct lsquic_packet_out *packet_out; 225 size_t header_sz, need; 226 const unsigned char *p; 227 int len; 228 229 if (0 != imico_maybe_process_params(conn)) 230 return -1; 231 232 if (PNS_INIT == lsquic_enclev2pns[ cryst->mcs_enc_level ] 233 && (conn->imc_flags & IMC_IGNORE_INIT)) 234 { 235 LSQ_WARN("trying to write at the ignored Initial level"); 236 return bufsz; 237 } 238 239 while (msg_ctx.buf < msg_ctx.end) 240 { 241 header_sz = lconn->cn_pf->pf_calc_crypto_frame_header_sz( 242 cryst->mcs_write_off, msg_ctx.end - msg_ctx.buf); 243 need = header_sz + 1; 244 packet_out = imico_get_packet_out(conn, 245 el2hety[ cryst->mcs_enc_level ], need); 246 if (!packet_out) 247 return -1; 248 249 p = msg_ctx.buf; 250 len = pf->pf_gen_crypto_frame(packet_out->po_data + packet_out->po_data_sz, 251 lsquic_packet_out_avail(packet_out), 0, cryst->mcs_write_off, 0, 252 msg_ctx.end - msg_ctx.buf, read_from_msg_ctx, &msg_ctx); 253 if (len < 0) 254 return len; 255 EV_LOG_GENERATED_CRYPTO_FRAME(LSQUIC_LOG_CONN_ID, pf, 256 packet_out->po_data + packet_out->po_data_sz, len); 257 packet_out->po_data_sz += len; 258 packet_out->po_frame_types 
|= 1 << QUIC_FRAME_CRYPTO; 259 packet_out->po_flags |= PO_HELLO; 260 cryst->mcs_write_off += msg_ctx.buf - p; 261 } 262 263 assert(msg_ctx.buf == msg_ctx.end); 264 return bufsz; 265} 266 267 268static int 269imico_stream_flush (void *stream) 270{ 271 return 0; 272} 273 274 275static struct stream_frame * 276imico_find_stream_frame (const struct ietf_mini_conn *conn, 277 enum enc_level enc_level, unsigned read_off) 278{ 279 struct stream_frame *frame; 280 281 if (conn->imc_last_in.frame && enc_level == conn->imc_last_in.enc_level 282 && read_off == DF_ROFF(conn->imc_last_in.frame)) 283 return conn->imc_last_in.frame; 284 285 TAILQ_FOREACH(frame, &conn->imc_crypto_frames, next_frame) 286 if (enc_level == frame->stream_id && read_off == DF_ROFF(frame)) 287 return frame; 288 289 return NULL; 290} 291 292 293static void 294imico_read_chlo_size (struct ietf_mini_conn *conn, const unsigned char *buf, 295 size_t sz) 296{ 297 const unsigned char *const end = buf + sz; 298 299 assert(conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off < 4); 300 switch (conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off) 301 { 302 case 0: 303 if (buf == end) 304 return; 305 if (*buf != 1) 306 { 307 LSQ_DEBUG("Does not begin with ClientHello"); 308 conn->imc_flags |= IMC_ERROR; 309 return; 310 } 311 ++buf; 312 /* fall-through */ 313 case 1: 314 if (buf == end) 315 return; 316 if (*buf != 0) 317 { 318 LSQ_DEBUG("ClientHello larger than 16K"); 319 conn->imc_flags |= IMC_ERROR; 320 return; 321 } 322 ++buf; 323 /* fall-through */ 324 case 2: 325 if (buf == end) 326 return; 327 conn->imc_ch_len = *buf << 8; 328 ++buf; 329 /* fall-through */ 330 default: 331 if (buf == end) 332 return; 333 conn->imc_ch_len |= *buf; 334 } 335} 336 337 338static ssize_t 339imico_stream_readf (void *stream, 340 size_t (*readf)(void *, const unsigned char *, size_t, int), void *ctx) 341{ 342 struct mini_crypto_stream *const cryst = stream; 343 struct ietf_mini_conn *const conn = cryst_get_conn(cryst); 344 struct stream_frame 
*frame; 345 const unsigned char *buf; 346 size_t nread, total_read; 347 unsigned avail; 348 349 total_read = 0; 350 while ((frame = imico_find_stream_frame(conn, cryst->mcs_enc_level, 351 cryst->mcs_read_off))) 352 { 353 avail = DF_SIZE(frame) - frame->data_frame.df_read_off; 354 buf = frame->data_frame.df_data + frame->data_frame.df_read_off; 355 nread = readf(ctx, buf, avail, DF_FIN(frame)); 356 if (cryst->mcs_enc_level == ENC_LEV_CLEAR && cryst->mcs_read_off < 4) 357 imico_read_chlo_size(conn, buf, nread); 358 total_read += nread; 359 cryst->mcs_read_off += nread; 360 frame->data_frame.df_read_off += nread; 361 LSQ_DEBUG("read %zu bytes at offset %"PRIu64" on enc level %u", nread, 362 DF_ROFF(frame), cryst->mcs_enc_level); 363 if (DF_END(frame) == DF_ROFF(frame)) 364 { 365 if (frame == conn->imc_last_in.frame) 366 conn->imc_last_in.frame = NULL; 367 else 368 { 369 TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame); 370 --conn->imc_n_crypto_frames; 371 conn->imc_crypto_frames_sz -= DF_SIZE(frame); 372 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, 373 frame->packet_in); 374 lsquic_malo_put(frame); 375 } 376 } 377 if (nread < avail) 378 break; 379 } 380 381 if (total_read > 0) 382 return total_read; 383 else 384 { 385 /* CRYPTO streams never end, so zero bytes read always means 386 * EWOULDBLOCK 387 */ 388 errno = EWOULDBLOCK; 389 return -1; 390 } 391} 392 393 394static int 395imico_stream_wantX (struct mini_crypto_stream *cryst, int bit, int is_want) 396{ 397 int old; 398 399 old = (cryst->mcs_flags & (1 << bit)) > 0; 400 cryst->mcs_flags &= ~(1 << bit); 401 cryst->mcs_flags |= !!is_want << bit; 402 return old; 403} 404 405 406static int 407imico_stream_wantwrite (void *stream, int is_want) 408{ 409 return imico_stream_wantX(stream, MCSBIT_WANTWRITE, is_want); 410} 411 412 413static int 414imico_stream_wantread (void *stream, int is_want) 415{ 416 return imico_stream_wantX(stream, MCSBIT_WANTREAD, is_want); 417} 418 419 420static enum enc_level 
421imico_stream_enc_level (void *stream) 422{ 423 struct mini_crypto_stream *const cryst = stream; 424 return cryst->mcs_enc_level; 425} 426 427 428static const struct crypto_stream_if crypto_stream_if = 429{ 430 .csi_write = imico_stream_write, 431 .csi_flush = imico_stream_flush, 432 .csi_readf = imico_stream_readf, 433 .csi_wantwrite = imico_stream_wantwrite, 434 .csi_wantread = imico_stream_wantread, 435 .csi_enc_level = imico_stream_enc_level, 436}; 437 438 439static int 440is_first_packet_ok (const struct lsquic_packet_in *packet_in, 441 size_t udp_payload_size) 442{ 443 if (udp_payload_size < IQUIC_MIN_INIT_PACKET_SZ) 444 { 445 /* [draft-ietf-quic-transport-24] Section 14 */ 446 LSQ_LOG1(LSQ_LOG_DEBUG, "incoming UDP payload too small: %zu bytes", 447 udp_payload_size); 448 return 0; 449 } 450 /* TODO: Move decryption of the first packet into this function? */ 451 return 1; /* TODO */ 452} 453 454 455static void 456imico_peer_addr_validated (struct ietf_mini_conn *conn, const char *how) 457{ 458 if (!(conn->imc_flags & IMC_ADDR_VALIDATED)) 459 { 460 conn->imc_flags |= IMC_ADDR_VALIDATED; 461 LSQ_DEBUG("peer address validated (%s)", how); 462 } 463} 464 465 466struct lsquic_conn * 467lsquic_mini_conn_ietf_new (struct lsquic_engine_public *enpub, 468 const struct lsquic_packet_in *packet_in, 469 enum lsquic_version version, int is_ipv4, const lsquic_cid_t *odcid, 470 size_t udp_payload_size) 471{ 472 struct ietf_mini_conn *conn; 473 enc_session_t *enc_sess; 474 enum enc_level i; 475 const struct enc_session_funcs_iquic *esfi; 476 unsigned char rand_nybble; 477 478 if (!is_first_packet_ok(packet_in, udp_payload_size)) 479 return NULL; 480 481 conn = lsquic_malo_get(enpub->enp_mm.malo.mini_conn_ietf); 482 if (!conn) 483 { 484 LSQ_LOG1(LSQ_LOG_WARN, "cannot allocate mini connection: %s", 485 strerror(errno)); 486 return NULL; 487 } 488 memset(conn, 0, sizeof(*conn)); 489 conn->imc_conn.cn_if = &mini_conn_ietf_iface; 490 conn->imc_conn.cn_cces = conn->imc_cces; 491 
conn->imc_conn.cn_n_cces = sizeof(conn->imc_cces) 492 / sizeof(conn->imc_cces[0]); 493 conn->imc_cces[0].cce_cid = packet_in->pi_dcid; 494 conn->imc_cces[0].cce_flags = CCE_USED; 495 conn->imc_conn.cn_cces_mask = 1; 496 lsquic_scid_from_packet_in(packet_in, &conn->imc_path.np_dcid); 497 LSQ_DEBUGC("recv SCID from client %"CID_FMT, CID_BITS(&conn->imc_cces[0].cce_cid)); 498 LSQ_DEBUGC("recv DCID from client %"CID_FMT, CID_BITS(&conn->imc_path.np_dcid)); 499 500 /* Generate new SCID. Since is not the original SCID, it is given 501 * a sequence number (0) and therefore can be retired by the client. 502 */ 503 enpub->enp_generate_scid(enpub->enp_gen_scid_ctx, &conn->imc_conn, 504 &conn->imc_conn.cn_cces[1].cce_cid, enpub->enp_settings.es_scid_len); 505 506 LSQ_DEBUGC("generated SCID %"CID_FMT" at index %u, switching to it", 507 CID_BITS(&conn->imc_conn.cn_cces[1].cce_cid), 1); 508 conn->imc_conn.cn_cces[1].cce_flags = CCE_SEQNO | CCE_USED; 509 conn->imc_conn.cn_cces_mask |= 1u << 1; 510 conn->imc_conn.cn_cur_cce_idx = 1; 511 512 conn->imc_conn.cn_flags = LSCONN_MINI|LSCONN_IETF|LSCONN_SERVER; 513 conn->imc_conn.cn_version = version; 514 515 for (i = 0; i < N_ENC_LEVS; ++i) 516 { 517 conn->imc_streams[i].mcs_enc_level = i; 518 conn->imc_stream_ps[i] = &conn->imc_streams[i]; 519 } 520 521 rand_nybble = lsquic_crand_get_nybble(enpub->enp_crand); 522 if (rand_nybble == 0) 523 { 524 /* Use trechist for about one out of every sixteen connections so 525 * that the code does not grow stale. 
526 */ 527 LSQ_DEBUG("using trechist"); 528 conn->imc_flags |= IMC_TRECHIST; 529 conn->imc_recvd_packnos.trechist.hist_elems 530 = malloc(TRECHIST_SIZE * IMICO_N_PNS); 531 if (!conn->imc_recvd_packnos.trechist.hist_elems) 532 { 533 LSQ_WARN("cannot allocate trechist elems"); 534 return NULL; 535 } 536 } 537 538 esfi = select_esf_iquic_by_ver(version); 539 enc_sess = esfi->esfi_create_server(enpub, &conn->imc_conn, 540 &packet_in->pi_dcid, conn->imc_stream_ps, &crypto_stream_if, 541 &conn->imc_cces[0].cce_cid, &conn->imc_path.np_dcid); 542 if (!enc_sess) 543 { 544 lsquic_malo_put(conn); 545 return NULL; 546 } 547 548 conn->imc_enpub = enpub; 549 conn->imc_created = packet_in->pi_received; 550 if (enpub->enp_settings.es_base_plpmtu) 551 conn->imc_path.np_pack_size = enpub->enp_settings.es_base_plpmtu; 552 else if (is_ipv4) 553 conn->imc_path.np_pack_size = IQUIC_MAX_IPv4_PACKET_SZ; 554 else 555 conn->imc_path.np_pack_size = IQUIC_MAX_IPv6_PACKET_SZ; 556 conn->imc_conn.cn_pf = select_pf_by_ver(version); 557 conn->imc_conn.cn_esf.i = esfi; 558 conn->imc_conn.cn_enc_session = enc_sess; 559 conn->imc_conn.cn_esf_c = select_esf_common_by_ver(version); 560 TAILQ_INIT(&conn->imc_packets_out); 561 TAILQ_INIT(&conn->imc_app_packets); 562 TAILQ_INIT(&conn->imc_crypto_frames); 563 if (odcid) 564 imico_peer_addr_validated(conn, "odcid"); 565#if LSQUIC_DEVEL 566 { 567 const char *const s = getenv("LSQUIC_LOSE_0RTT"); 568 if (s && atoi(s)) 569 { 570 LSQ_DEBUG("will lose 0-RTT packets (via env variable)"); 571 conn->imc_delayed_packets_count = UCHAR_MAX; 572 } 573 } 574#endif 575 576 LSQ_DEBUG("created mini connection object %p; max packet size=%hu", 577 conn, conn->imc_path.np_pack_size); 578 return &conn->imc_conn; 579} 580 581 582static void 583ietf_mini_conn_ci_client_call_on_new (struct lsquic_conn *lconn) 584{ 585 assert(0); 586} 587 588 589static void 590ietf_mini_conn_ci_destroy (struct lsquic_conn *lconn) 591{ 592 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) 
lconn; 593 struct lsquic_packet_out *packet_out; 594 struct lsquic_packet_in *packet_in; 595 struct stream_frame *frame; 596 597 while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out))) 598 { 599 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 600 imico_destroy_packet(conn, packet_out); 601 } 602 while ((packet_in = TAILQ_FIRST(&conn->imc_app_packets))) 603 { 604 TAILQ_REMOVE(&conn->imc_app_packets, packet_in, pi_next); 605 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, packet_in); 606 } 607 while ((frame = TAILQ_FIRST(&conn->imc_crypto_frames))) 608 { 609 TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame); 610 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, frame->packet_in); 611 lsquic_malo_put(frame); 612 } 613 if (lconn->cn_enc_session) 614 lconn->cn_esf.i->esfi_destroy(lconn->cn_enc_session); 615 LSQ_DEBUG("ietf_mini_conn_ci_destroyed"); 616 if (conn->imc_flags & IMC_TRECHIST) 617 free(conn->imc_recvd_packnos.trechist.hist_elems); 618 lsquic_malo_put(conn); 619} 620 621 622static struct lsquic_engine * 623ietf_mini_conn_ci_get_engine (struct lsquic_conn *lconn) 624{ 625 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 626 return conn->imc_enpub->enp_engine; 627} 628 629 630static void 631ietf_mini_conn_ci_hsk_done (struct lsquic_conn *lconn, 632 enum lsquic_hsk_status status) 633{ 634 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 635 636 switch (status) 637 { 638 case LSQ_HSK_OK: 639 case LSQ_HSK_RESUMED_OK: 640 conn->imc_flags |= IMC_HSK_OK; 641 conn->imc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE; 642 LSQ_DEBUG("handshake OK"); 643 break; 644 default: 645 assert(0); 646 /* fall-through */ 647 case LSQ_HSK_FAIL: 648 conn->imc_flags |= IMC_HSK_FAILED|IMC_ERROR; 649 LSQ_INFO("handshake failed"); 650 break; 651 } 652} 653 654 655static void 656ietf_mini_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert) 657{ 658 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 659 LSQ_DEBUG("got TLS alert 
%"PRIu8, alert); 660 conn->imc_flags |= IMC_ERROR|IMC_TLS_ALERT; 661 conn->imc_tls_alert = alert; 662} 663 664 665/* A mini connection is only tickable if it has unsent packets. This can 666 * occur when packet sending is delayed. 667 * 668 * Otherwise, a mini connection is not tickable: Either there are incoming 669 * packets, in which case, the connection is going to be ticked, or there is 670 * an alarm pending, in which case it will be handled via the attq. 671 */ 672static int 673ietf_mini_conn_ci_is_tickable (struct lsquic_conn *lconn) 674{ 675 struct ietf_mini_conn *const conn = (struct ietf_mini_conn *) lconn; 676 const struct lsquic_packet_out *packet_out; 677 size_t packet_size; 678 679 if (conn->imc_enpub->enp_flags & ENPUB_CAN_SEND) 680 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 681 if (!(packet_out->po_flags & PO_SENT)) 682 { 683 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 684 return imico_can_send(conn, packet_size); 685 } 686 687 return 0; 688} 689 690 691static int 692imico_can_send (const struct ietf_mini_conn *conn, size_t size) 693{ 694 return (conn->imc_flags & IMC_ADDR_VALIDATED) 695 || conn->imc_bytes_in * 3 >= conn->imc_bytes_out + size 696 ; 697} 698 699 700static void 701imico_zero_pad (struct lsquic_packet_out *packet_out) 702{ 703 size_t pad_size; 704 705 pad_size = lsquic_packet_out_avail(packet_out); 706 memset(packet_out->po_data + packet_out->po_data_sz, 0, pad_size); 707 packet_out->po_data_sz += pad_size; 708 packet_out->po_frame_types |= QUIC_FTBIT_PADDING; 709} 710 711 712static struct lsquic_packet_out * 713ietf_mini_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, 714 const struct to_coal *to_coal) 715{ 716 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 717 struct lsquic_packet_out *packet_out; 718 size_t packet_size; 719 720 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 721 { 722 if (packet_out->po_flags & PO_SENT) 723 continue; 724 /* 
[draft-ietf-quic-transport-32] Section 14.1: 725 " a server MUST expand the payload of all UDP datagrams carrying 726 " ack-eliciting Initial packets to at least the smallest allowed 727 " maximum datagram size of 1200 bytes. 728 */ 729 if (packet_out->po_header_type == HETY_INITIAL 730 && !(packet_out->po_frame_types & (1 << QUIC_FRAME_PADDING)) 731 && (packet_out->po_frame_types & IQUIC_FRAME_ACKABLE_MASK) 732 && lsquic_packet_out_avail(packet_out) > 0) 733 imico_zero_pad(packet_out); 734 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 735 if (!(to_coal 736 && (packet_size + to_coal->prev_sz_sum 737 > conn->imc_path.np_pack_size 738 || !lsquic_packet_out_equal_dcids(to_coal->prev_packet, packet_out)) 739 )) 740 { 741 if (!imico_can_send(conn, packet_size)) 742 { 743 LSQ_DEBUG("cannot send packet %"PRIu64" of size %zu: client " 744 "address has not been validated", packet_out->po_packno, 745 packet_size); 746 return NULL; 747 } 748 packet_out->po_flags |= PO_SENT; 749 conn->imc_bytes_out += packet_size; 750 if (!to_coal) 751 LSQ_DEBUG("packet_to_send: %"PRIu64, packet_out->po_packno); 752 else 753 LSQ_DEBUG("packet_to_send: %"PRIu64" (coalesced)", 754 packet_out->po_packno); 755 return packet_out; 756 } 757 else 758 return NULL; 759 } 760 761 return NULL; 762} 763 764 765static int 766imico_calc_retx_timeout (const struct ietf_mini_conn *conn) 767{ 768 lsquic_time_t to; 769 to = lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats); 770 if (to) 771 { 772 to += to / 2; 773 if (to < 10000) 774 to = 10000; 775 } 776 else 777 to = 300000; 778 return to << conn->imc_hsk_count; 779} 780 781 782static lsquic_time_t 783ietf_mini_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why) 784{ 785 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 786 const struct lsquic_packet_out *packet_out; 787 lsquic_time_t exp_time, retx_time; 788 789 exp_time = conn->imc_created + 790 conn->imc_enpub->enp_settings.es_handshake_to; 791 792 
TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 793 if (packet_out->po_flags & PO_SENT) 794 { 795 retx_time = packet_out->po_sent + imico_calc_retx_timeout(conn); 796 if (retx_time < exp_time) 797 { 798 *why = N_AEWS + AL_RETX_HSK; 799 return retx_time; 800 } 801 else 802 { 803 *why = AEW_MINI_EXPIRE; 804 return exp_time; 805 } 806 } 807 808 *why = AEW_MINI_EXPIRE; 809 return exp_time; 810} 811 812 813#define IMICO_PROC_FRAME_ARGS \ 814 struct ietf_mini_conn *conn, struct lsquic_packet_in *packet_in, \ 815 const unsigned char *p, size_t len 816 817 818static void 819imico_dispatch_stream_events (struct ietf_mini_conn *conn) 820{ 821 enum enc_level i; 822 823 for (i = 0; i < N_ENC_LEVS; ++i) 824 if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTREAD)) 825 == (MCS_CREATED|MCS_WANTREAD)) 826 { 827 LSQ_DEBUG("dispatch read events on level #%u", i); 828 lsquic_mini_cry_sm_if.on_read((void *) &conn->imc_streams[i], 829 conn->imc_conn.cn_enc_session); 830 } 831 832 for (i = 0; i < N_ENC_LEVS; ++i) 833 if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTWRITE)) 834 == (MCS_CREATED|MCS_WANTWRITE)) 835 { 836 LSQ_DEBUG("dispatch write events on level #%u", i); 837 lsquic_mini_cry_sm_if.on_write((void *) &conn->imc_streams[i], 838 conn->imc_conn.cn_enc_session); 839 } 840} 841 842 843static int 844imico_stash_stream_frame (struct ietf_mini_conn *conn, 845 enum enc_level enc_level, struct lsquic_packet_in *packet_in, 846 const struct stream_frame *frame) 847{ 848 struct stream_frame *copy; 849 850 if (conn->imc_n_crypto_frames >= IMICO_MAX_STASHED_FRAMES) 851 { 852 LSQ_INFO("cannot stash more CRYPTO frames, at %hhu already, while max " 853 "is %u", conn->imc_n_crypto_frames, IMICO_MAX_STASHED_FRAMES); 854 return -1; 855 } 856 857 if (conn->imc_crypto_frames_sz + DF_SIZE(frame) > IMICO_MAX_BUFFERED_CRYPTO) 858 { 859 LSQ_INFO("cannot stash more than %u bytes of CRYPTO frames", 860 IMICO_MAX_BUFFERED_CRYPTO); 861 return -1; 862 } 863 864 copy = 
lsquic_malo_get(conn->imc_enpub->enp_mm.malo.stream_frame); 865 if (!copy) 866 { 867 LSQ_INFO("could not allocate stream frame for stashing"); 868 return -1; 869 } 870 871 *copy = *frame; 872 copy->packet_in = lsquic_packet_in_get(packet_in); 873 copy->stream_id = enc_level; 874 TAILQ_INSERT_TAIL(&conn->imc_crypto_frames, copy, next_frame); 875 ++conn->imc_n_crypto_frames; 876 conn->imc_crypto_frames_sz += DF_SIZE(frame); 877 return 0; 878} 879 880 881static unsigned 882imico_process_crypto_frame (IMICO_PROC_FRAME_ARGS) 883{ 884 int parsed_len; 885 enum enc_level enc_level, i; 886 struct stream_frame stream_frame; 887 888 parsed_len = conn->imc_conn.cn_pf->pf_parse_crypto_frame(p, len, 889 &stream_frame); 890 if (parsed_len < 0) 891 { 892 conn->imc_flags |= IMC_PARSE_FAILED; 893 return 0; 894 } 895 896 enc_level = lsquic_packet_in_enc_level(packet_in); 897 EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, &stream_frame, enc_level); 898 899 if (conn->imc_streams[enc_level].mcs_read_off >= DF_OFF(&stream_frame) 900 && conn->imc_streams[enc_level].mcs_read_off < DF_END(&stream_frame)) 901 LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level); 902 else if (conn->imc_streams[enc_level].mcs_read_off < DF_OFF(&stream_frame)) 903 { 904 LSQ_DEBUG("Can't read CRYPTO frame on enc level #%u at offset %"PRIu64 905 " yet -- stash", enc_level, DF_OFF(&stream_frame)); 906 if (0 == imico_stash_stream_frame(conn, enc_level, packet_in, 907 &stream_frame)) 908 return parsed_len; 909 else 910 return 0; 911 } 912 else 913 { 914 LSQ_DEBUG("Got duplicate CRYPTO frame for enc level #%u -- ignore", 915 enc_level); 916 return parsed_len; 917 } 918 919 if (!(conn->imc_flags & IMC_ENC_SESS_INITED)) 920 { 921 if (0 != conn->imc_conn.cn_esf.i->esfi_init_server( 922 conn->imc_conn.cn_enc_session)) 923 return 0; 924 conn->imc_flags |= IMC_ENC_SESS_INITED; 925 } 926 927 if (!(conn->imc_streams[enc_level].mcs_flags & MCS_CREATED)) 928 { 929 LSQ_DEBUG("creating stream on level #%u", enc_level); 930 
conn->imc_streams[enc_level].mcs_flags |= MCS_CREATED; 931 lsquic_mini_cry_sm_if.on_new_stream(conn->imc_conn.cn_enc_session, 932 (void *) &conn->imc_streams[enc_level]); 933 } 934 935 /* Assume that receiving a CRYPTO frame at a higher level means that we 936 * no longer want to read from a lower level. 937 */ 938 for (i = 0; i < enc_level; ++i) 939 conn->imc_streams[i].mcs_flags &= ~MCS_WANTREAD; 940 941 conn->imc_last_in.frame = &stream_frame; 942 conn->imc_last_in.enc_level = enc_level; 943 imico_dispatch_stream_events(conn); 944 conn->imc_last_in.frame = NULL; 945 946 if (DF_ROFF(&stream_frame) < DF_END(&stream_frame)) 947 { 948 /* This is an odd condition, but let's handle it just in case */ 949 LSQ_DEBUG("New CRYPTO frame on enc level #%u not fully read -- stash", 950 enc_level); 951 if (0 != imico_stash_stream_frame(conn, enc_level, packet_in, 952 &stream_frame)) 953 return 0; 954 } 955 956 957 return parsed_len; 958} 959 960 961static ptrdiff_t 962imico_count_zero_bytes (const unsigned char *p, size_t len) 963{ 964 const unsigned char *const end = p + len; 965 while (p < end && 0 == *p) 966 ++p; 967 return len - (end - p); 968} 969 970 971static unsigned 972imico_process_padding_frame (IMICO_PROC_FRAME_ARGS) 973{ 974 len = (size_t) imico_count_zero_bytes(p, len); 975 EV_LOG_PADDING_FRAME_IN(LSQUIC_LOG_CONN_ID, len); 976 return len; 977} 978 979 980static void 981imico_take_rtt_sample (struct ietf_mini_conn *conn, 982 const struct lsquic_packet_out *packet_out, 983 lsquic_time_t now, lsquic_time_t lack_delta) 984{ 985 assert(packet_out->po_sent); 986 lsquic_time_t measured_rtt = now - packet_out->po_sent; 987 if (lack_delta < measured_rtt) 988 { 989 lsquic_rtt_stats_update(&conn->imc_rtt_stats, measured_rtt, lack_delta); 990 LSQ_DEBUG("srtt: %"PRIu64" usec, var: %"PRIu64, 991 lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats), 992 lsquic_rtt_stats_get_rttvar(&conn->imc_rtt_stats)); 993 } 994} 995 996 997static unsigned 998imico_process_ack_frame 
(IMICO_PROC_FRAME_ARGS) 999{ 1000 int parsed_len; 1001 unsigned n; 1002 lsquic_packet_out_t *packet_out, *next; 1003 struct ack_info *acki; 1004 lsquic_packno_t packno; 1005 lsquic_time_t warn_time; 1006 packno_set_t acked; 1007 enum packnum_space pns; 1008 uint8_t ack_exp; 1009 1010 if (conn->imc_flags & IMC_HAVE_TP) 1011 ack_exp = conn->imc_ack_exp; 1012 else 1013 ack_exp = TP_DEF_ACK_DELAY_EXP; /* Odd: no transport params yet? */ 1014 acki = conn->imc_enpub->enp_mm.acki; 1015 parsed_len = conn->imc_conn.cn_pf->pf_parse_ack_frame(p, len, acki, 1016 ack_exp); 1017 if (parsed_len < 0) 1018 { 1019 conn->imc_flags |= IMC_PARSE_FAILED; 1020 return 0; 1021 } 1022 1023 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 1024 acked = 0; 1025 1026 for (n = 0; n < acki->n_ranges; ++n) 1027 { 1028 if (acki->ranges[n].high <= MAX_PACKETS) 1029 { 1030 acked |= (1ULL << acki->ranges[n].high) 1031 | ((1ULL << acki->ranges[n].high) - 1); 1032 acked &= ~((1ULL << acki->ranges[n].low) - 1); 1033 } 1034 else 1035 { 1036 packno = acki->ranges[n].high; 1037 goto err_never_sent; 1038 } 1039 } 1040 if (acked & ~conn->imc_sent_packnos) 1041 { 1042 packno = highest_bit_set(acked & ~conn->imc_sent_packnos); 1043 goto err_never_sent; 1044 } 1045 1046 EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, acki); 1047 for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1048 packet_out = next) 1049 { 1050 next = TAILQ_NEXT(packet_out, po_next); 1051 if ((1ULL << packet_out->po_packno) & acked) 1052 { 1053 assert(lsquic_packet_out_pns(packet_out) == pns); 1054 LSQ_DEBUG("Got ACK for packet %"PRIu64, packet_out->po_packno); 1055 if (packet_out->po_packno == largest_acked(acki)) 1056 imico_take_rtt_sample(conn, packet_out, 1057 packet_in->pi_received, acki->lack_delta); 1058 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1059 imico_destroy_packet(conn, packet_out); 1060 } 1061 } 1062 1063 if (conn->imc_sent_packnos & ~conn->imc_acked_packnos[pns] & acked) 1064 { 1065 LSQ_DEBUG("Newly 
acked packets, reset handshake count"); 1066 conn->imc_hsk_count = 0; 1067 } 1068 1069 conn->imc_acked_packnos[pns] |= acked; 1070 1071 return parsed_len; 1072 1073 err_never_sent: 1074 warn_time = lsquic_time_now(); 1075 if (0 == conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] 1076 || conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] 1077 + WARNING_INTERVAL < warn_time) 1078 { 1079 conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] = warn_time; 1080 LSQ_WARN("packet %"PRIu64" (pns: %u) was never sent", packno, pns); 1081 } 1082 else 1083 LSQ_DEBUG("packet %"PRIu64" (pns: %u) was never sent", packno, pns); 1084 return 0; 1085} 1086 1087 1088static unsigned 1089imico_process_ping_frame (IMICO_PROC_FRAME_ARGS) 1090{ 1091 LSQ_DEBUG("got a PING frame, do nothing"); 1092 return 1; 1093} 1094 1095 1096static unsigned 1097imico_process_connection_close_frame (IMICO_PROC_FRAME_ARGS) 1098{ 1099 struct lsquic_packet_out *packet_out; 1100 uint64_t error_code; 1101 uint16_t reason_len; 1102 uint8_t reason_off; 1103 int parsed_len, app_error; 1104 1105 while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out))) 1106 { 1107 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1108 imico_destroy_packet(conn, packet_out); 1109 } 1110 conn->imc_flags |= IMC_CLOSE_RECVD; 1111 parsed_len = conn->imc_conn.cn_pf->pf_parse_connect_close_frame(p, len, 1112 &app_error, &error_code, &reason_len, &reason_off); 1113 if (parsed_len < 0) 1114 { 1115 conn->imc_flags |= IMC_PARSE_FAILED; 1116 return 0; 1117 } 1118 EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code, 1119 (int) reason_len, (const char *) p + reason_off); 1120 LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; " 1121 "reason: %.*s)", app_error ? 
/* Handler for frame types that are either invalid at this encryption level
 * or not expected during the handshake.  Returning 0 aborts processing.
 */
static unsigned
imico_process_invalid_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("invalid frame %u (%s)", p[0],
        frame_type_2_str[ conn->imc_conn.cn_pf->pf_parse_frame_type(p, len) ]);
    return 0;
}


/* Dispatch table: one handler per QUIC frame type.  Only PADDING, CRYPTO,
 * ACK, PING, and CONNECTION_CLOSE do real work during the mini-conn phase.
 */
static unsigned (*const imico_process_frames[N_QUIC_FRAMES])
                                                (IMICO_PROC_FRAME_ARGS) =
{
    [QUIC_FRAME_PADDING]            =  imico_process_padding_frame,
    [QUIC_FRAME_CRYPTO]             =  imico_process_crypto_frame,
    [QUIC_FRAME_ACK]                =  imico_process_ack_frame,
    [QUIC_FRAME_PING]               =  imico_process_ping_frame,
    [QUIC_FRAME_CONNECTION_CLOSE]   =  imico_process_connection_close_frame,
    /* Some of them are invalid, while others are unexpected.  We treat
     * them the same: handshake cannot proceed.
     */
    [QUIC_FRAME_RST_STREAM]         =  imico_process_invalid_frame,
    [QUIC_FRAME_MAX_DATA]           =  imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAM_DATA]    =  imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAMS]        =  imico_process_invalid_frame,
    [QUIC_FRAME_BLOCKED]            =  imico_process_invalid_frame,
    [QUIC_FRAME_STREAM_BLOCKED]     =  imico_process_invalid_frame,
    [QUIC_FRAME_STREAMS_BLOCKED]    =  imico_process_invalid_frame,
    [QUIC_FRAME_NEW_CONNECTION_ID]  =  imico_process_invalid_frame,
    [QUIC_FRAME_STOP_SENDING]       =  imico_process_invalid_frame,
    [QUIC_FRAME_PATH_CHALLENGE]     =  imico_process_invalid_frame,
    [QUIC_FRAME_PATH_RESPONSE]      =  imico_process_invalid_frame,
    /* STREAM frame can only come in the App PNS and we delay those packets: */
    [QUIC_FRAME_STREAM]             =  imico_process_invalid_frame,
    [QUIC_FRAME_HANDSHAKE_DONE]     =  imico_process_invalid_frame,
    [QUIC_FRAME_ACK_FREQUENCY]      =  imico_process_invalid_frame,
    [QUIC_FRAME_TIMESTAMP]          =  imico_process_invalid_frame,
};


/* Parse the type of the frame at `p' and, if it is legal at this encryption
 * level for this QUIC version, dispatch it to the matching handler.  Returns
 * the number of bytes consumed, or zero on error.
 */
static unsigned
imico_process_packet_frame (struct ietf_mini_conn *conn,
        struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len)
{
    enum enc_level enc_level;
    enum quic_frame_type type;

    enc_level = lsquic_packet_in_enc_level(packet_in);
    type = conn->imc_conn.cn_pf->pf_parse_frame_type(p, len);
    if (lsquic_legal_frames_by_level[conn->imc_conn.cn_version][enc_level]
                                                            & (1 << type))
    {
        /* Record frame type for logging/diagnostics */
        packet_in->pi_frame_types |= 1 << type;
        return imico_process_frames[type](conn, packet_in, p, len);
    }
    else
    {
        LSQ_DEBUG("invalid frame %u at encryption level %s", type,
                                                lsquic_enclev2str[enc_level]);
        return 0;
    }
}
/* Walk the payload of a decrypted packet frame by frame.  Returns 0 on
 * success, -1 if any frame fails to process (handler returned 0).
 */
static int
imico_parse_regular_packet (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_in *packet_in)
{
    const unsigned char *p, *pend;
    unsigned len;

    p = packet_in->pi_data + packet_in->pi_header_sz;
    pend = packet_in->pi_data + packet_in->pi_data_sz;

    while (p < pend)
    {
        len = imico_process_packet_frame(conn, packet_in, p, pend - p);
        if (len > 0)
            p += len;
        else
            return -1;
    }

    return 0;
}


/* Return the zero-based index of the highest set bit of `sz'.
 * NOTE(review): __builtin_clzll(0) is undefined; all call sites in this
 * file appear to guard against a zero argument -- confirm before reuse.
 */
static unsigned
highest_bit_set (unsigned long long sz)
{
#if __GNUC__
    unsigned clz = __builtin_clzll(sz);
    return 63 - clz;
#else
    /* Portable binary-search fallback */
    unsigned long y;
    unsigned n;
    n = 64;
    y = sz >> 32;     if (y) { n -= 32; sz = y; }
    y = sz >> 16;     if (y) { n -= 16; sz = y; }
    y = sz >>  8;     if (y) { n -=  8; sz = y; }
    y = sz >>  4;     if (y) { n -=  4; sz = y; }
    y = sz >>  2;     if (y) { n -=  2; sz = y; }
    y = sz >>  1;     if (y) return 63 - n + 2;
    return 63 - n + sz;
#endif
}


/* Stop sending and receiving Initial packets: set the ignore flag, cancel
 * any queued Initial ACK, and destroy all unacked Initial packets.  Called
 * once the Handshake level is reached.
 */
static void
ignore_init (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned count;

    conn->imc_flags |= IMC_IGNORE_INIT;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << PNS_INIT);

    count = 0;
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (PNS_INIT == lsquic_packet_out_pns(packet_out))
        {
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
            ++count;
        }
    }

    /* "%.*s" with (count != 1) prints "s" only for plural counts */
    LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; "
        "destroyed %u packet%.*s", count, count != 1, "s");
}
TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1245 packet_out = next) 1246 { 1247 next = TAILQ_NEXT(packet_out, po_next); 1248 if (PNS_INIT == lsquic_packet_out_pns(packet_out)) 1249 { 1250 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1251 imico_destroy_packet(conn, packet_out); 1252 ++count; 1253 } 1254 } 1255 1256 LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; " 1257 "destroyed %u packet%.*s", count, count != 1, "s"); 1258} 1259 1260 1261static void 1262imico_maybe_delay_processing (struct ietf_mini_conn *conn, 1263 struct lsquic_packet_in *packet_in) 1264{ 1265 unsigned max_delayed; 1266 1267 if (conn->imc_flags & IMC_ADDR_VALIDATED) 1268 max_delayed = IMICO_MAX_DELAYED_PACKETS_VALIDATED; 1269 else 1270 max_delayed = IMICO_MAX_DELAYED_PACKETS_UNVALIDATED; 1271 1272 if (conn->imc_delayed_packets_count < max_delayed) 1273 { 1274 ++conn->imc_delayed_packets_count; 1275 lsquic_packet_in_upref(packet_in); 1276 TAILQ_INSERT_TAIL(&conn->imc_app_packets, packet_in, pi_next); 1277 LSQ_DEBUG("delay processing of packet (now delayed %hhu)", 1278 conn->imc_delayed_packets_count); 1279 } 1280 else 1281 LSQ_DEBUG("drop packet, already delayed %hhu packets", 1282 conn->imc_delayed_packets_count); 1283} 1284 1285 1286/* [draft-ietf-quic-transport-30] Section 8.1: 1287 " Additionally, a server MAY consider the client address validated if 1288 " the client uses a connection ID chosen by the server and the 1289 " connection ID contains at least 64 bits of entropy. 1290 * 1291 * We use RAND_bytes() to generate SCIDs, so it's all entropy. 
/* If the client's DCID is one of our server-generated CIDs with at least
 * 64 bits (8 bytes) of entropy, consider the peer address validated.
 */
static void
imico_maybe_validate_by_dcid (struct ietf_mini_conn *conn,
                                                    const lsquic_cid_t *dcid)
{
    unsigned i;

    if (dcid->len >= 8)
        /* Generic code with unnecessary loop as future-proofing */
        for (i = 0; i < conn->imc_conn.cn_n_cces; ++i)
            if ((conn->imc_conn.cn_cces_mask & (1 << i))
                    && (conn->imc_conn.cn_cces[i].cce_flags & CCE_SEQNO)
                    && LSQUIC_CIDS_EQ(&conn->imc_conn.cn_cces[i].cce_cid, dcid))
            {
                imico_peer_addr_validated(conn, "dcid/scid + entropy");
                return;
            }
}


/* Return true if `packno' in packet number space `pns' has already been
 * received.  Uses either the tiny range history or the 64-bit bitmask,
 * depending on which representation the connection is using.
 */
static int
imico_received_packet_is_dup (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return lsquic_trechist_contains(
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems
                                    + TRECHIST_MAX_RANGES * pns, packno);
    else
        return !!(conn->imc_recvd_packnos.bitmasks[pns] & (1ULL << packno));
}


/* Return true if `packno' is larger than every packet number received so
 * far in `pns' (vacuously true when the history is empty).
 */
static int
imico_packno_is_largest (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return 0 == conn->imc_recvd_packnos.trechist.hist_masks[pns]
            || packno > lsquic_trechist_max(
                        conn->imc_recvd_packnos.trechist.hist_masks[pns],
                        conn->imc_recvd_packnos.trechist.hist_elems
                                    + TRECHIST_MAX_RANGES * pns);
    else
        return 0 == conn->imc_recvd_packnos.bitmasks[pns]
            || packno > highest_bit_set(conn->imc_recvd_packnos.bitmasks[pns]);
}


/* Record `packno' in the receive history for `pns'.  If the tiny range
 * history overflows, the connection is put into the error state.
 */
static void
imico_record_recvd_packno (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
    {
        if (0 != lsquic_trechist_insert(
                    &conn->imc_recvd_packnos.trechist.hist_masks[pns],
                    conn->imc_recvd_packnos.trechist.hist_elems
                                    + TRECHIST_MAX_RANGES * pns, packno))
        {
            LSQ_INFO("too many ranges for trechist to hold or range too wide");
            conn->imc_flags |= IMC_ERROR;
        }
    }
    else
        conn->imc_recvd_packnos.bitmasks[pns] |= 1ULL << packno;
}
/* Convert the per-PNS receive history from the 64-bit bitmask representation
 * to the tiny range history ("trechist").  Triggered when a received packet
 * number exceeds MAX_PACKETS.  Returns 0 on success, -1 on allocation
 * failure.
 */
static int
imico_switch_to_trechist (struct ietf_mini_conn *conn)
{
    uint32_t masks[IMICO_N_PNS];
    enum packnum_space pns;
    struct trechist_elem *elems;
    struct ietf_mini_rechist iter;

    elems = malloc(TRECHIST_SIZE * N_PNS);
    if (!elems)
    {
        LSQ_WARN("cannot allocate trechist elems");
        return -1;
    }

    /* Copy existing bitmask ranges into the new representation */
    for (pns = 0; pns < IMICO_N_PNS; ++pns)
        if (conn->imc_recvd_packnos.bitmasks[pns])
        {
            lsquic_imico_rechist_init(&iter, conn, pns);
            lsquic_trechist_copy_ranges(&masks[pns],
                        elems + TRECHIST_MAX_RANGES * pns, &iter,
                        lsquic_imico_rechist_first,
                        lsquic_imico_rechist_next);
        }
        else
            masks[pns] = 0;

    memcpy(conn->imc_recvd_packnos.trechist.hist_masks, masks, sizeof(masks));
    conn->imc_recvd_packnos.trechist.hist_elems = elems;
    conn->imc_flags |= IMC_TRECHIST;
    LSQ_DEBUG("switched to trechist");
    return 0;
}


/* Only a single packet is supported */
static void
ietf_mini_conn_ci_packet_in (struct lsquic_conn *lconn,
                                        struct lsquic_packet_in *packet_in)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum dec_packin dec_packin;
    enum packnum_space pns;

    /* Update "bytes in" count as early as possible.  From
     * [draft-ietf-quic-transport-28] Section 8.1:
     " For the purposes of
     " avoiding amplification prior to address validation, servers MUST
     " count all of the payload bytes received in datagrams that are
     " uniquely attributed to a single connection.  This includes datagrams
     " that contain packets that are successfully processed and datagrams
     " that contain packets that are all discarded.
     */
    conn->imc_bytes_in += packet_in->pi_data_sz;

    if (conn->imc_flags & IMC_ERROR)
    {
        LSQ_DEBUG("ignore incoming packet: connection is in error state");
        return;
    }

    if (!(conn->imc_flags & IMC_ADDR_VALIDATED))
        imico_maybe_validate_by_dcid(conn, &packet_in->pi_dcid);

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    if (pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_DEBUG("ignore init packet");    /* Don't bother decrypting */
        return;
    }

    dec_packin = lconn->cn_esf_c->esf_decrypt_packet(lconn->cn_enc_session,
                                conn->imc_enpub, &conn->imc_conn, packet_in);
    if (dec_packin != DECPI_OK)
    {
        LSQ_DEBUG("could not decrypt packet");
        /* Keys may simply not be available yet: stash for later */
        if (DECPI_NOT_YET == dec_packin)
            imico_maybe_delay_processing(conn, packet_in);
        return;
    }

    EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in);

    if (pns == PNS_APP)
    {
        /* App-PNS packets are processed by the full connection later */
        imico_maybe_delay_processing(conn, packet_in);
        return;
    }
    else if (pns == PNS_HSK)
        imico_peer_addr_validated(conn, "handshake PNS");

    /* Track the highest packet number space seen so far (two-bit field) */
    if (((conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3) < pns)
    {
        conn->imc_flags &= ~(3 << IMCBIT_PNS_BIT_SHIFT);
        conn->imc_flags |= pns << IMCBIT_PNS_BIT_SHIFT;
    }

    if (pns == PNS_HSK && !(conn->imc_flags & IMC_IGNORE_INIT))
        ignore_init(conn);

    /* Bitmask can only represent packet numbers up to MAX_PACKETS */
    if (packet_in->pi_packno > MAX_PACKETS
                                    && !(conn->imc_flags & IMC_TRECHIST))
    {
        if (0 != imico_switch_to_trechist(conn))
            return;
    }

    if (imico_received_packet_is_dup(conn, pns, packet_in->pi_packno))
    {
        LSQ_DEBUG("duplicate packet %"PRIu64, packet_in->pi_packno);
        return;
    }

    /* Update receive history before processing the packet: if there is an
     * error, the connection is terminated and recording this packet number
     * is helpful when it is printed along with other diagnostics in dtor.
     */
    if (imico_packno_is_largest(conn, pns, packet_in->pi_packno))
        conn->imc_largest_recvd[pns] = packet_in->pi_received;
    imico_record_recvd_packno(conn, pns, packet_in->pi_packno);

    if (0 != imico_parse_regular_packet(conn, packet_in))
    {
        LSQ_DEBUG("connection is now in error state");
        conn->imc_flags |= IMC_ERROR;
        return;
    }

    if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)))
        LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]);
    conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns;
    ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ];
    /* Shift register of ECN marks on incoming packets */
    conn->imc_incoming_ecn <<= 1;
    conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT;
}
1478 */ 1479 if (imico_packno_is_largest(conn, pns, packet_in->pi_packno)) 1480 conn->imc_largest_recvd[pns] = packet_in->pi_received; 1481 imico_record_recvd_packno(conn, pns, packet_in->pi_packno); 1482 1483 if (0 != imico_parse_regular_packet(conn, packet_in)) 1484 { 1485 LSQ_DEBUG("connection is now in error state"); 1486 conn->imc_flags |= IMC_ERROR; 1487 return; 1488 } 1489 1490 if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns))) 1491 LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]); 1492 conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns; 1493 ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ]; 1494 conn->imc_incoming_ecn <<= 1; 1495 conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT; 1496} 1497 1498 1499static void 1500ietf_mini_conn_ci_packet_sent (struct lsquic_conn *lconn, 1501 struct lsquic_packet_out *packet_out) 1502{ 1503 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1504 conn->imc_sent_packnos |= 1ULL << packet_out->po_packno; 1505 conn->imc_ecn_packnos |= !!lsquic_packet_out_ecn(packet_out) 1506 << packet_out->po_packno; 1507#if 0 1508 if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)) 1509 { 1510 assert(mc->mc_flags & MC_UNSENT_ACK); 1511 mc->mc_flags &= ~MC_UNSENT_ACK; 1512 } 1513#endif 1514 ++conn->imc_ecn_counts_out[ lsquic_packet_out_pns(packet_out) ] 1515 [ lsquic_packet_out_ecn(packet_out) ]; 1516 if (packet_out->po_header_type == HETY_HANDSHAKE) 1517 conn->imc_flags |= IMC_HSK_PACKET_SENT; 1518 LSQ_DEBUG("%s: packet %"PRIu64" sent", __func__, packet_out->po_packno); 1519} 1520 1521 1522static void 1523ietf_mini_conn_ci_packet_not_sent (struct lsquic_conn *lconn, 1524 struct lsquic_packet_out *packet_out) 1525{ 1526 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1527 size_t packet_size; 1528 1529 packet_out->po_flags &= ~PO_SENT; 1530 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 1531 conn->imc_bytes_out -= packet_size; 1532 LSQ_DEBUG("%s: packet 
/* Return a packet's encrypted buffer to the packet-memory interface and
 * clear the PO_ENCRYPTED state.
 */
static void
imico_return_enc_data (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_out *packet_out)
{
    conn->imc_enpub->enp_pmi->pmi_return(conn->imc_enpub->enp_pmi_ctx,
        conn->imc_path.np_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


/* Prepare a lost packet for retransmission under a fresh packet number and
 * re-queue it.  Returns -1 when packet numbers have been exhausted.
 */
static int
imico_repackage_packet (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_out *packet_out)
{
    const lsquic_packno_t oldno = packet_out->po_packno;
    const lsquic_packno_t packno = conn->imc_next_packno++;
    if (packno > MAX_PACKETS)
        return -1;

    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                                oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
    packet_out->po_packno = packno;
    packet_out->po_flags &= ~PO_SENT;
    lsquic_packet_out_set_ecn(packet_out, imico_get_ecn(conn));
    /* Old encrypted buffer is stale (packet number changed): release it */
    if (packet_out->po_flags & PO_ENCRYPTED)
        imico_return_enc_data(conn, packet_out);
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    return 0;
}


/* Detect retransmission timeouts among sent packets, repackage (or drop)
 * the lost ones, and return non-zero if there is at least one unsent packet
 * that the amplification limit allows us to send now.
 */
static int
imico_handle_losses_and_have_unsent (struct ietf_mini_conn *conn,
                                                            lsquic_time_t now)
{
    TAILQ_HEAD(, lsquic_packet_out) lost_packets =
                                    TAILQ_HEAD_INITIALIZER(lost_packets);
    const struct lsquic_conn *const lconn = &conn->imc_conn;
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t retx_to = 0;
    unsigned n_to_send = 0;
    size_t packet_size;

    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_flags & PO_SENT)
        {
            /* Compute the retransmission timeout lazily, at most once */
            if (0 == retx_to)
                retx_to = imico_calc_retx_timeout(conn);
            if (packet_out->po_sent + retx_to < now)
            {
                LSQ_DEBUG("packet %"PRIu64" has been lost (rto: %"PRIu64")",
                                        packet_out->po_packno, retx_to);
                TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
                TAILQ_INSERT_TAIL(&lost_packets, packet_out, po_next);
            }
        }
        else if (packet_size = lsquic_packet_out_total_sz(lconn, packet_out),
                                    imico_can_send(conn, packet_size))
            ++n_to_send;
        else
            break;
    }

    /* Any loss in this pass counts as one handshake retransmission */
    conn->imc_hsk_count += !TAILQ_EMPTY(&lost_packets);

    while ((packet_out = TAILQ_FIRST(&lost_packets)))
    {
        TAILQ_REMOVE(&lost_packets, packet_out, po_next);
        /* Only packets carrying retransmittable frames are resent */
        if ((packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK)
                        && 0 == imico_repackage_packet(conn, packet_out))
        {
            packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
            if (imico_can_send(conn, packet_size))
                ++n_to_send;
        }
        else
            imico_destroy_packet(conn, packet_out);
    }

    return n_to_send > 0;
}
/* Thin wrapper: loss handling doubles as the "anything to send?" check. */
static int
imico_have_packets_to_send (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    return imico_handle_losses_and_have_unsent(conn, now);
}


/* Initialize a receive-history iterator for packet number space `pns',
 * selecting the trechist or bitmask representation as appropriate.
 */
void
lsquic_imico_rechist_init (struct ietf_mini_rechist *rechist,
                    const struct ietf_mini_conn *conn, enum packnum_space pns)
{
    assert(pns < IMICO_N_PNS);
    rechist->conn = conn;
    rechist->pns  = pns;
    if (conn->imc_flags & IMC_TRECHIST)
        lsquic_trechist_iter(&rechist->u.trechist_iter,
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems + TRECHIST_MAX_RANGES * pns);
    else
    {
        rechist->u.bitmask.cur_set = 0;
        rechist->u.bitmask.cur_idx = 0;
    }
}


/* Callback for ACK generation: receipt time of the largest received packet
 * in the iterator's packet number space.
 */
static lsquic_time_t
imico_rechist_largest_recv (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    return rechist->conn->imc_largest_recvd[ rechist->pns ];
}


/* Produce the next (descending) contiguous range of received packet numbers
 * from the bitmask representation, or NULL when the history is exhausted.
 */
static const struct lsquic_packno_range *
imico_bitmask_rechist_next (struct ietf_mini_rechist *rechist)
{
    const struct ietf_mini_conn *conn = rechist->conn;
    packno_set_t packnos;
    int i;

    packnos = rechist->u.bitmask.cur_set;
    if (0 == packnos)
        return NULL;

    /* There may be a faster way to do this, but for now, we just want
     * correctness.
     */
    /* Scan down from cur_idx for the high end of the next range */
    for (i = rechist->u.bitmask.cur_idx; i >= 0; --i)
        if (packnos & (1ULL << i))
        {
            rechist->u.bitmask.range.low  = i;
            rechist->u.bitmask.range.high = i;
            break;
        }
    assert(i >= 0); /* We must have hit at least one bit */
    --i;
    /* Extend the range downward while bits remain set */
    for ( ; i >= 0 && (packnos & (1ULL << i)); --i)
        rechist->u.bitmask.range.low = i;
    if (i >= 0)
    {
        /* Remember where to resume on the next call */
        rechist->u.bitmask.cur_set = packnos & ((1ULL << i) - 1);
        rechist->u.bitmask.cur_idx = i;
    }
    else
        rechist->u.bitmask.cur_set = 0;
    LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__,
        rechist->u.bitmask.range.low, rechist->u.bitmask.range.high);
    return &rechist->u.bitmask.range;
}
lsquic_packno_range * 1661imico_bitmask_rechist_next (struct ietf_mini_rechist *rechist) 1662{ 1663 const struct ietf_mini_conn *conn = rechist->conn; 1664 packno_set_t packnos; 1665 int i; 1666 1667 packnos = rechist->u.bitmask.cur_set; 1668 if (0 == packnos) 1669 return NULL; 1670 1671 /* There may be a faster way to do this, but for now, we just want 1672 * correctness. 1673 */ 1674 for (i = rechist->u.bitmask.cur_idx; i >= 0; --i) 1675 if (packnos & (1ULL << i)) 1676 { 1677 rechist->u.bitmask.range.low = i; 1678 rechist->u.bitmask.range.high = i; 1679 break; 1680 } 1681 assert(i >= 0); /* We must have hit at least one bit */ 1682 --i; 1683 for ( ; i >= 0 && (packnos & (1ULL << i)); --i) 1684 rechist->u.bitmask.range.low = i; 1685 if (i >= 0) 1686 { 1687 rechist->u.bitmask.cur_set = packnos & ((1ULL << i) - 1); 1688 rechist->u.bitmask.cur_idx = i; 1689 } 1690 else 1691 rechist->u.bitmask.cur_set = 0; 1692 LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__, 1693 rechist->u.bitmask.range.low, rechist->u.bitmask.range.high); 1694 return &rechist->u.bitmask.range; 1695} 1696 1697 1698const struct lsquic_packno_range * 1699lsquic_imico_rechist_next (void *rechist_ctx) 1700{ 1701 struct ietf_mini_rechist *rechist = rechist_ctx; 1702 1703 if (rechist->conn->imc_flags & IMC_TRECHIST) 1704 return lsquic_trechist_next(&rechist->u.trechist_iter); 1705 else 1706 return imico_bitmask_rechist_next(rechist); 1707} 1708 1709 1710const struct lsquic_packno_range * 1711lsquic_imico_rechist_first (void *rechist_ctx) 1712{ 1713 struct ietf_mini_rechist *rechist = rechist_ctx; 1714 1715 if (rechist->conn->imc_flags & IMC_TRECHIST) 1716 return lsquic_trechist_first(&rechist->u.trechist_iter); 1717 else 1718 { 1719 rechist->u.bitmask.cur_set 1720 = rechist->conn->imc_recvd_packnos.bitmasks[ rechist->pns ]; 1721 rechist->u.bitmask.cur_idx 1722 = highest_bit_set(rechist->u.bitmask.cur_set); 1723 return lsquic_imico_rechist_next(rechist_ctx); 1724 } 1725} 1726 1727 1728static const 
/* Write an ACK frame for packet number space `pns' into an outgoing packet,
 * including ECN counts if any incoming packet carried an ECN mark.  Clears
 * the queued-ACK flag for the space on success.  Returns 0 on success,
 * -1 on failure (no packet available or frame generation failed).
 */
static int
imico_generate_ack (struct ietf_mini_conn *conn, enum packnum_space pns,
                                                            lsquic_time_t now)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    struct ietf_mini_rechist rechist;
    int not_used_has_missing, len;
    uint64_t ecn_counts_buf[4];
    const uint64_t *ecn_counts;

    header_type = pns2hety[pns];

    if (conn->imc_incoming_ecn)
    {
        ecn_counts_buf[0]   = conn->imc_ecn_counts_in[pns][0];
        ecn_counts_buf[1]   = conn->imc_ecn_counts_in[pns][1];
        ecn_counts_buf[2]   = conn->imc_ecn_counts_in[pns][2];
        ecn_counts_buf[3]   = conn->imc_ecn_counts_in[pns][3];
        ecn_counts = ecn_counts_buf;
    }
    else
        ecn_counts = NULL;

    packet_out = imico_get_packet_out(conn, header_type, 0);
    if (!packet_out)
        return -1;

    /* Generate ACK frame */
    lsquic_imico_rechist_init(&rechist, conn, pns);
    len = conn->imc_conn.cn_pf->pf_gen_ack_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), lsquic_imico_rechist_first,
                lsquic_imico_rechist_next, imico_rechist_largest_recv, &rechist,
                now, &not_used_has_missing, &packet_out->po_ack2ed, ecn_counts);
    if (len < 0)
    {
        LSQ_WARN("could not generate ACK frame");
        return -1;
    }
    EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->imc_conn.cn_pf,
                        packet_out->po_data + packet_out->po_data_sz, len);
    packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK;
    packet_out->po_data_sz += len;
    /* ACKs are regenerated, not retransmitted, so count them as regen */
    packet_out->po_regen_sz += len;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << pns);
    LSQ_DEBUG("wrote ACK frame of size %d in %s", len, lsquic_pns2str[pns]);
    return 0;
}


/* Generate ACK frames for all spaces with a queued ACK, except the Initial
 * space once it is being ignored.  Returns 0 on success, -1 on error.
 */
static int
imico_generate_acks (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    enum packnum_space pns;

    for (pns = PNS_INIT; pns < IMICO_N_PNS; ++pns)
        if (conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)
                && !(pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT)))
            if (0 != imico_generate_ack(conn, pns, now))
                return -1;

    return 0;
}
/* Generate CONNECTION_CLOSE frame(s) describing why the handshake failed.
 * The error code and reason are derived from connection flags; depending on
 * which packet number spaces are usable, the frame may be written into both
 * Initial and Handshake packets (see the quoted draft text below).
 */
static void
imico_generate_conn_close (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    enum packnum_space pns, pns_max;
    unsigned error_code;
    const char *reason;
    size_t need;
    int sz, rlen, is_app;
    char reason_buf[0x20];

    /* Pick error code and reason phrase; rlen values are the lengths of the
     * corresponding literal strings.
     */
    if (conn->imc_flags & IMC_ABORT_ERROR)
    {
        is_app = !!(conn->imc_flags & IMC_ABORT_ISAPP);
        error_code = conn->imc_error_code;
        reason = NULL;
        rlen = 0;
    }
    else if (conn->imc_flags & IMC_TLS_ALERT)
    {
        is_app = 0;
        error_code = 0x100 + conn->imc_tls_alert;
        if (ALERT_NO_APPLICATION_PROTOCOL == conn->imc_tls_alert)
            reason = "no suitable application protocol";
        else
        {
            snprintf(reason_buf, sizeof(reason_buf), "TLS alert %"PRIu8,
                                                        conn->imc_tls_alert);
            reason = reason_buf;
        }
        rlen = strlen(reason);
    }
    else if (conn->imc_flags & IMC_BAD_TRANS_PARAMS)
    {
        is_app = 0;
        error_code = TEC_TRANSPORT_PARAMETER_ERROR;
        reason = "bad transport parameters";
        rlen = 24;
    }
    else if (conn->imc_flags & IMC_HSK_FAILED)
    {
        is_app = 0;
        error_code = TEC_NO_ERROR;
        reason = "handshake failed";
        rlen = 16;
    }
    else if (conn->imc_flags & IMC_PARSE_FAILED)
    {
        is_app = 0;
        error_code = TEC_FRAME_ENCODING_ERROR;
        reason = "cannot decode frame";
        rlen = 19;
    }
    else
    {
        is_app = 0;
        error_code = TEC_INTERNAL_ERROR;
        reason = NULL;
        rlen = 0;
    }


/* [draft-ietf-quic-transport-28] Section 10.3.1:
 *
 " A client will always know whether the server has Handshake keys (see
 " Section 17.2.2.1), but it is possible that a server does not know
 " whether the client has Handshake keys.  Under these circumstances, a
 " server SHOULD send a CONNECTION_CLOSE frame in both Handshake and
 " Initial packets to ensure that at least one of them is processable by
 " the client.
--- 8< ---
 " Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake
 " packet could expose application state or be used to alter application
 " state.  A CONNECTION_CLOSE of type 0x1d MUST be replaced by a
 " CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or
 " Handshake packets.  Otherwise, information about the application
 " state might be revealed.  Endpoints MUST clear the value of the
 " Reason Phrase field and SHOULD use the APPLICATION_ERROR code when
 " converting to a CONNECTION_CLOSE of type 0x1c.
 */
    LSQ_DEBUG("sending CONNECTION_CLOSE, is_app: %d, error code: %u, "
        "reason: %.*s", is_app, error_code, rlen, reason);
    if (is_app && conn->imc_conn.cn_version > LSQVER_ID27)
    {
        LSQ_DEBUG("convert to 0x1C, replace code and reason");
        is_app = 0;
        error_code = TEC_APPLICATION_ERROR;
        rlen = 0;
    }

    /* Decide which packet number space(s) to use based on whether we have
     * sent a Handshake packet and whether the peer has sent one.
     */
    pns = (conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3;
    switch ((!!(conn->imc_flags & IMC_HSK_PACKET_SENT) << 1)
                | (pns == PNS_HSK) /* Handshake packet received */)
    {
    case (0 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_INIT;
        break;
    case (1 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_HSK;
        break;
    default:
        pns = PNS_HSK;
        pns_max = PNS_HSK;
        break;
    }

    need = conn->imc_conn.cn_pf->pf_connect_close_frame_size(is_app,
                                                        error_code, 0, rlen);
    LSQ_DEBUG("will generate %u CONNECTION_CLOSE frame%.*s",
        pns_max - pns + 1, pns_max > pns, "s");
    do
    {
        header_type = pns2hety[pns];
        packet_out = imico_get_packet_out(conn, header_type, need);
        if (!packet_out)
            return;
        sz = conn->imc_conn.cn_pf->pf_gen_connect_close_frame(
                 packet_out->po_data + packet_out->po_data_sz,
                 lsquic_packet_out_avail(packet_out), is_app, error_code, reason,
                 rlen);
        if (sz >= 0)
        {
            packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE;
            packet_out->po_data_sz += sz;
            LSQ_DEBUG("generated CONNECTION_CLOSE frame");
        }
        else
            LSQ_WARN("could not generate CONNECTION_CLOSE frame");
        ++pns;
    }
    while (pns <= pns_max);
}
/* Write a HANDSHAKE_DONE frame into a short-header (1-RTT) packet and mark
 * it as sent.  Returns 0 on success, -1 on failure.
 */
static int
imico_generate_handshake_done (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    unsigned need;
    int sz;

    need = conn->imc_conn.cn_pf->pf_handshake_done_frame_size();
    packet_out = imico_get_packet_out(conn, HETY_NOT_SET, need);
    if (!packet_out)
        return -1;
    sz = conn->imc_conn.cn_pf->pf_gen_handshake_done_frame(
             packet_out->po_data + packet_out->po_data_sz,
             lsquic_packet_out_avail(packet_out));
    if (sz < 0)
    {
        LSQ_WARN("could not generate HANDSHAKE_DONE frame");
        return -1;
    }

    packet_out->po_frame_types |= 1 << QUIC_FRAME_HANDSHAKE_DONE;
    packet_out->po_data_sz += sz;
    LSQ_DEBUG("generated HANDSHAKE_DONE frame");
    conn->imc_flags |= IMC_HSK_DONE_SENT;

    return 0;
}


/* Periodic driver of the mini connection: expire the connection when the
 * handshake timeout elapses, generate queued ACKs, emit CONNECTION_CLOSE
 * or HANDSHAKE_DONE as dictated by the connection flags, and report
 * whether there are packets ready to send.
 */
static enum tick_st
ietf_mini_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum tick_st tick;

    if (conn->imc_created + conn->imc_enpub->enp_settings.es_handshake_to < now)
    {
        LSQ_DEBUG("connection expired: closing");
        return TICK_CLOSE;
    }


    if (conn->imc_flags & (IMC_QUEUED_ACK_INIT|IMC_QUEUED_ACK_HSK))
    {
        if (0 != imico_generate_acks(conn, now))
        {
            conn->imc_flags |= IMC_ERROR;
            return TICK_CLOSE;
        }
    }


    tick = 0;

    if (conn->imc_flags & IMC_ERROR)
    {
  close_on_error:
        /* No need to send our own close if the peer already closed */
        if (!(conn->imc_flags & IMC_CLOSE_RECVD))
            imico_generate_conn_close(conn);
        tick |= TICK_CLOSE;
    }
    else if (conn->imc_flags & IMC_HSK_OK)
    {
        if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session))
            LSQ_DEBUG("still in init, defer HANDSHAKE_DONE");
        else if (0 != imico_generate_handshake_done(conn))
            goto close_on_error;
        /* Handshake succeeded: promote to a full connection */
        tick |= TICK_PROMOTE;
    }

    if (imico_have_packets_to_send(conn, now))
        tick |= TICK_SEND;
    else
        tick |= TICK_QUIET;

    LSQ_DEBUG("Return TICK %d", tick);
    return tick;
}
tick = 0; 1992 1993 if (conn->imc_flags & IMC_ERROR) 1994 { 1995 close_on_error: 1996 if (!(conn->imc_flags & IMC_CLOSE_RECVD)) 1997 imico_generate_conn_close(conn); 1998 tick |= TICK_CLOSE; 1999 } 2000 else if (conn->imc_flags & IMC_HSK_OK) 2001 { 2002 if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session)) 2003 LSQ_DEBUG("still in init, defer HANDSHAKE_DONE"); 2004 else if (0 != imico_generate_handshake_done(conn)) 2005 goto close_on_error; 2006 tick |= TICK_PROMOTE; 2007 } 2008 2009 if (imico_have_packets_to_send(conn, now)) 2010 tick |= TICK_SEND; 2011 else 2012 tick |= TICK_QUIET; 2013 2014 LSQ_DEBUG("Return TICK %d", tick); 2015 return tick; 2016} 2017 2018 2019static void 2020ietf_mini_conn_ci_internal_error (struct lsquic_conn *lconn, 2021 const char *format, ...) 2022{ 2023 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 2024 LSQ_INFO("internal error reported"); 2025 conn->imc_flags |= IMC_ERROR; 2026} 2027 2028 2029static void 2030ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app, 2031 unsigned error_code, const char *fmt, ...) 
/* The mini connection has a single network path; return it regardless of
 * the socket address passed in.
 */
static struct network_path *
ietf_mini_conn_ci_get_path (struct lsquic_conn *lconn,
                                                    const struct sockaddr *sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    return &conn->imc_path;
}


/* Prefer logging with the peer's CID (DCID of the path); fall back to our
 * own SCID when it has not been recorded yet.
 */
static const lsquic_cid_t *
ietf_mini_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    if (conn->imc_path.np_dcid.len)
        return &conn->imc_path.np_dcid;
    else
        return CN_SCID(lconn);
}


/* Record the local and peer addresses of the current packet on the path,
 * detecting path changes.  When the address family flips between IPv4 and
 * IPv6, already-encrypted unsent packets must be re-encrypted (sizes
 * differ), so their buffers are returned.  Always returns path ID 0.
 */
static unsigned char
ietf_mini_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct sockaddr *orig_peer_sa;
    struct lsquic_packet_out *packet_out;
    size_t len;
    char path_str[4][INET6_ADDRSTRLEN + sizeof(":65535")];

    if (NP_IS_IPv6(&conn->imc_path) != (AF_INET6 == peer_sa->sa_family))
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if ((packet_out->po_flags & (PO_SENT|PO_ENCRYPTED)) == PO_ENCRYPTED)
                imico_return_enc_data(conn, packet_out);

    orig_peer_sa = NP_PEER_SA(&conn->imc_path);
    /* sa_family == 0 means the path has not been initialized yet */
    if (orig_peer_sa->sa_family == 0)
        LSQ_DEBUG("connection to %s from %s", SA2STR(local_sa, path_str[0]),
                                            SA2STR(peer_sa, path_str[1]));
    else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa)
            && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa)))
    {
        LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)",
            SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]),
            SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]),
            SA2STR(local_sa, path_str[2]),
            SA2STR(peer_sa, path_str[3]));
        conn->imc_flags |= IMC_PATH_CHANGED;
    }

    len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                            : sizeof(struct sockaddr_in6);

    memcpy(conn->imc_path.np_peer_addr, peer_sa, len);
    memcpy(conn->imc_path.np_local_addr, local_sa, len);
    conn->imc_path.np_peer_ctx = peer_ctx;
    return 0;
}
%s", SA2STR(local_sa, path_str[0]), 2097 SA2STR(peer_sa, path_str[1])); 2098 else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa) 2099 && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa))) 2100 { 2101 LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)", 2102 SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]), 2103 SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]), 2104 SA2STR(local_sa, path_str[2]), 2105 SA2STR(peer_sa, path_str[3])); 2106 conn->imc_flags |= IMC_PATH_CHANGED; 2107 } 2108 2109 len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in) 2110 : sizeof(struct sockaddr_in6); 2111 2112 memcpy(conn->imc_path.np_peer_addr, peer_sa, len); 2113 memcpy(conn->imc_path.np_local_addr, local_sa, len); 2114 conn->imc_path.np_peer_ctx = peer_ctx; 2115 return 0; 2116} 2117 2118 2119void 2120ietf_mini_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz) 2121{ 2122 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 2123 2124 conn->imc_bytes_in += garbage_sz; 2125 LSQ_DEBUG("count %zd bytes of garbage, new value: %u bytes", garbage_sz, 2126 conn->imc_bytes_in); 2127} 2128 2129 2130static const struct conn_iface mini_conn_ietf_iface = { 2131 .ci_abort_error = ietf_mini_conn_ci_abort_error, 2132 .ci_client_call_on_new = ietf_mini_conn_ci_client_call_on_new, 2133 .ci_count_garbage = ietf_mini_conn_ci_count_garbage, 2134 .ci_destroy = ietf_mini_conn_ci_destroy, 2135 .ci_get_engine = ietf_mini_conn_ci_get_engine, 2136 .ci_get_log_cid = ietf_mini_conn_ci_get_log_cid, 2137 .ci_get_path = ietf_mini_conn_ci_get_path, 2138 .ci_hsk_done = ietf_mini_conn_ci_hsk_done, 2139 .ci_internal_error = ietf_mini_conn_ci_internal_error, 2140 .ci_is_tickable = ietf_mini_conn_ci_is_tickable, 2141 .ci_next_packet_to_send = ietf_mini_conn_ci_next_packet_to_send, 2142 .ci_next_tick_time = ietf_mini_conn_ci_next_tick_time, 2143 .ci_packet_in = ietf_mini_conn_ci_packet_in, 2144 .ci_packet_not_sent = ietf_mini_conn_ci_packet_not_sent, 2145 
.ci_packet_sent = ietf_mini_conn_ci_packet_sent, 2146 .ci_record_addrs = ietf_mini_conn_ci_record_addrs, 2147 .ci_tick = ietf_mini_conn_ci_tick, 2148 .ci_tls_alert = ietf_mini_conn_ci_tls_alert, 2149}; 2150