/* Copyright (c) 2017 - 2022 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_mini_conn_ietf.c -- Mini connection used by the IETF QUIC
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
#include <stdlib.h>

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_mm.h"
#include "lsquic_malo.h"
#include "lsquic_engine_public.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_rtt.h"
#include "lsquic_util.h"
#include "lsquic_enc_sess.h"
#include "lsquic_trechist.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_ev_log.h"
#include "lsquic_trans_params.h"
#include "lsquic_ietf.h"
#include "lsquic_packet_ietf.h"
#include "lsquic_attq.h"
#include "lsquic_alarmset.h"
#include "lsquic_crand.h"

#define LSQUIC_LOGGER_MODULE LSQLM_MINI_CONN
#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(&conn->imc_conn)
#include "lsquic_logger.h"

/* NOTE(review): classic multiple-evaluation macros -- do not pass arguments
 * with side effects.
 */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static const struct conn_iface mini_conn_ietf_iface;

static unsigned highest_bit_set (unsigned long long);

static int
imico_can_send (const struct ietf_mini_conn *, size_t);

static void
ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                                unsigned error_code, const char *fmt, ...);

/* Map encryption level to the long-header packet type used for packets sent
 * at that level.  ENC_LEV_EARLY is invalid here: the server's mini conn
 * never sends 0-RTT packets.
 */
static const enum header_type el2hety[] =
{
    [ENC_LEV_INIT]  = HETY_HANDSHAKE,
    [ENC_LEV_CLEAR] = HETY_INITIAL,
    [ENC_LEV_FORW]  = HETY_NOT_SET,
    [ENC_LEV_EARLY] = 0,    /* Invalid */
};


/* Destroy an outgoing packet using this connection's engine context and
 * peer context.
 */
static void
imico_destroy_packet (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, conn->imc_enpub,
                                                conn->imc_path.np_peer_ctx);
}


/* Return nonzero if at least one of the ECN-marked packets we sent has been
 * acknowledged.  Only the Initial and Handshake number spaces are checked,
 * as those are the only spaces a mini connection sends in.
 */
int
lsquic_mini_conn_ietf_ecn_ok (const struct ietf_mini_conn *conn)
{
    packno_set_t acked;

    /* First flight has only Initial and Handshake packets */
    acked = conn->imc_acked_packnos[PNS_INIT]
          | conn->imc_acked_packnos[PNS_HSK]
          ;
    return 0 != (conn->imc_ecn_packnos & acked);
}


#define imico_ecn_ok lsquic_mini_conn_ietf_ecn_ok


/* Pick the ECN codepoint for outgoing packets: ECT(0) while probing (nothing
 * sent yet) or once ECN has been validated by an ACK; NOT-ECT otherwise or
 * when ECN is disabled in the engine settings.
 */
static enum ecn
imico_get_ecn (struct ietf_mini_conn *conn)
{
    if (!conn->imc_enpub->enp_settings.es_ecn)
        return ECN_NOT_ECT;
    else if (!conn->imc_sent_packnos /* We set ECT0 in first flight */
                                                        || imico_ecn_ok(conn))
        return ECN_ECT0;
    else
        return ECN_NOT_ECT;
}


/* Return an unsent packet of `header_type' with at least `need' bytes of
 * room, reusing a queued packet if possible, otherwise allocating a new
 * one.  Returns NULL when the mini connection's small packet number space
 * is exhausted or allocation fails.
 */
static struct lsquic_packet_out *
imico_get_packet_out (struct ietf_mini_conn *conn,
                                    enum header_type header_type, size_t need)
{
    struct lsquic_packet_out *packet_out;
    enum ecn ecn;

    if (need)
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if (!(packet_out->po_flags & PO_SENT)
                    && packet_out->po_header_type == header_type
                    && lsquic_packet_out_avail(packet_out) >= need)
                return packet_out;

    if (conn->imc_next_packno >= MAX_PACKETS)
    {
        LSQ_DEBUG("ran out of outgoing packet numbers, won't allocate packet");
        return NULL;
    }

    packet_out = lsquic_packet_out_new(&conn->imc_enpub->enp_mm, NULL, 1,
            &conn->imc_conn, IQUIC_PACKNO_LEN_1, NULL, NULL, &conn->imc_path,
            header_type);
    if (!packet_out)
    {
        LSQ_WARN("could not allocate packet: %s", strerror(errno));
        return NULL;
    }

    packet_out->po_header_type = header_type;
    packet_out->po_packno = conn->imc_next_packno++;
    packet_out->po_flags |= PO_MINI;
    lsquic_packet_out_set_pns(packet_out, lsquic_hety2pns[header_type]);
    ecn = imico_get_ecn(conn);
    packet_out->po_lflags |= ecn << POECN_SHIFT;
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    packet_out->po_loss_chain = packet_out;
    return packet_out;
}


/* Recover the mini connection from a pointer to one of its embedded crypto
 * streams.  A stream's enc level doubles as its index in imc_streams, so
 * subtracting it yields the start of the array, from which the containing
 * struct is found via offsetof.
 */
static struct ietf_mini_conn *
cryst_get_conn (const struct mini_crypto_stream *cryst)
{
    return (void *)
        ((unsigned char *) (cryst - cryst->mcs_enc_level)
            - offsetof(struct ietf_mini_conn, imc_streams));
}


/* Cursor over an outgoing crypto message buffer. */
struct msg_ctx
{
    const unsigned char       *buf;
    const unsigned char *const end;
};


/* CRYPTO frame payload generator callback: copy up to `len' bytes from the
 * message context into `buf' and advance the cursor.  `fin' is unused --
 * CRYPTO streams have no FIN.
 */
static size_t
read_from_msg_ctx (void *ctx, void *buf, size_t len, int *fin)
{
    struct msg_ctx *msg_ctx = ctx;
    if (len > (uintptr_t) (msg_ctx->end - msg_ctx->buf))
        len = msg_ctx->end - msg_ctx->buf;
    memcpy(buf, msg_ctx->buf, len);
    msg_ctx->buf += len;
    return len;
}


/* True once the 4-byte TLS handshake message header has been read (offset
 * past 3) and the whole ClientHello (imc_ch_len bytes, parsed by
 * imico_read_chlo_size()) has been consumed from the Initial-level stream.
 */
static int
imico_chlo_has_been_consumed (const struct ietf_mini_conn *conn)
{
    return conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off > 3
        && conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off >= conn->imc_ch_len;
}


/* After the ClientHello has been consumed and the enc session initialized,
 * fetch the peer's transport parameters once and cap the path's packet size
 * to the peer's max_udp_payload_size.  Returns 0 on success or nothing to
 * do; -1 (and sets IMC_BAD_TRANS_PARAMS) if the parameters are unavailable.
 */
static int
imico_maybe_process_params (struct ietf_mini_conn *conn)
{
    const struct transport_params *params;

    if (imico_chlo_has_been_consumed(conn)
        && (conn->imc_flags & (IMC_ENC_SESS_INITED|IMC_HAVE_TP))
                                                    == IMC_ENC_SESS_INITED)
    {
        params = conn->imc_conn.cn_esf.i->esfi_get_peer_transport_params(
                                            conn->imc_conn.cn_enc_session);
        if (params)
        {
            conn->imc_flags |= IMC_HAVE_TP;
            conn->imc_ack_exp = params->tp_ack_delay_exponent;
            if (params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE))
            {
                /* Only shrink, never grow, the path's packet size */
                if (params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]
                                                < conn->imc_path.np_pack_size)
                    conn->imc_path.np_pack_size =
                            params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE];
            }
            LSQ_DEBUG("read transport params, packet size is set to %hu bytes",
                                                conn->imc_path.np_pack_size);
        }
        else
        {
            conn->imc_flags |= IMC_BAD_TRANS_PARAMS;
            return -1;
        }
    }

    return 0;
}


/* Crypto-stream write callback: package `bufsz' bytes of handshake data
 * into CRYPTO frames, allocating outgoing packets of the appropriate header
 * type for the stream's encryption level as needed.  Returns `bufsz' on
 * success (writes are all-or-nothing), -1 on error.  Writes at an ignored
 * Initial level are silently dropped (reported as fully written).
 */
static ssize_t
imico_stream_write (void *stream, const void *bufp, size_t bufsz)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct lsquic_conn *const lconn = &conn->imc_conn;
    const struct parse_funcs *const pf = lconn->cn_pf;
    struct msg_ctx msg_ctx = { bufp, (unsigned char *) bufp + bufsz, };
    struct lsquic_packet_out *packet_out;
    size_t header_sz, need;
    const unsigned char *p;
    int len;

    if (0 != imico_maybe_process_params(conn))
        return -1;

    if (PNS_INIT == lsquic_enclev2pns[ cryst->mcs_enc_level ]
                                    && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_WARN("trying to write at the ignored Initial level");
        return bufsz;
    }

    while (msg_ctx.buf < msg_ctx.end)
    {
        header_sz = lconn->cn_pf->pf_calc_crypto_frame_header_sz(
                            cryst->mcs_write_off, msg_ctx.end - msg_ctx.buf);
        /* Require room for the header plus at least one payload byte */
        need = header_sz + 1;
        packet_out = imico_get_packet_out(conn,
                                    el2hety[ cryst->mcs_enc_level ], need);
        if (!packet_out)
            return -1;

        p = msg_ctx.buf;
        len = pf->pf_gen_crypto_frame(packet_out->po_data + packet_out->po_data_sz,
                    lsquic_packet_out_avail(packet_out), 0, cryst->mcs_write_off, 0,
                    msg_ctx.end - msg_ctx.buf, read_from_msg_ctx, &msg_ctx);
        if (len < 0)
            return len;
        EV_LOG_GENERATED_CRYPTO_FRAME(LSQUIC_LOG_CONN_ID, pf,
                        packet_out->po_data + packet_out->po_data_sz, len);
        packet_out->po_data_sz += len;
        packet_out->po_frame_types |= 1 << QUIC_FRAME_CRYPTO;
        packet_out->po_flags |= PO_HELLO;
        /* read_from_msg_ctx advanced msg_ctx.buf by the payload written */
        cryst->mcs_write_off += msg_ctx.buf - p;
    }

    assert(msg_ctx.buf == msg_ctx.end);
    return bufsz;
}


/* Crypto-stream flush callback: nothing to do, packets are flushed when
 * sent.
 */
static int
imico_stream_flush (void *stream)
{
    return 0;
}


/* Find a stashed CRYPTO frame (or the frame currently being processed,
 * imc_last_in) whose read offset matches `read_off' at `enc_level'.
 * Returns NULL if no contiguous data is available.  Note: the stashed
 * frames reuse the stream_id field to store the encryption level.
 */
static struct stream_frame *
imico_find_stream_frame (const struct ietf_mini_conn *conn,
                                enum enc_level enc_level, unsigned read_off)
{
    struct stream_frame *frame;

    if (conn->imc_last_in.frame && enc_level == conn->imc_last_in.enc_level
            && read_off == DF_ROFF(conn->imc_last_in.frame))
        return conn->imc_last_in.frame;

    TAILQ_FOREACH(frame, &conn->imc_crypto_frames, next_frame)
        if (enc_level == frame->stream_id && read_off == DF_ROFF(frame))
            return frame;

    return NULL;
}


/* Incrementally parse the first four bytes of the TLS handshake stream:
 * one byte message type (must be 1, ClientHello), then a 24-bit length
 * whose high byte must be zero (ClientHello must fit in 16K), the low two
 * bytes going into imc_ch_len.  The switch falls through on purpose so a
 * single call can consume several header bytes.  Sets IMC_ERROR on any
 * violation.
 */
static void
imico_read_chlo_size (struct ietf_mini_conn *conn, const unsigned char *buf,
                                                                    size_t sz)
{
    const unsigned char *const end = buf + sz;

    assert(conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off < 4);
    switch (conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off)
    {
    case 0:
        if (buf == end)
            return;
        if (*buf != 1)
        {
            LSQ_DEBUG("Does not begin with ClientHello");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 1:
        if (buf == end)
            return;
        if (*buf != 0)
        {
            LSQ_DEBUG("ClientHello larger than 16K");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 2:
        if (buf == end)
            return;
        conn->imc_ch_len = *buf << 8;
        ++buf;
        /* fall-through */
    default:
        if (buf == end)
            return;
        conn->imc_ch_len |= *buf;
    }
}


/* Crypto-stream read callback: feed contiguous stashed CRYPTO data at the
 * stream's read offset into `readf'.
 */
static ssize_t
imico_stream_readf (void *stream,
        size_t (*readf)(void *, const unsigned char *,
                            size_t, int), void *ctx)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct stream_frame *frame;
    const unsigned char *buf;
    size_t nread, total_read;
    unsigned avail;

    total_read = 0;
    while ((frame = imico_find_stream_frame(conn, cryst->mcs_enc_level,
                                                        cryst->mcs_read_off)))
    {
        avail = DF_SIZE(frame) - frame->data_frame.df_read_off;
        buf = frame->data_frame.df_data + frame->data_frame.df_read_off;
        nread = readf(ctx, buf, avail, DF_FIN(frame));
        /* Peek at the first four bytes to learn the ClientHello length */
        if (cryst->mcs_enc_level == ENC_LEV_CLEAR && cryst->mcs_read_off < 4)
            imico_read_chlo_size(conn, buf, nread);
        total_read += nread;
        cryst->mcs_read_off += nread;
        frame->data_frame.df_read_off += nread;
        LSQ_DEBUG("read %zu bytes at offset %"PRIu64" on enc level %u", nread,
            DF_ROFF(frame), cryst->mcs_enc_level);
        if (DF_END(frame) == DF_ROFF(frame))
        {
            /* Fully consumed: release it, unless it is the in-flight frame
             * owned by imico_process_crypto_frame().
             */
            if (frame == conn->imc_last_in.frame)
                conn->imc_last_in.frame = NULL;
            else
            {
                TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame);
                --conn->imc_n_crypto_frames;
                conn->imc_crypto_frames_sz -= DF_SIZE(frame);
                lsquic_packet_in_put(&conn->imc_enpub->enp_mm,
                                                            frame->packet_in);
                lsquic_malo_put(frame);
            }
        }
        if (nread < avail)
            break;              /* Reader did not take everything offered */
    }

    if (total_read > 0)
        return total_read;
    else
    {
        /* CRYPTO streams never end, so zero bytes read always means
         * EWOULDBLOCK
         */
        errno = EWOULDBLOCK;
        return -1;
    }
}


/* Set or clear want-read/want-write flag bit `bit'; return the previous
 * value of that bit (0 or 1).
 */
static int
imico_stream_wantX (struct mini_crypto_stream *cryst, int bit, int is_want)
{
    int old;

    old = (cryst->mcs_flags & (1 << bit)) > 0;
    cryst->mcs_flags &= ~(1 << bit);
    cryst->mcs_flags |= !!is_want << bit;
    return old;
}


static int
imico_stream_wantwrite (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTWRITE, is_want);
}


static int
imico_stream_wantread (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTREAD, is_want);
}


static enum enc_level
imico_stream_enc_level (void *stream)
{
    struct mini_crypto_stream *const cryst = stream;
    return cryst->mcs_enc_level;
}


/* Crypto stream callbacks handed to the enc session. */
static const struct crypto_stream_if crypto_stream_if =
{
    .csi_write      = imico_stream_write,
    .csi_flush      = imico_stream_flush,
    .csi_readf      = imico_stream_readf,
    .csi_wantwrite  = imico_stream_wantwrite,
    .csi_wantread   = imico_stream_wantread,
    .csi_enc_level  = imico_stream_enc_level,
};


/* Sanity-check the first incoming packet; currently only enforces the
 * minimum Initial datagram size.
 */
static int
is_first_packet_ok (const struct lsquic_packet_in *packet_in,
                                                    size_t udp_payload_size)
{
    if (udp_payload_size < IQUIC_MIN_INIT_PACKET_SZ)
    {
        /* [draft-ietf-quic-transport-24] Section 14 */
        LSQ_LOG1(LSQ_LOG_DEBUG, "incoming UDP payload too small: %zu bytes",
                                                            udp_payload_size);
        return 0;
    }
    /* TODO: Move decryption of the first packet into this function?
*/ 454 return 1; /* TODO */ 455} 456 457 458static void 459imico_peer_addr_validated (struct ietf_mini_conn *conn, const char *how) 460{ 461 if (!(conn->imc_flags & IMC_ADDR_VALIDATED)) 462 { 463 conn->imc_flags |= IMC_ADDR_VALIDATED; 464 LSQ_DEBUG("peer address validated (%s)", how); 465 } 466} 467 468 469struct lsquic_conn * 470lsquic_mini_conn_ietf_new (struct lsquic_engine_public *enpub, 471 const struct lsquic_packet_in *packet_in, 472 enum lsquic_version version, int is_ipv4, const lsquic_cid_t *odcid, 473 size_t udp_payload_size) 474{ 475 struct ietf_mini_conn *conn; 476 enc_session_t *enc_sess; 477 enum enc_level i; 478 const struct enc_session_funcs_iquic *esfi; 479 unsigned char rand_nybble; 480 481 if (!is_first_packet_ok(packet_in, udp_payload_size)) 482 return NULL; 483 484 conn = lsquic_malo_get(enpub->enp_mm.malo.mini_conn_ietf); 485 if (!conn) 486 { 487 LSQ_LOG1(LSQ_LOG_WARN, "cannot allocate mini connection: %s", 488 strerror(errno)); 489 return NULL; 490 } 491 memset(conn, 0, sizeof(*conn)); 492 conn->imc_conn.cn_if = &mini_conn_ietf_iface; 493 conn->imc_conn.cn_cces = conn->imc_cces; 494 conn->imc_conn.cn_n_cces = sizeof(conn->imc_cces) 495 / sizeof(conn->imc_cces[0]); 496 conn->imc_cces[0].cce_cid = packet_in->pi_dcid; 497 conn->imc_cces[0].cce_flags = CCE_USED; 498 conn->imc_conn.cn_cces_mask = 1; 499 lsquic_scid_from_packet_in(packet_in, &conn->imc_path.np_dcid); 500 LSQ_DEBUGC("recv SCID from client %"CID_FMT, CID_BITS(&conn->imc_cces[0].cce_cid)); 501 LSQ_DEBUGC("recv DCID from client %"CID_FMT, CID_BITS(&conn->imc_path.np_dcid)); 502 503 /* Generate new SCID. Since is not the original SCID, it is given 504 * a sequence number (0) and therefore can be retired by the client. 
505 */ 506 enpub->enp_generate_scid(enpub->enp_gen_scid_ctx, &conn->imc_conn, 507 &conn->imc_conn.cn_cces[1].cce_cid, enpub->enp_settings.es_scid_len); 508 509 LSQ_DEBUGC("generated SCID %"CID_FMT" at index %u, switching to it", 510 CID_BITS(&conn->imc_conn.cn_cces[1].cce_cid), 1); 511 conn->imc_conn.cn_cces[1].cce_flags = CCE_SEQNO | CCE_USED; 512 conn->imc_conn.cn_cces_mask |= 1u << 1; 513 conn->imc_conn.cn_cur_cce_idx = 1; 514 515 conn->imc_conn.cn_flags = LSCONN_MINI|LSCONN_IETF|LSCONN_SERVER; 516 conn->imc_conn.cn_version = version; 517 518 for (i = 0; i < N_ENC_LEVS; ++i) 519 { 520 conn->imc_streams[i].mcs_enc_level = i; 521 conn->imc_stream_ps[i] = &conn->imc_streams[i]; 522 } 523 524 rand_nybble = lsquic_crand_get_nybble(enpub->enp_crand); 525 if (rand_nybble == 0) 526 { 527 /* Use trechist for about one out of every sixteen connections so 528 * that the code does not grow stale. 529 */ 530 LSQ_DEBUG("using trechist"); 531 conn->imc_flags |= IMC_TRECHIST; 532 conn->imc_recvd_packnos.trechist.hist_elems 533 = malloc(TRECHIST_SIZE * IMICO_N_PNS); 534 if (!conn->imc_recvd_packnos.trechist.hist_elems) 535 { 536 LSQ_WARN("cannot allocate trechist elems"); 537 return NULL; 538 } 539 } 540 541 esfi = select_esf_iquic_by_ver(version); 542 enc_sess = esfi->esfi_create_server(enpub, &conn->imc_conn, 543 &packet_in->pi_dcid, conn->imc_stream_ps, &crypto_stream_if, 544 &conn->imc_cces[0].cce_cid, &conn->imc_path.np_dcid); 545 if (!enc_sess) 546 { 547 lsquic_malo_put(conn); 548 return NULL; 549 } 550 551 conn->imc_enpub = enpub; 552 conn->imc_created = packet_in->pi_received; 553 if (enpub->enp_settings.es_base_plpmtu) 554 conn->imc_path.np_pack_size = enpub->enp_settings.es_base_plpmtu; 555 else if (is_ipv4) 556 conn->imc_path.np_pack_size = IQUIC_MAX_IPv4_PACKET_SZ; 557 else 558 conn->imc_path.np_pack_size = IQUIC_MAX_IPv6_PACKET_SZ; 559 conn->imc_conn.cn_pf = select_pf_by_ver(version); 560 conn->imc_conn.cn_esf.i = esfi; 561 conn->imc_conn.cn_enc_session = enc_sess; 
562 conn->imc_conn.cn_esf_c = select_esf_common_by_ver(version); 563 TAILQ_INIT(&conn->imc_packets_out); 564 TAILQ_INIT(&conn->imc_app_packets); 565 TAILQ_INIT(&conn->imc_crypto_frames); 566 if (odcid) 567 imico_peer_addr_validated(conn, "odcid"); 568#if LSQUIC_DEVEL 569 { 570 const char *const s = getenv("LSQUIC_LOSE_0RTT"); 571 if (s && atoi(s)) 572 { 573 LSQ_DEBUG("will lose 0-RTT packets (via env variable)"); 574 conn->imc_delayed_packets_count = UCHAR_MAX; 575 } 576 } 577#endif 578 579 LSQ_DEBUG("created mini connection object %p; max packet size=%hu", 580 conn, conn->imc_path.np_pack_size); 581 return &conn->imc_conn; 582} 583 584 585static void 586ietf_mini_conn_ci_client_call_on_new (struct lsquic_conn *lconn) 587{ 588 assert(0); 589} 590 591 592static void 593ietf_mini_conn_ci_destroy (struct lsquic_conn *lconn) 594{ 595 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 596 struct lsquic_packet_out *packet_out; 597 struct lsquic_packet_in *packet_in; 598 struct stream_frame *frame; 599 600 while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out))) 601 { 602 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 603 imico_destroy_packet(conn, packet_out); 604 } 605 while ((packet_in = TAILQ_FIRST(&conn->imc_app_packets))) 606 { 607 TAILQ_REMOVE(&conn->imc_app_packets, packet_in, pi_next); 608 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, packet_in); 609 } 610 while ((frame = TAILQ_FIRST(&conn->imc_crypto_frames))) 611 { 612 TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame); 613 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, frame->packet_in); 614 lsquic_malo_put(frame); 615 } 616 if (lconn->cn_enc_session) 617 lconn->cn_esf.i->esfi_destroy(lconn->cn_enc_session); 618 LSQ_DEBUG("ietf_mini_conn_ci_destroyed"); 619 if (conn->imc_flags & IMC_TRECHIST) 620 free(conn->imc_recvd_packnos.trechist.hist_elems); 621 lsquic_malo_put(conn); 622} 623 624 625static struct lsquic_engine * 626ietf_mini_conn_ci_get_engine (struct lsquic_conn *lconn) 
627{ 628 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 629 return conn->imc_enpub->enp_engine; 630} 631 632 633static void 634ietf_mini_conn_ci_hsk_done (struct lsquic_conn *lconn, 635 enum lsquic_hsk_status status) 636{ 637 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 638 639 switch (status) 640 { 641 case LSQ_HSK_OK: 642 case LSQ_HSK_RESUMED_OK: 643 conn->imc_flags |= IMC_HSK_OK; 644 conn->imc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE; 645 LSQ_DEBUG("handshake OK"); 646 break; 647 default: 648 assert(0); 649 /* fall-through */ 650 case LSQ_HSK_FAIL: 651 conn->imc_flags |= IMC_HSK_FAILED|IMC_ERROR; 652 LSQ_INFO("handshake failed"); 653 break; 654 } 655} 656 657 658static void 659ietf_mini_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert) 660{ 661 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 662 LSQ_DEBUG("got TLS alert %"PRIu8, alert); 663 conn->imc_flags |= IMC_ERROR|IMC_TLS_ALERT; 664 conn->imc_tls_alert = alert; 665} 666 667 668/* A mini connection is only tickable if it has unsent packets. This can 669 * occur when packet sending is delayed. 670 * 671 * Otherwise, a mini connection is not tickable: Either there are incoming 672 * packets, in which case, the connection is going to be ticked, or there is 673 * an alarm pending, in which case it will be handled via the attq. 
674 */ 675static int 676ietf_mini_conn_ci_is_tickable (struct lsquic_conn *lconn) 677{ 678 struct ietf_mini_conn *const conn = (struct ietf_mini_conn *) lconn; 679 const struct lsquic_packet_out *packet_out; 680 size_t packet_size; 681 682 if (conn->imc_enpub->enp_flags & ENPUB_CAN_SEND) 683 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 684 if (!(packet_out->po_flags & PO_SENT)) 685 { 686 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 687 return imico_can_send(conn, packet_size); 688 } 689 690 return 0; 691} 692 693 694static int 695imico_can_send (const struct ietf_mini_conn *conn, size_t size) 696{ 697 return (conn->imc_flags & IMC_ADDR_VALIDATED) 698 || conn->imc_bytes_in * 3 >= conn->imc_bytes_out + size 699 ; 700} 701 702 703static void 704imico_zero_pad (struct lsquic_packet_out *packet_out) 705{ 706 size_t pad_size; 707 708 pad_size = lsquic_packet_out_avail(packet_out); 709 memset(packet_out->po_data + packet_out->po_data_sz, 0, pad_size); 710 packet_out->po_data_sz += pad_size; 711 packet_out->po_frame_types |= QUIC_FTBIT_PADDING; 712} 713 714 715static struct lsquic_packet_out * 716ietf_mini_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, 717 const struct to_coal *to_coal) 718{ 719 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 720 struct lsquic_packet_out *packet_out; 721 size_t packet_size; 722 723 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 724 { 725 if (packet_out->po_flags & PO_SENT) 726 continue; 727 /* [draft-ietf-quic-transport-32] Section 14.1: 728 " a server MUST expand the payload of all UDP datagrams carrying 729 " ack-eliciting Initial packets to at least the smallest allowed 730 " maximum datagram size of 1200 bytes. 
731 */ 732 if (packet_out->po_header_type == HETY_INITIAL 733 && !(packet_out->po_frame_types & (1 << QUIC_FRAME_PADDING)) 734 && (packet_out->po_frame_types & IQUIC_FRAME_ACKABLE_MASK) 735 && lsquic_packet_out_avail(packet_out) > 0) 736 imico_zero_pad(packet_out); 737 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 738 if (!(to_coal 739 && (packet_size + to_coal->prev_sz_sum 740 > conn->imc_path.np_pack_size 741 || !lsquic_packet_out_equal_dcids(to_coal->prev_packet, packet_out)) 742 )) 743 { 744 if (!imico_can_send(conn, packet_size)) 745 { 746 LSQ_DEBUG("cannot send packet %"PRIu64" of size %zu: client " 747 "address has not been validated", packet_out->po_packno, 748 packet_size); 749 return NULL; 750 } 751 packet_out->po_flags |= PO_SENT; 752 conn->imc_bytes_out += packet_size; 753 if (!to_coal) 754 LSQ_DEBUG("packet_to_send: %"PRIu64, packet_out->po_packno); 755 else 756 LSQ_DEBUG("packet_to_send: %"PRIu64" (coalesced)", 757 packet_out->po_packno); 758 return packet_out; 759 } 760 else 761 return NULL; 762 } 763 764 return NULL; 765} 766 767 768static int 769imico_calc_retx_timeout (const struct ietf_mini_conn *conn) 770{ 771 lsquic_time_t to; 772 to = lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats); 773 if (to) 774 { 775 to += to / 2; 776 if (to < 10000) 777 to = 10000; 778 } 779 else 780 to = 300000; 781 return to << conn->imc_hsk_count; 782} 783 784 785static lsquic_time_t 786ietf_mini_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why) 787{ 788 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 789 const struct lsquic_packet_out *packet_out; 790 lsquic_time_t exp_time, retx_time; 791 792 exp_time = conn->imc_created + 793 conn->imc_enpub->enp_settings.es_handshake_to; 794 795 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 796 if (packet_out->po_flags & PO_SENT) 797 { 798 retx_time = packet_out->po_sent + imico_calc_retx_timeout(conn); 799 if (retx_time < exp_time) 800 { 801 *why = N_AEWS + AL_RETX_HSK; 
                return retx_time;
            }
            else
            {
                *why = AEW_MINI_EXPIRE;
                return exp_time;
            }
        }

    *why = AEW_MINI_EXPIRE;
    return exp_time;
}


/* Common parameter list for the frame-processing handlers below. */
#define IMICO_PROC_FRAME_ARGS                                           \
    struct ietf_mini_conn *conn, struct lsquic_packet_in *packet_in,    \
    const unsigned char *p, size_t len


/* Invoke the enc session's read and then write callbacks on every created
 * crypto stream that wants them.
 */
static void
imico_dispatch_stream_events (struct ietf_mini_conn *conn)
{
    enum enc_level i;

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTREAD))
                                            == (MCS_CREATED|MCS_WANTREAD))
        {
            LSQ_DEBUG("dispatch read events on level #%u", i);
            lsquic_mini_cry_sm_if.on_read((void *) &conn->imc_streams[i],
                                            conn->imc_conn.cn_enc_session);
        }

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTWRITE))
                                            == (MCS_CREATED|MCS_WANTWRITE))
        {
            LSQ_DEBUG("dispatch write events on level #%u", i);
            lsquic_mini_cry_sm_if.on_write((void *) &conn->imc_streams[i],
                                            conn->imc_conn.cn_enc_session);
        }
}


/* Save a copy of a CRYPTO frame that cannot be consumed yet.  The copy
 * holds a reference to the incoming packet (to keep the frame data alive)
 * and reuses the stream_id field to store the encryption level.  Refuses
 * when either the stashed-frame count or total byte limits are reached.
 * Returns 0 on success, -1 on failure.
 */
static int
imico_stash_stream_frame (struct ietf_mini_conn *conn,
        enum enc_level enc_level, struct lsquic_packet_in *packet_in,
        const struct stream_frame *frame)
{
    struct stream_frame *copy;

    if (conn->imc_n_crypto_frames >= IMICO_MAX_STASHED_FRAMES)
    {
        LSQ_INFO("cannot stash more CRYPTO frames, at %hhu already, while max "
            "is %u", conn->imc_n_crypto_frames, IMICO_MAX_STASHED_FRAMES);
        return -1;
    }

    if (conn->imc_crypto_frames_sz + DF_SIZE(frame) > IMICO_MAX_BUFFERED_CRYPTO)
    {
        LSQ_INFO("cannot stash more than %u bytes of CRYPTO frames",
            IMICO_MAX_BUFFERED_CRYPTO);
        return -1;
    }

    copy = lsquic_malo_get(conn->imc_enpub->enp_mm.malo.stream_frame);
    if (!copy)
    {
        LSQ_INFO("could not allocate stream frame for stashing");
        return -1;
    }

    *copy = *frame;
    copy->packet_in = lsquic_packet_in_get(packet_in);
    copy->stream_id = enc_level;
    TAILQ_INSERT_TAIL(&conn->imc_crypto_frames, copy, next_frame);
    ++conn->imc_n_crypto_frames;
    conn->imc_crypto_frames_sz += DF_SIZE(frame);
    return 0;
}


/* Process an incoming CRYPTO frame: readable data is exposed to the enc
 * session via imc_last_in and the stream event dispatch; data ahead of the
 * read offset is stashed for later; duplicates are dropped.  Returns the
 * number of bytes consumed from the packet, or 0 on error.
 */
static unsigned
imico_process_crypto_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    enum enc_level enc_level, i;
    struct stream_frame stream_frame;

    parsed_len = conn->imc_conn.cn_pf->pf_parse_crypto_frame(p, len,
                                                                &stream_frame);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    enc_level = lsquic_packet_in_enc_level(packet_in);
    EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, &stream_frame, enc_level);

    if (conn->imc_streams[enc_level].mcs_read_off >= DF_OFF(&stream_frame)
            && conn->imc_streams[enc_level].mcs_read_off < DF_END(&stream_frame))
        LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level);
    else if (conn->imc_streams[enc_level].mcs_read_off < DF_OFF(&stream_frame))
    {
        LSQ_DEBUG("Can't read CRYPTO frame on enc level #%u at offset %"PRIu64
            " yet -- stash", enc_level, DF_OFF(&stream_frame));
        if (0 == imico_stash_stream_frame(conn, enc_level, packet_in,
                                                                &stream_frame))
            return parsed_len;
        else
            return 0;
    }
    else
    {
        LSQ_DEBUG("Got duplicate CRYPTO frame for enc level #%u -- ignore",
                                                                    enc_level);
        return parsed_len;
    }

    /* Lazily initialize the server side of the enc session on the first
     * readable CRYPTO frame.
     */
    if (!(conn->imc_flags & IMC_ENC_SESS_INITED))
    {
        if (0 != conn->imc_conn.cn_esf.i->esfi_init_server(
                                            conn->imc_conn.cn_enc_session))
            return 0;
        conn->imc_flags |= IMC_ENC_SESS_INITED;
    }

    if (!(conn->imc_streams[enc_level].mcs_flags & MCS_CREATED))
    {
        LSQ_DEBUG("creating stream on level #%u", enc_level);
        conn->imc_streams[enc_level].mcs_flags |= MCS_CREATED;
        lsquic_mini_cry_sm_if.on_new_stream(conn->imc_conn.cn_enc_session,
                                    (void *) &conn->imc_streams[enc_level]);
    }

    /* Assume that receiving a CRYPTO frame at a higher level means
       that we no longer want to read from a lower level.
     */
    for (i = 0; i < enc_level; ++i)
        conn->imc_streams[i].mcs_flags &= ~MCS_WANTREAD;

    /* Expose the on-stack frame to the stream reader for the duration of
     * the event dispatch; it must be cleared before this function returns.
     */
    conn->imc_last_in.frame = &stream_frame;
    conn->imc_last_in.enc_level = enc_level;
    imico_dispatch_stream_events(conn);
    conn->imc_last_in.frame = NULL;

    if (DF_ROFF(&stream_frame) < DF_END(&stream_frame))
    {
        /* This is an odd condition, but let's handle it just in case */
        LSQ_DEBUG("New CRYPTO frame on enc level #%u not fully read -- stash",
                                                                    enc_level);
        if (0 != imico_stash_stream_frame(conn, enc_level, packet_in,
                                                                &stream_frame))
            return 0;
    }


    return parsed_len;
}


/* Count the zero bytes at the start of the buffer (length of a PADDING
 * run); returns how many of the `len' bytes were zero.
 */
static ptrdiff_t
imico_count_zero_bytes (const unsigned char *p, size_t len)
{
    const unsigned char *const end = p + len;
    while (p < end && 0 == *p)
        ++p;
    return len - (end - p);
}


/* Consume a run of PADDING (zero) bytes. */
static unsigned
imico_process_padding_frame (IMICO_PROC_FRAME_ARGS)
{
    len = (size_t) imico_count_zero_bytes(p, len);
    EV_LOG_PADDING_FRAME_IN(LSQUIC_LOG_CONN_ID, len);
    return len;
}


/* Feed one RTT measurement into the connection's RTT stats.  The sample is
 * discarded when the peer's reported ACK delay exceeds the measured time.
 */
static void
imico_take_rtt_sample (struct ietf_mini_conn *conn,
                            const struct lsquic_packet_out *packet_out,
                            lsquic_time_t now, lsquic_time_t lack_delta)
{
    assert(packet_out->po_sent);
    lsquic_time_t measured_rtt = now - packet_out->po_sent;
    if (lack_delta < measured_rtt)
    {
        lsquic_rtt_stats_update(&conn->imc_rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("srtt: %"PRIu64" usec, var: %"PRIu64,
                        lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats),
                        lsquic_rtt_stats_get_rttvar(&conn->imc_rtt_stats));
    }
}


/* Process an ACK frame: convert the ACK ranges into a packet-number bitmask
 * (mini connections use small packet numbers, so a packno_set_t suffices),
 * reject acknowledgments of packets never sent, destroy newly acked
 * packets, and reset the handshake retransmission count on new ACKs.
 * Returns bytes consumed, or 0 on error.
 */
static unsigned
imico_process_ack_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    unsigned n;
    lsquic_packet_out_t *packet_out, *next;
    struct ack_info *acki;
    lsquic_packno_t packno;
    lsquic_time_t warn_time;
    packno_set_t acked;
    enum packnum_space pns;
    uint8_t ack_exp;

    if (conn->imc_flags & IMC_HAVE_TP)
        ack_exp = conn->imc_ack_exp;
    else
        ack_exp = TP_DEF_ACK_DELAY_EXP; /* Odd: no transport params yet? */
    acki = conn->imc_enpub->enp_mm.acki;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_ack_frame(p, len, acki,
                                                                    ack_exp);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    acked = 0;

    for (n = 0; n < acki->n_ranges; ++n)
    {
        /* NOTE(review): assumes MAX_PACKETS < 64 so the shifts below stay
         * within packno_set_t -- confirm against the header's definition.
         */
        if (acki->ranges[n].high <= MAX_PACKETS)
        {
            /* Set all bits from `low' through `high' inclusive */
            acked |= (1ULL << acki->ranges[n].high)
                        | ((1ULL << acki->ranges[n].high) - 1);
            acked &= ~((1ULL << acki->ranges[n].low) - 1);
        }
        else
        {
            packno = acki->ranges[n].high;
            goto err_never_sent;
        }
    }
    if (acked & ~conn->imc_sent_packnos)
    {
        packno = highest_bit_set(acked & ~conn->imc_sent_packnos);
        goto err_never_sent;
    }

    EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, acki);
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if ((1ULL << packet_out->po_packno) & acked)
        {
            assert(lsquic_packet_out_pns(packet_out) == pns);
            LSQ_DEBUG("Got ACK for packet %"PRIu64, packet_out->po_packno);
            if (packet_out->po_packno == largest_acked(acki))
                imico_take_rtt_sample(conn, packet_out,
                                    packet_in->pi_received, acki->lack_delta);
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
        }
    }

    if (conn->imc_sent_packnos & ~conn->imc_acked_packnos[pns] & acked)
    {
        LSQ_DEBUG("Newly acked packets, reset handshake count");
        conn->imc_hsk_count = 0;
    }

    conn->imc_acked_packnos[pns] |= acked;

    return parsed_len;

  err_never_sent:
    /* Rate-limit the warning about bogus ACKs to one per interval */
    warn_time = lsquic_time_now();
    if (0 ==
            conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
        || conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
                + WARNING_INTERVAL < warn_time)
    {
        conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] = warn_time;
        LSQ_WARN("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    }
    else
        LSQ_DEBUG("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    return 0;
}


/* PING requires no action; consume its single type byte. */
static unsigned
imico_process_ping_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("got a PING frame, do nothing");
    return 1;
}


/* Process CONNECTION_CLOSE: drop all queued outgoing packets, record that
 * the peer closed, and log the error code and reason.  Always returns 0,
 * which stops further processing and shuts the connection down.
 */
static unsigned
imico_process_connection_close_frame (IMICO_PROC_FRAME_ARGS)
{
    struct lsquic_packet_out *packet_out;
    uint64_t error_code;
    uint16_t reason_len;
    uint8_t reason_off;
    int parsed_len, app_error;

    while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out)))
    {
        TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
        imico_destroy_packet(conn, packet_out);
    }
    conn->imc_flags |= IMC_CLOSE_RECVD;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_connect_close_frame(p, len,
                            &app_error, &error_code, &reason_len, &reason_off);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }
    EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code,
                            (int) reason_len, (const char *) p + reason_off);
    LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; "
            "reason: %.*s)", app_error ? "application" : "transport",
                error_code, (int) reason_len, (const char *) p + reason_off);
    return 0;   /* This shuts down the connection */
}


/* Any frame that is invalid or unexpected during the handshake aborts
 * processing of the packet (returns 0).
 */
static unsigned
imico_process_invalid_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("invalid frame %u (%s)", p[0],
        frame_type_2_str[ conn->imc_conn.cn_pf->pf_parse_frame_type(p, len) ]);
    return 0;
}


/* Frame handler dispatch table, indexed by frame type. */
static unsigned (*const imico_process_frames[N_QUIC_FRAMES])
                                                (IMICO_PROC_FRAME_ARGS) =
{
    [QUIC_FRAME_PADDING]            = imico_process_padding_frame,
    [QUIC_FRAME_CRYPTO]             = imico_process_crypto_frame,
    [QUIC_FRAME_ACK]                = imico_process_ack_frame,
    [QUIC_FRAME_PING]               = imico_process_ping_frame,
    [QUIC_FRAME_CONNECTION_CLOSE]   = imico_process_connection_close_frame,
    /* Some of them are invalid, while others are unexpected.  We treat
     * them the same: handshake cannot proceed.
     */
    [QUIC_FRAME_RST_STREAM]         = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_DATA]           = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAM_DATA]    = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAMS]        = imico_process_invalid_frame,
    [QUIC_FRAME_BLOCKED]            = imico_process_invalid_frame,
    [QUIC_FRAME_STREAM_BLOCKED]     = imico_process_invalid_frame,
    [QUIC_FRAME_STREAMS_BLOCKED]    = imico_process_invalid_frame,
    [QUIC_FRAME_NEW_CONNECTION_ID]  = imico_process_invalid_frame,
    [QUIC_FRAME_STOP_SENDING]       = imico_process_invalid_frame,
    [QUIC_FRAME_PATH_CHALLENGE]     = imico_process_invalid_frame,
    [QUIC_FRAME_PATH_RESPONSE]      = imico_process_invalid_frame,
    /* STREAM frame can only come in the App PNS and we delay those packets: */
    [QUIC_FRAME_STREAM]             = imico_process_invalid_frame,
    [QUIC_FRAME_HANDSHAKE_DONE]     = imico_process_invalid_frame,
    [QUIC_FRAME_ACK_FREQUENCY]      = imico_process_invalid_frame,
    [QUIC_FRAME_TIMESTAMP]          = imico_process_invalid_frame,
};


/* Identify the next frame in the packet and dispatch it to its handler,
 * rejecting frame types that are not legal at the packet's encryption
 * level.  Returns bytes consumed, or 0 on error.
 */
static unsigned
imico_process_packet_frame
(struct ietf_mini_conn *conn, 1171 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 1172{ 1173 enum enc_level enc_level; 1174 enum quic_frame_type type; 1175 1176 enc_level = lsquic_packet_in_enc_level(packet_in); 1177 type = conn->imc_conn.cn_pf->pf_parse_frame_type(p, len); 1178 if (lsquic_legal_frames_by_level[conn->imc_conn.cn_version][enc_level] 1179 & (1 << type)) 1180 { 1181 packet_in->pi_frame_types |= 1 << type; 1182 return imico_process_frames[type](conn, packet_in, p, len); 1183 } 1184 else 1185 { 1186 LSQ_DEBUG("invalid frame %u at encryption level %s", type, 1187 lsquic_enclev2str[enc_level]); 1188 return 0; 1189 } 1190} 1191 1192 1193static int 1194imico_parse_regular_packet (struct ietf_mini_conn *conn, 1195 struct lsquic_packet_in *packet_in) 1196{ 1197 const unsigned char *p, *pend; 1198 unsigned len; 1199 1200 p = packet_in->pi_data + packet_in->pi_header_sz; 1201 pend = packet_in->pi_data + packet_in->pi_data_sz; 1202 1203 while (p < pend) 1204 { 1205 len = imico_process_packet_frame(conn, packet_in, p, pend - p); 1206 if (len > 0) 1207 p += len; 1208 else 1209 return -1; 1210 } 1211 1212 return 0; 1213} 1214 1215 1216static unsigned 1217highest_bit_set (unsigned long long sz) 1218{ 1219#if __GNUC__ 1220 unsigned clz = __builtin_clzll(sz); 1221 return 63 - clz; 1222#else 1223 unsigned long y; 1224 unsigned n; 1225 n = 64; 1226 y = sz >> 32; if (y) { n -= 32; sz = y; } 1227 y = sz >> 16; if (y) { n -= 16; sz = y; } 1228 y = sz >> 8; if (y) { n -= 8; sz = y; } 1229 y = sz >> 4; if (y) { n -= 4; sz = y; } 1230 y = sz >> 2; if (y) { n -= 2; sz = y; } 1231 y = sz >> 1; if (y) return 63 - n + 2; 1232 return 63 - n + sz; 1233#endif 1234} 1235 1236 1237static void 1238ignore_init (struct ietf_mini_conn *conn) 1239{ 1240 struct lsquic_packet_out *packet_out, *next; 1241 unsigned count; 1242 1243 conn->imc_flags |= IMC_IGNORE_INIT; 1244 conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << PNS_INIT); 1245 1246 count = 0; 1247 for (packet_out = 
TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1248 packet_out = next) 1249 { 1250 next = TAILQ_NEXT(packet_out, po_next); 1251 if (PNS_INIT == lsquic_packet_out_pns(packet_out)) 1252 { 1253 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1254 imico_destroy_packet(conn, packet_out); 1255 ++count; 1256 } 1257 } 1258 1259 LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; " 1260 "destroyed %u packet%.*s", count, count != 1, "s"); 1261} 1262 1263 1264static void 1265imico_maybe_delay_processing (struct ietf_mini_conn *conn, 1266 struct lsquic_packet_in *packet_in) 1267{ 1268 unsigned max_delayed; 1269 1270 if (conn->imc_flags & IMC_ADDR_VALIDATED) 1271 max_delayed = IMICO_MAX_DELAYED_PACKETS_VALIDATED; 1272 else 1273 max_delayed = IMICO_MAX_DELAYED_PACKETS_UNVALIDATED; 1274 1275 if (conn->imc_delayed_packets_count < max_delayed) 1276 { 1277 ++conn->imc_delayed_packets_count; 1278 lsquic_packet_in_upref(packet_in); 1279 TAILQ_INSERT_TAIL(&conn->imc_app_packets, packet_in, pi_next); 1280 LSQ_DEBUG("delay processing of packet (now delayed %hhu)", 1281 conn->imc_delayed_packets_count); 1282 } 1283 else 1284 LSQ_DEBUG("drop packet, already delayed %hhu packets", 1285 conn->imc_delayed_packets_count); 1286} 1287 1288 1289/* [draft-ietf-quic-transport-30] Section 8.1: 1290 " Additionally, a server MAY consider the client address validated if 1291 " the client uses a connection ID chosen by the server and the 1292 " connection ID contains at least 64 bits of entropy. 1293 * 1294 * We use RAND_bytes() to generate SCIDs, so it's all entropy. 
 */
static void
imico_maybe_validate_by_dcid (struct ietf_mini_conn *conn,
                                                    const lsquic_cid_t *dcid)
{
    unsigned i;

    /* Only a DCID of 8 or more bytes carries the required 64 bits */
    if (dcid->len >= 8)
        /* Generic code with unnecessary loop as future-proofing */
        for (i = 0; i < conn->imc_conn.cn_n_cces; ++i)
            if ((conn->imc_conn.cn_cces_mask & (1 << i))
                && (conn->imc_conn.cn_cces[i].cce_flags & CCE_SEQNO)
                && LSQUIC_CIDS_EQ(&conn->imc_conn.cn_cces[i].cce_cid, dcid))
            {
                imico_peer_addr_validated(conn, "dcid/scid + entropy");
                return;
            }
}


/* Return true if `packno' in `pns' has been received before.  Uses either
 * the compact bitmask or, after imico_switch_to_trechist(), the tiny
 * receive history.
 */
static int
imico_received_packet_is_dup (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return lsquic_trechist_contains(
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems
                                        + TRECHIST_MAX_RANGES * pns, packno);
    else
        return !!(conn->imc_recvd_packnos.bitmasks[pns] & (1ULL << packno));
}


/* Return true if `packno' is larger than every packet number received so
 * far in `pns' (trivially true when nothing has been received yet).
 */
static int
imico_packno_is_largest (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return 0 == conn->imc_recvd_packnos.trechist.hist_masks[pns]
            || packno > lsquic_trechist_max(
                        conn->imc_recvd_packnos.trechist.hist_masks[pns],
                        conn->imc_recvd_packnos.trechist.hist_elems
                                        + TRECHIST_MAX_RANGES * pns);
    else
        return 0 == conn->imc_recvd_packnos.bitmasks[pns]
            || packno > highest_bit_set(conn->imc_recvd_packnos.bitmasks[pns]);
}


/* Record receipt of `packno'.  In trechist mode, an insertion failure
 * (too many ranges or a range too wide) puts the connection into error
 * state.
 */
static void
imico_record_recvd_packno (struct ietf_mini_conn *conn,
                            enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
    {
        if (0 != lsquic_trechist_insert(
                    &conn->imc_recvd_packnos.trechist.hist_masks[pns],
                    conn->imc_recvd_packnos.trechist.hist_elems
                                        + TRECHIST_MAX_RANGES * pns, packno))
        {
            LSQ_INFO("too many ranges for trechist to hold or range too wide");
            conn->imc_flags |= IMC_ERROR;
        }
    }
    else
        conn->imc_recvd_packnos.bitmasks[pns] |= 1ULL << packno;
}


/* Convert the per-PNS receive bitmasks to the tiny receive history
 * representation.  Done at most once, when a received packet number no
 * longer fits in the 64-bit bitmask.  Returns 0 on success, -1 on
 * allocation failure.  NOTE(review): allocates N_PNS histories while the
 * loops below only touch IMICO_N_PNS of them -- looks like deliberate
 * over-allocation for the shared TRECHIST layout, but confirm.
 */
static int
imico_switch_to_trechist (struct ietf_mini_conn *conn)
{
    uint32_t masks[IMICO_N_PNS];
    enum packnum_space pns;
    struct trechist_elem *elems;
    struct ietf_mini_rechist iter;

    elems = malloc(TRECHIST_SIZE * N_PNS);
    if (!elems)
    {
        LSQ_WARN("cannot allocate trechist elems");
        return -1;
    }

    for (pns = 0; pns < IMICO_N_PNS; ++pns)
        if (conn->imc_recvd_packnos.bitmasks[pns])
        {
            lsquic_imico_rechist_init(&iter, conn, pns);
            lsquic_trechist_copy_ranges(&masks[pns],
                        elems + TRECHIST_MAX_RANGES * pns, &iter,
                        lsquic_imico_rechist_first,
                        lsquic_imico_rechist_next);
        }
        else
            masks[pns] = 0;

    memcpy(conn->imc_recvd_packnos.trechist.hist_masks, masks, sizeof(masks));
    conn->imc_recvd_packnos.trechist.hist_elems = elems;
    conn->imc_flags |= IMC_TRECHIST;
    LSQ_DEBUG("switched to trechist");
    return 0;
}


/* Only a single packet is supported */
static void
ietf_mini_conn_ci_packet_in (struct lsquic_conn *lconn,
                        struct lsquic_packet_in *packet_in)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum dec_packin dec_packin;
    enum packnum_space pns;

    /* Update "bytes in" count as early as possible.  From
     * [draft-ietf-quic-transport-28] Section 8.1:
     " For the purposes of
     " avoiding amplification prior to address validation, servers MUST
     " count all of the payload bytes received in datagrams that are
     " uniquely attributed to a single connection.  This includes datagrams
     " that contain packets that are successfully processed and datagrams
     " that contain packets that are all discarded.
1417 */ 1418 conn->imc_bytes_in += packet_in->pi_data_sz; 1419 1420 if (conn->imc_flags & IMC_ERROR) 1421 { 1422 LSQ_DEBUG("ignore incoming packet: connection is in error state"); 1423 return; 1424 } 1425 1426 if (!(conn->imc_flags & IMC_ADDR_VALIDATED)) 1427 imico_maybe_validate_by_dcid(conn, &packet_in->pi_dcid); 1428 1429 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 1430 if (pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT)) 1431 { 1432 LSQ_DEBUG("ignore init packet"); /* Don't bother decrypting */ 1433 return; 1434 } 1435 1436 dec_packin = lconn->cn_esf_c->esf_decrypt_packet(lconn->cn_enc_session, 1437 conn->imc_enpub, &conn->imc_conn, packet_in); 1438 switch (dec_packin) 1439 { 1440 case DECPI_OK: 1441 break; 1442 case DECPI_VIOLATION: 1443 ietf_mini_conn_ci_abort_error(lconn, 0, TEC_PROTOCOL_VIOLATION, 1444 "protocol violation detected while decrypting packet"); 1445 return; 1446 case DECPI_NOT_YET: 1447 imico_maybe_delay_processing(conn, packet_in); 1448 return; 1449 default: 1450 LSQ_DEBUG("could not decrypt packet"); 1451 return; 1452 } 1453 1454 EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in); 1455 1456 if (pns == PNS_APP) 1457 { 1458 imico_maybe_delay_processing(conn, packet_in); 1459 return; 1460 } 1461 else if (pns == PNS_HSK) 1462 imico_peer_addr_validated(conn, "handshake PNS"); 1463 1464 if (((conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3) < pns) 1465 { 1466 conn->imc_flags &= ~(3 << IMCBIT_PNS_BIT_SHIFT); 1467 conn->imc_flags |= pns << IMCBIT_PNS_BIT_SHIFT; 1468 } 1469 1470 if (pns == PNS_HSK && !(conn->imc_flags & IMC_IGNORE_INIT)) 1471 ignore_init(conn); 1472 1473 if (packet_in->pi_packno > MAX_PACKETS 1474 && !(conn->imc_flags & IMC_TRECHIST)) 1475 { 1476 if (0 != imico_switch_to_trechist(conn)) 1477 return; 1478 } 1479 1480 if (imico_received_packet_is_dup(conn, pns, packet_in->pi_packno)) 1481 { 1482 LSQ_DEBUG("duplicate packet %"PRIu64, packet_in->pi_packno); 1483 return; 1484 } 1485 1486 /* Update receive history before processing 
the packet: if there is an 1487 * error, the connection is terminated and recording this packet number 1488 * is helpful when it is printed along with other diagnostics in dtor. 1489 */ 1490 if (imico_packno_is_largest(conn, pns, packet_in->pi_packno)) 1491 conn->imc_largest_recvd[pns] = packet_in->pi_received; 1492 imico_record_recvd_packno(conn, pns, packet_in->pi_packno); 1493 1494 if (0 != imico_parse_regular_packet(conn, packet_in)) 1495 { 1496 LSQ_DEBUG("connection is now in error state"); 1497 conn->imc_flags |= IMC_ERROR; 1498 return; 1499 } 1500 1501 if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns))) 1502 LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]); 1503 conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns; 1504 ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ]; 1505 conn->imc_incoming_ecn <<= 1; 1506 conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT; 1507} 1508 1509 1510static void 1511ietf_mini_conn_ci_packet_sent (struct lsquic_conn *lconn, 1512 struct lsquic_packet_out *packet_out) 1513{ 1514 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1515 conn->imc_sent_packnos |= 1ULL << packet_out->po_packno; 1516 conn->imc_ecn_packnos |= !!lsquic_packet_out_ecn(packet_out) 1517 << packet_out->po_packno; 1518#if 0 1519 if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)) 1520 { 1521 assert(mc->mc_flags & MC_UNSENT_ACK); 1522 mc->mc_flags &= ~MC_UNSENT_ACK; 1523 } 1524#endif 1525 if (packet_out->po_header_type == HETY_HANDSHAKE) 1526 conn->imc_flags |= IMC_HSK_PACKET_SENT; 1527 LSQ_DEBUG("%s: packet %"PRIu64" sent", __func__, packet_out->po_packno); 1528} 1529 1530 1531static void 1532ietf_mini_conn_ci_packet_not_sent (struct lsquic_conn *lconn, 1533 struct lsquic_packet_out *packet_out) 1534{ 1535 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1536 size_t packet_size; 1537 1538 packet_out->po_flags &= ~PO_SENT; 1539 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 1540 
    conn->imc_bytes_out -= packet_size;
    LSQ_DEBUG("%s: packet %"PRIu64" not sent", __func__, packet_out->po_packno);
}


/* Return a packet's encrypted-data buffer to the packet memory interface
 * and mark the packet as no longer encrypted.
 */
static void
imico_return_enc_data (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_out *packet_out)
{
    conn->imc_enpub->enp_pmi->pmi_return(conn->imc_enpub->enp_pmi_ctx,
        conn->imc_path.np_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


/* Prepare a lost packet for retransmission under a fresh packet number
 * and re-queue it.  Returns -1 when the mini connection has run out of
 * packet numbers.
 */
static int
imico_repackage_packet (struct ietf_mini_conn *conn,
                                        struct lsquic_packet_out *packet_out)
{
    const lsquic_packno_t oldno = packet_out->po_packno;
    const lsquic_packno_t packno = conn->imc_next_packno++;
    if (packno > MAX_PACKETS)
        return -1;

    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                                oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
    packet_out->po_packno = packno;
    packet_out->po_flags &= ~PO_SENT;
    lsquic_packet_out_set_ecn(packet_out, imico_get_ecn(conn));
    /* Encrypted data corresponds to the old packet number: discard it */
    if (packet_out->po_flags & PO_ENCRYPTED)
        imico_return_enc_data(conn, packet_out);
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    return 0;
}


/* Detect retransmission-timeout losses, repackage retransmittable lost
 * packets, and report whether there is at least one unsent packet that
 * the amplification limit permits sending.
 */
static int
imico_handle_losses_and_have_unsent (struct ietf_mini_conn *conn,
                                                            lsquic_time_t now)
{
    TAILQ_HEAD(, lsquic_packet_out) lost_packets =
                                    TAILQ_HEAD_INITIALIZER(lost_packets);
    const struct lsquic_conn *const lconn = &conn->imc_conn;
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t retx_to = 0;
    unsigned n_to_send = 0;
    size_t packet_size;

    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_flags & PO_SENT)
        {
            /* Compute the retransmission timeout lazily, at most once */
            if (0 == retx_to)
                retx_to = imico_calc_retx_timeout(conn);
            if (packet_out->po_sent + retx_to < now)
            {
                LSQ_DEBUG("packet %"PRIu64" has been lost (rto: %"PRIu64")",
                    packet_out->po_packno, retx_to);
                TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
                TAILQ_INSERT_TAIL(&lost_packets, packet_out, po_next);
            }
        }
        else if (packet_size = lsquic_packet_out_total_sz(lconn, packet_out),
                                        imico_can_send(conn, packet_size))
            ++n_to_send;
        else
            break;
    }

    /* Any loss on this pass counts as one handshake retransmission */
    conn->imc_hsk_count += !TAILQ_EMPTY(&lost_packets);

    while ((packet_out = TAILQ_FIRST(&lost_packets)))
    {
        TAILQ_REMOVE(&lost_packets, packet_out, po_next);
        if ((packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK)
                        && 0 == imico_repackage_packet(conn, packet_out))
        {
            packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
            if (imico_can_send(conn, packet_size))
                ++n_to_send;
        }
        else
            imico_destroy_packet(conn, packet_out);
    }

    return n_to_send > 0;
}


/* Thin wrapper: loss handling doubles as the "anything to send?" check */
static int
imico_have_packets_to_send (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    return imico_handle_losses_and_have_unsent(conn, now);
}


/* Initialize a receive-history iterator for `pns'.  Works for both the
 * bitmask and the trechist representations.
 */
void
lsquic_imico_rechist_init (struct ietf_mini_rechist *rechist,
                    const struct ietf_mini_conn *conn, enum packnum_space pns)
{
    assert(pns < IMICO_N_PNS);
    rechist->conn = conn;
    rechist->pns = pns;
    if (conn->imc_flags & IMC_TRECHIST)
        lsquic_trechist_iter(&rechist->u.trechist_iter,
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems
                                                + TRECHIST_MAX_RANGES * pns);
    else
    {
        rechist->u.bitmask.cur_set = 0;
        rechist->u.bitmask.cur_idx = 0;
    }
}


/* Receive time of the largest packet number seen in this iterator's PNS */
static lsquic_time_t
imico_rechist_largest_recv (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    return rechist->conn->imc_largest_recvd[
                                                                rechist->pns ];
}


/* Return the next (lower) range of contiguous received packet numbers
 * from the bitmask representation, or NULL when the mask is exhausted.
 * Iterates from the highest set bit downward.
 */
static const struct lsquic_packno_range *
imico_bitmask_rechist_next (struct ietf_mini_rechist *rechist)
{
    const struct ietf_mini_conn *conn = rechist->conn;
    packno_set_t packnos;
    int i;

    packnos = rechist->u.bitmask.cur_set;
    if (0 == packnos)
        return NULL;

    /* There may be a faster way to do this, but for now, we just want
     * correctness.
     */
    for (i = rechist->u.bitmask.cur_idx; i >= 0; --i)
        if (packnos & (1ULL << i))
        {
            rechist->u.bitmask.range.low = i;
            rechist->u.bitmask.range.high = i;
            break;
        }
    assert(i >= 0); /* We must have hit at least one bit */
    --i;
    /* Extend the range downward over consecutive set bits */
    for ( ; i >= 0 && (packnos & (1ULL << i)); --i)
        rechist->u.bitmask.range.low = i;
    if (i >= 0)
    {
        /* Remember where to resume: bits below `i' only */
        rechist->u.bitmask.cur_set = packnos & ((1ULL << i) - 1);
        rechist->u.bitmask.cur_idx = i;
    }
    else
        rechist->u.bitmask.cur_set = 0;
    LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__,
        rechist->u.bitmask.range.low, rechist->u.bitmask.range.high);
    return &rechist->u.bitmask.range;
}


/* Iterator callback: dispatch to the active history representation */
const struct lsquic_packno_range *
lsquic_imico_rechist_next (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;

    if (rechist->conn->imc_flags & IMC_TRECHIST)
        return lsquic_trechist_next(&rechist->u.trechist_iter);
    else
        return imico_bitmask_rechist_next(rechist);
}


/* Begin iteration and return the first (highest) range.  NOTE(review):
 * the bitmask path calls highest_bit_set() on the mask; callers appear
 * to guarantee it is non-empty (ACKs are only generated after a packet
 * is received) -- confirm.
 */
const struct lsquic_packno_range *
lsquic_imico_rechist_first (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;

    if (rechist->conn->imc_flags & IMC_TRECHIST)
        return lsquic_trechist_first(&rechist->u.trechist_iter);
    else
    {
        rechist->u.bitmask.cur_set
            = rechist->conn->imc_recvd_packnos.bitmasks[ rechist->pns ];
        rechist->u.bitmask.cur_idx
            = highest_bit_set(rechist->u.bitmask.cur_set);
        return lsquic_imico_rechist_next(rechist_ctx);
    }
}


/* Map packet number space to the header type used when sending in that
 * space (App space uses short headers, hence HETY_NOT_SET).
 */
static const enum header_type pns2hety[] =
{
    [PNS_INIT]  = HETY_INITIAL,
    [PNS_HSK]   = HETY_HANDSHAKE,
    [PNS_APP]   = HETY_NOT_SET,
};


/* Generate an ACK frame for `pns' into an outgoing packet, including ECN
 * counts if any incoming packet was ECN-marked.  Returns 0 on success,
 * -1 on error.
 */
static int
imico_generate_ack (struct ietf_mini_conn *conn, enum packnum_space pns,
                                                            lsquic_time_t now)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    struct ietf_mini_rechist rechist;
    int not_used_has_missing, len;
    uint64_t ecn_counts_buf[4];
    const uint64_t *ecn_counts;

    header_type = pns2hety[pns];

    if (conn->imc_incoming_ecn)
    {
        ecn_counts_buf[0] = conn->imc_ecn_counts_in[pns][0];
        ecn_counts_buf[1] = conn->imc_ecn_counts_in[pns][1];
        ecn_counts_buf[2] = conn->imc_ecn_counts_in[pns][2];
        ecn_counts_buf[3] = conn->imc_ecn_counts_in[pns][3];
        ecn_counts = ecn_counts_buf;
    }
    else
        ecn_counts = NULL;

    packet_out = imico_get_packet_out(conn, header_type, 0);
    if (!packet_out)
        return -1;

    /* Generate ACK frame */
    lsquic_imico_rechist_init(&rechist, conn, pns);
    len = conn->imc_conn.cn_pf->pf_gen_ack_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), lsquic_imico_rechist_first,
                lsquic_imico_rechist_next, imico_rechist_largest_recv, &rechist,
                now, &not_used_has_missing, &packet_out->po_ack2ed, ecn_counts);
    if (len < 0)
    {
        LSQ_WARN("could not generate ACK frame");
        return -1;
    }
    EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->imc_conn.cn_pf,
                        packet_out->po_data + packet_out->po_data_sz, len);
    packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK;
    packet_out->po_data_sz += len;
    /* ACK bytes are "regenerated" on retransmission, not copied */
    packet_out->po_regen_sz += len;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << pns);
    LSQ_DEBUG("wrote ACK frame of size %d in %s", len, lsquic_pns2str[pns]);
    return 0;
}


static int
1797imico_generate_acks (struct ietf_mini_conn *conn, lsquic_time_t now) 1798{ 1799 enum packnum_space pns; 1800 1801 for (pns = PNS_INIT; pns < IMICO_N_PNS; ++pns) 1802 if (conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns) 1803 && !(pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT))) 1804 if (0 != imico_generate_ack(conn, pns, now)) 1805 return -1; 1806 1807 return 0; 1808} 1809 1810 1811static void 1812imico_generate_conn_close (struct ietf_mini_conn *conn) 1813{ 1814 struct lsquic_packet_out *packet_out; 1815 enum header_type header_type; 1816 enum packnum_space pns, pns_max; 1817 unsigned error_code; 1818 const char *reason; 1819 size_t need; 1820 int sz, rlen, is_app; 1821 char reason_buf[0x20]; 1822 1823 if (conn->imc_flags & IMC_ABORT_ERROR) 1824 { 1825 is_app = !!(conn->imc_flags & IMC_ABORT_ISAPP); 1826 error_code = conn->imc_error_code; 1827 reason = NULL; 1828 rlen = 0; 1829 } 1830 else if (conn->imc_flags & IMC_TLS_ALERT) 1831 { 1832 is_app = 0; 1833 error_code = 0x100 + conn->imc_tls_alert; 1834 if (ALERT_NO_APPLICATION_PROTOCOL == conn->imc_tls_alert) 1835 reason = "no suitable application protocol"; 1836 else 1837 { 1838 snprintf(reason_buf, sizeof(reason_buf), "TLS alert %"PRIu8, 1839 conn->imc_tls_alert); 1840 reason = reason_buf; 1841 } 1842 rlen = strlen(reason); 1843 } 1844 else if (conn->imc_flags & IMC_BAD_TRANS_PARAMS) 1845 { 1846 is_app = 0; 1847 error_code = TEC_TRANSPORT_PARAMETER_ERROR; 1848 reason = "bad transport parameters"; 1849 rlen = 24; 1850 } 1851 else if (conn->imc_flags & IMC_HSK_FAILED) 1852 { 1853 is_app = 0; 1854 error_code = TEC_NO_ERROR; 1855 reason = "handshake failed"; 1856 rlen = 16; 1857 } 1858 else if (conn->imc_flags & IMC_PARSE_FAILED) 1859 { 1860 is_app = 0; 1861 error_code = TEC_FRAME_ENCODING_ERROR; 1862 reason = "cannot decode frame"; 1863 rlen = 19; 1864 } 1865 else 1866 { 1867 is_app = 0; 1868 error_code = TEC_INTERNAL_ERROR; 1869 reason = NULL; 1870 rlen = 0; 1871 } 1872 1873 1874/* 
[draft-ietf-quic-transport-28] Section 10.3.1: 1875 * 1876 " A client will always know whether the server has Handshake keys (see 1877 " Section 17.2.2.1), but it is possible that a server does not know 1878 " whether the client has Handshake keys. Under these circumstances, a 1879 " server SHOULD send a CONNECTION_CLOSE frame in both Handshake and 1880 " Initial packets to ensure that at least one of them is processable by 1881 " the client. 1882--- 8< --- 1883 " Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake 1884 " packet could expose application state or be used to alter application 1885 " state. A CONNECTION_CLOSE of type 0x1d MUST be replaced by a 1886 " CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or 1887 " Handshake packets. Otherwise, information about the application 1888 " state might be revealed. Endpoints MUST clear the value of the 1889 " Reason Phrase field and SHOULD use the APPLICATION_ERROR code when 1890 " converting to a CONNECTION_CLOSE of type 0x1c. 
1891 */ 1892 LSQ_DEBUG("sending CONNECTION_CLOSE, is_app: %d, error code: %u, " 1893 "reason: %.*s", is_app, error_code, rlen, reason); 1894 if (is_app && conn->imc_conn.cn_version > LSQVER_ID27) 1895 { 1896 LSQ_DEBUG("convert to 0x1C, replace code and reason"); 1897 is_app = 0; 1898 error_code = TEC_APPLICATION_ERROR; 1899 rlen = 0; 1900 } 1901 1902 pns = (conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3; 1903 switch ((!!(conn->imc_flags & IMC_HSK_PACKET_SENT) << 1) 1904 | (pns == PNS_HSK) /* Handshake packet received */) 1905 { 1906 case (0 << 1) | 0: 1907 pns = PNS_INIT; 1908 pns_max = PNS_INIT; 1909 break; 1910 case (1 << 1) | 0: 1911 pns = PNS_INIT; 1912 pns_max = PNS_HSK; 1913 break; 1914 default: 1915 pns = PNS_HSK; 1916 pns_max = PNS_HSK; 1917 break; 1918 } 1919 1920 need = conn->imc_conn.cn_pf->pf_connect_close_frame_size(is_app, 1921 error_code, 0, rlen); 1922 LSQ_DEBUG("will generate %u CONNECTION_CLOSE frame%.*s", 1923 pns_max - pns + 1, pns_max > pns, "s"); 1924 do 1925 { 1926 header_type = pns2hety[pns]; 1927 packet_out = imico_get_packet_out(conn, header_type, need); 1928 if (!packet_out) 1929 return; 1930 sz = conn->imc_conn.cn_pf->pf_gen_connect_close_frame( 1931 packet_out->po_data + packet_out->po_data_sz, 1932 lsquic_packet_out_avail(packet_out), is_app, error_code, reason, 1933 rlen); 1934 if (sz >= 0) 1935 { 1936 packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE; 1937 packet_out->po_data_sz += sz; 1938 LSQ_DEBUG("generated CONNECTION_CLOSE frame"); 1939 } 1940 else 1941 LSQ_WARN("could not generate CONNECTION_CLOSE frame"); 1942 ++pns; 1943 } 1944 while (pns <= pns_max); 1945} 1946 1947 1948static int 1949imico_generate_handshake_done (struct ietf_mini_conn *conn) 1950{ 1951 struct lsquic_packet_out *packet_out; 1952 unsigned need; 1953 int sz; 1954 1955 need = conn->imc_conn.cn_pf->pf_handshake_done_frame_size(); 1956 packet_out = imico_get_packet_out(conn, HETY_NOT_SET, need); 1957 if (!packet_out) 1958 return -1; 1959 sz = 
conn->imc_conn.cn_pf->pf_gen_handshake_done_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out));
    if (sz < 0)
    {
        LSQ_WARN("could not generate HANDSHAKE_DONE frame");
        return -1;
    }

    packet_out->po_frame_types |= 1 << QUIC_FRAME_HANDSHAKE_DONE;
    packet_out->po_data_sz += sz;
    LSQ_DEBUG("generated HANDSHAKE_DONE frame");
    conn->imc_flags |= IMC_HSK_DONE_SENT;

    return 0;
}


/* Per-tick driver for the mini connection: expire the connection when the
 * handshake timeout elapses, flush queued ACKs, generate a close frame if
 * in error state, emit HANDSHAKE_DONE and request promotion once the
 * handshake succeeds, and report whether there is anything to send.
 */
static enum tick_st
ietf_mini_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum tick_st tick;

    if (conn->imc_created + conn->imc_enpub->enp_settings.es_handshake_to < now)
    {
        LSQ_DEBUG("connection expired: closing");
        return TICK_CLOSE;
    }


    if (conn->imc_flags & (IMC_QUEUED_ACK_INIT|IMC_QUEUED_ACK_HSK))
    {
        if (0 != imico_generate_acks(conn, now))
        {
            conn->imc_flags |= IMC_ERROR;
            return TICK_CLOSE;
        }
    }


    tick = 0;

    if (conn->imc_flags & IMC_ERROR)
    {
  close_on_error:
        /* Do not answer a peer close with our own CONNECTION_CLOSE */
        if (!(conn->imc_flags & IMC_CLOSE_RECVD))
            imico_generate_conn_close(conn);
        tick |= TICK_CLOSE;
    }
    else if (conn->imc_flags & IMC_HSK_OK)
    {
        if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session))
            LSQ_DEBUG("still in init, defer HANDSHAKE_DONE");
        else if (0 != imico_generate_handshake_done(conn))
            goto close_on_error;
        tick |= TICK_PROMOTE;
    }

    if (imico_have_packets_to_send(conn, now))
        tick |= TICK_SEND;
    else
        tick |= TICK_QUIET;

    LSQ_DEBUG("Return TICK %d", tick);
    return tick;
}


/* The mini connection does not keep the formatted error string: it only
 * flags the error; variadic arguments are therefore ignored.
 */
static void
ietf_mini_conn_ci_internal_error (struct lsquic_conn *lconn,
                                                    const char *format, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_INFO("internal error reported");
    conn->imc_flags |= IMC_ERROR;
}


/* Record an abort: log the (possibly formatted) reason and store the
 * error code and application/transport level for the eventual
 * CONNECTION_CLOSE frame.
 */
static void
ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                                unsigned error_code, const char *fmt, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    va_list ap;
    const char *err_str, *percent;
    char err_buf[0x100];

    /* Only format when the string actually contains conversions */
    percent = strchr(fmt, '%');
    if (percent)
    {
        va_start(ap, fmt);
        vsnprintf(err_buf, sizeof(err_buf), fmt, ap);
        va_end(ap);
        err_str = err_buf;
    }
    else
        err_str = fmt;
    LSQ_INFO("abort error: is_app: %d; error code: %u; error str: %s",
        is_app, error_code, err_str);
    conn->imc_flags |= IMC_ERROR|IMC_ABORT_ERROR;
    if (is_app)
        conn->imc_flags |= IMC_ABORT_ISAPP;
    conn->imc_error_code = error_code;
}


/* The mini connection has a single network path */
static struct network_path *
ietf_mini_conn_ci_get_path (struct lsquic_conn *lconn,
                                                const struct sockaddr *sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    return &conn->imc_path;
}


/* Prefer the peer's DCID for logging; fall back to our SCID */
static const lsquic_cid_t *
ietf_mini_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    if (conn->imc_path.np_dcid.len)
        return &conn->imc_path.np_dcid;
    else
        return CN_SCID(lconn);
}


/* Record the local/peer addresses of the current path, detecting path
 * changes.  If the peer's address family changed, encrypted buffers of
 * unsent packets are returned (headers depend on the address family).
 */
static unsigned char
ietf_mini_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct sockaddr *orig_peer_sa;
    struct lsquic_packet_out *packet_out;
    size_t len;
    char path_str[4][INET6_ADDRSTRLEN + sizeof(":65535")];

    if (NP_IS_IPv6(&conn->imc_path) != (AF_INET6 == peer_sa->sa_family))
TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 2100 if ((packet_out->po_flags & (PO_SENT|PO_ENCRYPTED)) == PO_ENCRYPTED) 2101 imico_return_enc_data(conn, packet_out); 2102 2103 orig_peer_sa = NP_PEER_SA(&conn->imc_path); 2104 if (orig_peer_sa->sa_family == 0) 2105 LSQ_DEBUG("connection to %s from %s", SA2STR(local_sa, path_str[0]), 2106 SA2STR(peer_sa, path_str[1])); 2107 else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa) 2108 && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa))) 2109 { 2110 LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)", 2111 SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]), 2112 SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]), 2113 SA2STR(local_sa, path_str[2]), 2114 SA2STR(peer_sa, path_str[3])); 2115 conn->imc_flags |= IMC_PATH_CHANGED; 2116 } 2117 2118 len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in) 2119 : sizeof(struct sockaddr_in6); 2120 2121 memcpy(conn->imc_path.np_peer_addr, peer_sa, len); 2122 memcpy(conn->imc_path.np_local_addr, local_sa, len); 2123 conn->imc_path.np_peer_ctx = peer_ctx; 2124 return 0; 2125} 2126 2127 2128void 2129ietf_mini_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz) 2130{ 2131 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 2132 2133 conn->imc_bytes_in += garbage_sz; 2134 LSQ_DEBUG("count %zd bytes of garbage, new value: %u bytes", garbage_sz, 2135 conn->imc_bytes_in); 2136} 2137 2138 2139static const struct conn_iface mini_conn_ietf_iface = { 2140 .ci_abort_error = ietf_mini_conn_ci_abort_error, 2141 .ci_client_call_on_new = ietf_mini_conn_ci_client_call_on_new, 2142 .ci_count_garbage = ietf_mini_conn_ci_count_garbage, 2143 .ci_destroy = ietf_mini_conn_ci_destroy, 2144 .ci_get_engine = ietf_mini_conn_ci_get_engine, 2145 .ci_get_log_cid = ietf_mini_conn_ci_get_log_cid, 2146 .ci_get_path = ietf_mini_conn_ci_get_path, 2147 .ci_hsk_done = ietf_mini_conn_ci_hsk_done, 2148 .ci_internal_error = 
ietf_mini_conn_ci_internal_error, 2149 .ci_is_tickable = ietf_mini_conn_ci_is_tickable, 2150 .ci_next_packet_to_send = ietf_mini_conn_ci_next_packet_to_send, 2151 .ci_next_tick_time = ietf_mini_conn_ci_next_tick_time, 2152 .ci_packet_in = ietf_mini_conn_ci_packet_in, 2153 .ci_packet_not_sent = ietf_mini_conn_ci_packet_not_sent, 2154 .ci_packet_sent = ietf_mini_conn_ci_packet_sent, 2155 .ci_record_addrs = ietf_mini_conn_ci_record_addrs, 2156 .ci_tick = ietf_mini_conn_ci_tick, 2157 .ci_tls_alert = ietf_mini_conn_ci_tls_alert, 2158}; 2159