/* lsquic_mini_conn_ietf.c revision f198a02d */
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_mini_conn_ietf.c -- Mini connection used by the IETF QUIC
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
#include <stdlib.h>

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_mm.h"
#include "lsquic_malo.h"
#include "lsquic_engine_public.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_rtt.h"
#include "lsquic_util.h"
#include "lsquic_enc_sess.h"
#include "lsquic_trechist.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_ev_log.h"
#include "lsquic_trans_params.h"
#include "lsquic_ietf.h"
#include "lsquic_packet_ietf.h"
#include "lsquic_attq.h"
#include "lsquic_alarmset.h"
#include "lsquic_crand.h"

#define LSQUIC_LOGGER_MODULE LSQLM_MINI_CONN
#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(&conn->imc_conn)
#include "lsquic_logger.h"

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static const struct conn_iface mini_conn_ietf_iface;

static unsigned highest_bit_set (unsigned long long);

static int
imico_can_send (const struct ietf_mini_conn *, size_t);


static const enum header_type el2hety[] =
{
    [ENC_LEV_INIT]  = HETY_HANDSHAKE,
    [ENC_LEV_CLEAR] = HETY_INITIAL,
    [ENC_LEV_FORW]  = HETY_NOT_SET,
    [ENC_LEV_EARLY] = 0,    /* Invalid */
};


static void
imico_destroy_packet (struct ietf_mini_conn *conn,
                                    struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, conn->imc_enpub,
                                            conn->imc_path.np_peer_ctx);
}


int
lsquic_mini_conn_ietf_ecn_ok (const struct ietf_mini_conn *conn)
{
    packno_set_t acked;

    /* First flight has only Initial and Handshake packets */
    acked = conn->imc_acked_packnos[PNS_INIT]
          | conn->imc_acked_packnos[PNS_HSK]
          ;
    return 0 != (conn->imc_ecn_packnos & acked);
}


#define imico_ecn_ok lsquic_mini_conn_ietf_ecn_ok


static enum ecn
imico_get_ecn (struct ietf_mini_conn *conn)
{
    if (!conn->imc_enpub->enp_settings.es_ecn)
        return ECN_NOT_ECT;
    else if (!conn->imc_sent_packnos /* We set ECT0 in first flight */
                                                || imico_ecn_ok(conn))
        return ECN_ECT0;
    else
        return ECN_NOT_ECT;
}


static struct lsquic_packet_out *
imico_get_packet_out (struct ietf_mini_conn *conn,
                                enum header_type header_type, size_t need)
{
    struct lsquic_packet_out *packet_out;
    enum ecn ecn;

    if (need)
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if (!(packet_out->po_flags & PO_SENT)
                    && packet_out->po_header_type == header_type
                    && lsquic_packet_out_avail(packet_out) >= need)
                return packet_out;

    if (conn->imc_next_packno >= MAX_PACKETS)
    {
        LSQ_DEBUG("ran out of outgoing packet numbers, won't allocate packet");
        return NULL;
    }

    packet_out = lsquic_packet_out_new(&conn->imc_enpub->enp_mm, NULL, 1,
            &conn->imc_conn, IQUIC_PACKNO_LEN_1, NULL, NULL, &conn->imc_path);
    if (!packet_out)
    {
        LSQ_WARN("could not allocate packet: %s", strerror(errno));
        return NULL;
    }

    packet_out->po_header_type = header_type;
    packet_out->po_packno = conn->imc_next_packno++;
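    /* Sent and acknowledged packet numbers are tracked in 64-bit bitmasks
     * (packno_set_t), which is why the check above refuses to allocate a
     * packet once imc_next_packno reaches MAX_PACKETS.
     */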
    packet_out->po_flags |= PO_MINI;
    lsquic_packet_out_set_pns(packet_out, lsquic_hety2pns[header_type]);
    ecn = imico_get_ecn(conn);
    packet_out->po_lflags |= ecn << POECN_SHIFT;
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    packet_out->po_loss_chain = packet_out;
    return packet_out;
}


static struct ietf_mini_conn *
cryst_get_conn (const struct mini_crypto_stream *cryst)
{
    return (void *)
        ((unsigned char *) (cryst - cryst->mcs_enc_level)
            - offsetof(struct ietf_mini_conn, imc_streams));
}


struct msg_ctx
{
    const unsigned char       *buf;
    const unsigned char *const end;
};


static size_t
read_from_msg_ctx (void *ctx, void *buf, size_t len, int *fin)
{
    struct msg_ctx *msg_ctx = ctx;
    if (len > (uintptr_t) (msg_ctx->end - msg_ctx->buf))
        len = msg_ctx->end - msg_ctx->buf;
    memcpy(buf, msg_ctx->buf, len);
    msg_ctx->buf += len;
    return len;
}


static int
imico_chlo_has_been_consumed (const struct ietf_mini_conn *conn)
{
    return conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off > 3
        && conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off >= conn->imc_ch_len;
}


static int
imico_maybe_process_params (struct ietf_mini_conn *conn)
{
    const struct transport_params *params;

    if (imico_chlo_has_been_consumed(conn)
        && (conn->imc_flags & (IMC_ENC_SESS_INITED|IMC_HAVE_TP))
                                                == IMC_ENC_SESS_INITED)
    {
        params = conn->imc_conn.cn_esf.i->esfi_get_peer_transport_params(
                                            conn->imc_conn.cn_enc_session);
        if (params)
        {
            conn->imc_flags |= IMC_HAVE_TP;
            conn->imc_ack_exp = params->tp_ack_delay_exponent;
            if (params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE))
            {
                if (params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]
                                            < conn->imc_path.np_pack_size)
                    conn->imc_path.np_pack_size =
                            params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE];
            }
            LSQ_DEBUG("read transport params, packet size is set to %hu bytes",
                                            conn->imc_path.np_pack_size);
        }
        else
        {
            conn->imc_flags |= IMC_BAD_TRANS_PARAMS;
            return -1;
        }
    }

    return 0;
}


static ssize_t
imico_stream_write (void *stream, const void *bufp, size_t bufsz)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct lsquic_conn *const lconn = &conn->imc_conn;
    const struct parse_funcs *const pf = lconn->cn_pf;
    struct msg_ctx msg_ctx = { bufp, (unsigned char *) bufp + bufsz, };
    struct lsquic_packet_out *packet_out;
    size_t header_sz, need;
    const unsigned char *p;
    int len;

    if (0 != imico_maybe_process_params(conn))
        return -1;

    if (PNS_INIT == lsquic_enclev2pns[ cryst->mcs_enc_level ]
                                && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_WARN("trying to write at the ignored Initial level");
        return bufsz;
    }

    while (msg_ctx.buf < msg_ctx.end)
    {
        header_sz = lconn->cn_pf->pf_calc_crypto_frame_header_sz(
                        cryst->mcs_write_off, msg_ctx.end - msg_ctx.buf);
        need = header_sz + 1;
        packet_out = imico_get_packet_out(conn,
                                    el2hety[ cryst->mcs_enc_level ], need);
        if (!packet_out)
            return -1;

        p = msg_ctx.buf;
        len = pf->pf_gen_crypto_frame(packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), 0, cryst->mcs_write_off, 0,
                msg_ctx.end - msg_ctx.buf, read_from_msg_ctx, &msg_ctx);
        if (len < 0)
            return len;
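        /* pf_gen_crypto_frame() pulled its payload via read_from_msg_ctx(),
         * advancing msg_ctx.buf; that advance is added to mcs_write_off below.
         */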
        EV_LOG_GENERATED_CRYPTO_FRAME(LSQUIC_LOG_CONN_ID, pf,
                        packet_out->po_data + packet_out->po_data_sz, len);
        packet_out->po_data_sz += len;
        packet_out->po_frame_types |= 1 << QUIC_FRAME_CRYPTO;
        packet_out->po_flags |= PO_HELLO;
        cryst->mcs_write_off += msg_ctx.buf - p;
    }

    assert(msg_ctx.buf == msg_ctx.end);
    return bufsz;
}


static int
imico_stream_flush (void *stream)
{
    return 0;
}


static struct stream_frame *
imico_find_stream_frame (const struct ietf_mini_conn *conn,
                            enum enc_level enc_level, unsigned read_off)
{
    struct stream_frame *frame;

    if (conn->imc_last_in.frame && enc_level == conn->imc_last_in.enc_level
            && read_off == DF_ROFF(conn->imc_last_in.frame))
        return conn->imc_last_in.frame;

    TAILQ_FOREACH(frame, &conn->imc_crypto_frames, next_frame)
        if (enc_level == frame->stream_id && read_off == DF_ROFF(frame))
            return frame;

    return NULL;
}


static void
imico_read_chlo_size (struct ietf_mini_conn *conn, const unsigned char *buf,
                                                                size_t sz)
{
    const unsigned char *const end = buf + sz;

    assert(conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off < 4);
    switch (conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off)
    {
    case 0:
        if (buf == end)
            return;
        if (*buf != 1)
        {
            LSQ_DEBUG("Does not begin with ClientHello");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 1:
        if (buf == end)
            return;
        if (*buf != 0)
        {
            LSQ_DEBUG("ClientHello larger than 16K");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 2:
        if (buf == end)
            return;
        conn->imc_ch_len = *buf << 8;
        ++buf;
        /* fall-through */
    default:
        if (buf == end)
            return;
        conn->imc_ch_len |= *buf;
    }
}


static ssize_t
imico_stream_readf (void *stream,
        size_t (*readf)(void *, const unsigned char *, size_t, int), void *ctx)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct stream_frame *frame;
    const unsigned char *buf;
    size_t nread, total_read;
    unsigned avail;

    total_read = 0;
    while ((frame = imico_find_stream_frame(conn, cryst->mcs_enc_level,
                                                    cryst->mcs_read_off)))
    {
        avail = DF_SIZE(frame) - frame->data_frame.df_read_off;
        buf = frame->data_frame.df_data + frame->data_frame.df_read_off;
        nread = readf(ctx, buf, avail, DF_FIN(frame));
        if (cryst->mcs_enc_level == ENC_LEV_CLEAR && cryst->mcs_read_off < 4)
            imico_read_chlo_size(conn, buf, nread);
        total_read += nread;
        cryst->mcs_read_off += nread;
        frame->data_frame.df_read_off += nread;
        LSQ_DEBUG("read %zu bytes at offset %"PRIu64" on enc level %u", nread,
            DF_ROFF(frame), cryst->mcs_enc_level);
        if (DF_END(frame) == DF_ROFF(frame))
        {
            if (frame == conn->imc_last_in.frame)
                conn->imc_last_in.frame = NULL;
            else
            {
                TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame);
                --conn->imc_n_crypto_frames;
                conn->imc_crypto_frames_sz -= DF_SIZE(frame);
                lsquic_packet_in_put(&conn->imc_enpub->enp_mm,
                                                        frame->packet_in);
                lsquic_malo_put(frame);
            }
        }
        if (nread < avail)
            break;
    }

    if (total_read > 0)
        return total_read;
    else
    {
        /* CRYPTO streams never end, so zero bytes read always means
         * EWOULDBLOCK
         */
        errno = EWOULDBLOCK;
        return -1;
    }
}
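

/* Common helper behind the csi_wantread and csi_wantwrite callbacks: set or
 * clear the flag bit and return its previous value.  For example, calling
 * imico_stream_wantread(stream, 1) twice in a row returns 0, then 1.
 */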
static int
imico_stream_wantX (struct mini_crypto_stream *cryst, int bit, int is_want)
{
    int old;

    old = (cryst->mcs_flags & (1 << bit)) > 0;
    cryst->mcs_flags &= ~(1 << bit);
    cryst->mcs_flags |= !!is_want << bit;
    return old;
}


static int
imico_stream_wantwrite (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTWRITE, is_want);
}


static int
imico_stream_wantread (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTREAD, is_want);
}


static enum enc_level
imico_stream_enc_level (void *stream)
{
    struct mini_crypto_stream *const cryst = stream;
    return cryst->mcs_enc_level;
}


static const struct crypto_stream_if crypto_stream_if =
{
    .csi_write      = imico_stream_write,
    .csi_flush      = imico_stream_flush,
    .csi_readf      = imico_stream_readf,
    .csi_wantwrite  = imico_stream_wantwrite,
    .csi_wantread   = imico_stream_wantread,
    .csi_enc_level  = imico_stream_enc_level,
};


static int
is_first_packet_ok (const struct lsquic_packet_in *packet_in,
                                                size_t udp_payload_size)
{
    if (udp_payload_size < IQUIC_MIN_INIT_PACKET_SZ)
    {
        /* [draft-ietf-quic-transport-24] Section 14 */
        LSQ_LOG1(LSQ_LOG_DEBUG, "incoming UDP payload too small: %zu bytes",
                                                        udp_payload_size);
        return 0;
    }
    /* TODO: Move decryption of the first packet into this function? */
    return 1;   /* TODO */
}


static void
imico_peer_addr_validated (struct ietf_mini_conn *conn, const char *how)
{
    if (!(conn->imc_flags & IMC_ADDR_VALIDATED))
    {
        conn->imc_flags |= IMC_ADDR_VALIDATED;
        LSQ_DEBUG("peer address validated (%s)", how);
    }
}


struct lsquic_conn *
lsquic_mini_conn_ietf_new (struct lsquic_engine_public *enpub,
               const struct lsquic_packet_in *packet_in,
           enum lsquic_version version, int is_ipv4, const lsquic_cid_t *odcid,
           size_t udp_payload_size)
{
    struct ietf_mini_conn *conn;
    enc_session_t *enc_sess;
    enum enc_level i;
    const struct enc_session_funcs_iquic *esfi;
    unsigned char rand_nybble;

    if (!is_first_packet_ok(packet_in, udp_payload_size))
        return NULL;

    conn = lsquic_malo_get(enpub->enp_mm.malo.mini_conn_ietf);
    if (!conn)
    {
        LSQ_LOG1(LSQ_LOG_WARN, "cannot allocate mini connection: %s",
                                                        strerror(errno));
        return NULL;
    }
    memset(conn, 0, sizeof(*conn));
    conn->imc_conn.cn_if = &mini_conn_ietf_iface;
    conn->imc_conn.cn_cces = conn->imc_cces;
    conn->imc_conn.cn_n_cces = sizeof(conn->imc_cces)
                                            / sizeof(conn->imc_cces[0]);
    conn->imc_cces[0].cce_cid = packet_in->pi_dcid;
    conn->imc_cces[0].cce_flags = CCE_USED;
    conn->imc_conn.cn_cces_mask = 1;
    lsquic_scid_from_packet_in(packet_in, &conn->imc_path.np_dcid);
    LSQ_DEBUGC("recv SCID from client %"CID_FMT, CID_BITS(&conn->imc_cces[0].cce_cid));
    LSQ_DEBUGC("recv DCID from client %"CID_FMT, CID_BITS(&conn->imc_path.np_dcid));

    /* Generate new SCID.  Since it is not the original SCID, it is given
     * a sequence number (0) and therefore can be retired by the client.
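     * If the client later uses this SCID as the DCID of its packets (and the
     * configured es_scid_len is at least 8 bytes), imico_maybe_validate_by_dcid()
     * treats that as address validation, since the CID is all random bits.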
     */
    enpub->enp_generate_scid(&conn->imc_conn,
        &conn->imc_conn.cn_cces[1].cce_cid, enpub->enp_settings.es_scid_len);

    LSQ_DEBUGC("generated SCID %"CID_FMT" at index %u, switching to it",
        CID_BITS(&conn->imc_conn.cn_cces[1].cce_cid), 1);
    conn->imc_conn.cn_cces[1].cce_flags = CCE_SEQNO | CCE_USED;
    conn->imc_conn.cn_cces_mask |= 1u << 1;
    conn->imc_conn.cn_cur_cce_idx = 1;

    conn->imc_conn.cn_flags = LSCONN_MINI|LSCONN_IETF|LSCONN_SERVER;
    conn->imc_conn.cn_version = version;

    for (i = 0; i < N_ENC_LEVS; ++i)
    {
        conn->imc_streams[i].mcs_enc_level = i;
        conn->imc_stream_ps[i] = &conn->imc_streams[i];
    }

    rand_nybble = lsquic_crand_get_nybble(enpub->enp_crand);
    if (rand_nybble == 0)
    {
        /* Use trechist for about one out of every sixteen connections so
         * that the code does not grow stale.
         */
        LSQ_DEBUG("using trechist");
        conn->imc_flags |= IMC_TRECHIST;
        conn->imc_recvd_packnos.trechist.hist_elems
                                = malloc(TRECHIST_SIZE * IMICO_N_PNS);
        if (!conn->imc_recvd_packnos.trechist.hist_elems)
        {
            LSQ_WARN("cannot allocate trechist elems");
            lsquic_malo_put(conn);  /* Don't leak the mini connection */
            return NULL;
        }
    }

    esfi = select_esf_iquic_by_ver(version);
    enc_sess = esfi->esfi_create_server(enpub, &conn->imc_conn,
                &packet_in->pi_dcid, conn->imc_stream_ps, &crypto_stream_if,
                &conn->imc_cces[0].cce_cid, &conn->imc_path.np_dcid);
    if (!enc_sess)
    {
        if (conn->imc_flags & IMC_TRECHIST)     /* Free elems allocated above */
            free(conn->imc_recvd_packnos.trechist.hist_elems);
        lsquic_malo_put(conn);
        return NULL;
    }

    conn->imc_enpub = enpub;
    conn->imc_created = packet_in->pi_received;
    if (enpub->enp_settings.es_base_plpmtu)
        conn->imc_path.np_pack_size = enpub->enp_settings.es_base_plpmtu;
    else if (is_ipv4)
        conn->imc_path.np_pack_size = IQUIC_MAX_IPv4_PACKET_SZ;
    else
        conn->imc_path.np_pack_size = IQUIC_MAX_IPv6_PACKET_SZ;
    conn->imc_conn.cn_pf = select_pf_by_ver(version);
    conn->imc_conn.cn_esf.i = esfi;
    conn->imc_conn.cn_enc_session = enc_sess;
    conn->imc_conn.cn_esf_c = select_esf_common_by_ver(version);
    TAILQ_INIT(&conn->imc_packets_out);
    TAILQ_INIT(&conn->imc_app_packets);
    TAILQ_INIT(&conn->imc_crypto_frames);
    if (odcid)
        imico_peer_addr_validated(conn, "odcid");
#if LSQUIC_DEVEL
    {
        const char *const s = getenv("LSQUIC_LOSE_0RTT");
        if (s && atoi(s))
        {
            LSQ_DEBUG("will lose 0-RTT packets (via env variable)");
            conn->imc_delayed_packets_count = UCHAR_MAX;
        }
    }
#endif

    LSQ_DEBUG("created mini connection object %p; max packet size=%hu",
                                        conn, conn->imc_path.np_pack_size);
    return &conn->imc_conn;
}


static void
ietf_mini_conn_ci_client_call_on_new (struct lsquic_conn *lconn)
{
    assert(0);
}


static void
ietf_mini_conn_ci_destroy (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    struct lsquic_packet_out *packet_out;
    struct lsquic_packet_in *packet_in;
    struct stream_frame *frame;

    while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out)))
    {
        TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
        imico_destroy_packet(conn, packet_out);
    }
    while ((packet_in = TAILQ_FIRST(&conn->imc_app_packets)))
    {
        TAILQ_REMOVE(&conn->imc_app_packets, packet_in, pi_next);
        lsquic_packet_in_put(&conn->imc_enpub->enp_mm, packet_in);
    }
    while ((frame = TAILQ_FIRST(&conn->imc_crypto_frames)))
    {
        TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame);
        lsquic_packet_in_put(&conn->imc_enpub->enp_mm, frame->packet_in);
        lsquic_malo_put(frame);
    }
    if (lconn->cn_enc_session)
        lconn->cn_esf.i->esfi_destroy(lconn->cn_enc_session);
    LSQ_DEBUG("ietf_mini_conn_ci_destroyed");
    if (conn->imc_flags & IMC_TRECHIST)
        free(conn->imc_recvd_packnos.trechist.hist_elems);
    lsquic_malo_put(conn);
}


static struct lsquic_engine *
ietf_mini_conn_ci_get_engine (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    return conn->imc_enpub->enp_engine;
}


static void
ietf_mini_conn_ci_hsk_done (struct lsquic_conn *lconn,
                                            enum lsquic_hsk_status status)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    switch (status)
    {
    case LSQ_HSK_OK:
    case LSQ_HSK_RESUMED_OK:
        conn->imc_flags |= IMC_HSK_OK;
        conn->imc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE;
        LSQ_DEBUG("handshake OK");
        break;
    default:
        assert(0);
        /* fall-through */
    case LSQ_HSK_FAIL:
        conn->imc_flags |= IMC_HSK_FAILED|IMC_ERROR;
        LSQ_INFO("handshake failed");
        break;
    }
}


static void
ietf_mini_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_DEBUG("got TLS alert %"PRIu8, alert);
    conn->imc_flags |= IMC_ERROR|IMC_TLS_ALERT;
    conn->imc_tls_alert = alert;
}


/* A mini connection is only tickable if it has unsent packets.  This can
 * occur when packet sending is delayed.
 *
 * Otherwise, a mini connection is not tickable:  Either there are incoming
 * packets, in which case, the connection is going to be ticked, or there is
 * an alarm pending, in which case it will be handled via the attq.
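 *
 * Note that an unsent packet makes the connection tickable only if
 * imico_can_send() allows it: until the peer address is validated, the
 * server sends at most three times the number of bytes it has received.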
 */
static int
ietf_mini_conn_ci_is_tickable (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *const conn = (struct ietf_mini_conn *) lconn;
    const struct lsquic_packet_out *packet_out;
    size_t packet_size;

    if (conn->imc_enpub->enp_flags & ENPUB_CAN_SEND)
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if (!(packet_out->po_flags & PO_SENT))
            {
                packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
                return imico_can_send(conn, packet_size);
            }

    return 0;
}


static int
imico_can_send (const struct ietf_mini_conn *conn, size_t size)
{
    return (conn->imc_flags & IMC_ADDR_VALIDATED)
        || conn->imc_bytes_in * 3 >= conn->imc_bytes_out + size
        ;
}


static struct lsquic_packet_out *
ietf_mini_conn_ci_next_packet_to_send (struct lsquic_conn *lconn,
                                            const struct to_coal *to_coal)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    struct lsquic_packet_out *packet_out;
    size_t packet_size;

    TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
    {
        if (packet_out->po_flags & PO_SENT)
            continue;
        packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
        if (!(to_coal
                && (packet_size + to_coal->prev_sz_sum
                                            > conn->imc_path.np_pack_size
                    || !lsquic_packet_out_equal_dcids(to_coal->prev_packet, packet_out))
            ))
        {
            if (!imico_can_send(conn, packet_size))
            {
                LSQ_DEBUG("cannot send packet %"PRIu64" of size %zu: client "
                    "address has not been validated", packet_out->po_packno,
                    packet_size);
                return NULL;
            }
            packet_out->po_flags |= PO_SENT;
            conn->imc_bytes_out += packet_size;
            if (!to_coal)
                LSQ_DEBUG("packet_to_send: %"PRIu64, packet_out->po_packno);
            else
                LSQ_DEBUG("packet_to_send: %"PRIu64" (coalesced)",
                                                packet_out->po_packno);
            return packet_out;
        }
        else
            return NULL;
    }

    return NULL;
}


static int
imico_calc_retx_timeout (const struct ietf_mini_conn *conn)
{
    lsquic_time_t to;
    to = lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats);
    if (to)
    {
        to += to / 2;
        if (to < 10000)
            to = 10000;
    }
    else
        to = 300000;
    return to << conn->imc_hsk_count;
}


static lsquic_time_t
ietf_mini_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct lsquic_packet_out *packet_out;
    lsquic_time_t exp_time, retx_time;

    exp_time = conn->imc_created +
                    conn->imc_enpub->enp_settings.es_handshake_to;

    TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
        if (packet_out->po_flags & PO_SENT)
        {
            retx_time = packet_out->po_sent + imico_calc_retx_timeout(conn);
            if (retx_time < exp_time)
            {
                *why = N_AEWS + AL_RETX_HSK;
                return retx_time;
            }
            else
            {
                *why = AEW_MINI_EXPIRE;
                return exp_time;
            }
        }

    *why = AEW_MINI_EXPIRE;
    return exp_time;
}


#define IMICO_PROC_FRAME_ARGS                                           \
    struct ietf_mini_conn *conn, struct lsquic_packet_in *packet_in,    \
    const unsigned char *p, size_t len


static void
imico_dispatch_stream_events (struct ietf_mini_conn *conn)
{
    enum enc_level i;

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTREAD))
                                        == (MCS_CREATED|MCS_WANTREAD))
        {
            LSQ_DEBUG("dispatch read events on level #%u", i);
            lsquic_mini_cry_sm_if.on_read((void *) &conn->imc_streams[i],
                                        conn->imc_conn.cn_enc_session);
        }

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTWRITE))
                                        == (MCS_CREATED|MCS_WANTWRITE))
        {
            LSQ_DEBUG("dispatch write events on level #%u", i);
            lsquic_mini_cry_sm_if.on_write((void *) &conn->imc_streams[i],
                                        conn->imc_conn.cn_enc_session);
        }
}


static int
imico_stash_stream_frame (struct ietf_mini_conn *conn,
        enum enc_level enc_level, struct lsquic_packet_in *packet_in,
        const struct stream_frame *frame)
{
    struct stream_frame *copy;

    if (conn->imc_n_crypto_frames >= IMICO_MAX_STASHED_FRAMES)
    {
        LSQ_INFO("cannot stash more CRYPTO frames, at %hhu already, while max "
            "is %u", conn->imc_n_crypto_frames, IMICO_MAX_STASHED_FRAMES);
        return -1;
    }

    if (conn->imc_crypto_frames_sz + DF_SIZE(frame) > IMICO_MAX_BUFFERED_CRYPTO)
    {
        LSQ_INFO("cannot stash more than %u bytes of CRYPTO frames",
            IMICO_MAX_BUFFERED_CRYPTO);
        return -1;
    }

    copy = lsquic_malo_get(conn->imc_enpub->enp_mm.malo.stream_frame);
    if (!copy)
    {
        LSQ_INFO("could not allocate stream frame for stashing");
        return -1;
    }

    *copy = *frame;
    copy->packet_in = lsquic_packet_in_get(packet_in);
    copy->stream_id = enc_level;
    TAILQ_INSERT_TAIL(&conn->imc_crypto_frames, copy, next_frame);
    ++conn->imc_n_crypto_frames;
    conn->imc_crypto_frames_sz += DF_SIZE(frame);
    return 0;
}


static unsigned
imico_process_crypto_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    enum enc_level enc_level, i;
    struct stream_frame stream_frame;

    parsed_len = conn->imc_conn.cn_pf->pf_parse_crypto_frame(p, len,
                                                            &stream_frame);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    enc_level = lsquic_packet_in_enc_level(packet_in);
    EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, &stream_frame, enc_level);

    if (conn->imc_streams[enc_level].mcs_read_off >= DF_OFF(&stream_frame)
        && conn->imc_streams[enc_level].mcs_read_off < DF_END(&stream_frame))
        LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level);
    else if (conn->imc_streams[enc_level].mcs_read_off < DF_OFF(&stream_frame))
    {
        LSQ_DEBUG("Can't read CRYPTO frame on enc level #%u at offset %"PRIu64
            " yet -- stash", enc_level, DF_OFF(&stream_frame));
        if (0 == imico_stash_stream_frame(conn, enc_level, packet_in,
                                                            &stream_frame))
            return parsed_len;
        else
            return 0;
    }
    else
    {
        LSQ_DEBUG("Got duplicate CRYPTO frame for enc level #%u -- ignore",
                                                                enc_level);
        return parsed_len;
    }

    if (!(conn->imc_flags & IMC_ENC_SESS_INITED))
    {
        if (0 != conn->imc_conn.cn_esf.i->esfi_init_server(
                                        conn->imc_conn.cn_enc_session))
            return 0;
        conn->imc_flags |= IMC_ENC_SESS_INITED;
    }

    if (!(conn->imc_streams[enc_level].mcs_flags & MCS_CREATED))
    {
        LSQ_DEBUG("creating stream on level #%u", enc_level);
        conn->imc_streams[enc_level].mcs_flags |= MCS_CREATED;
        lsquic_mini_cry_sm_if.on_new_stream(conn->imc_conn.cn_enc_session,
                                (void *) &conn->imc_streams[enc_level]);
    }

    /* Assume that receiving a CRYPTO frame at a higher level means that we
     * no longer want to read from a lower level.
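     * For example, once the first Handshake-level CRYPTO frame arrives, the
     * loop below clears MCS_WANTREAD on the Initial-level stream.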
     */
    for (i = 0; i < enc_level; ++i)
        conn->imc_streams[i].mcs_flags &= ~MCS_WANTREAD;

    conn->imc_last_in.frame = &stream_frame;
    conn->imc_last_in.enc_level = enc_level;
    imico_dispatch_stream_events(conn);
    conn->imc_last_in.frame = NULL;

    if (DF_ROFF(&stream_frame) < DF_END(&stream_frame))
    {
        /* This is an odd condition, but let's handle it just in case */
        LSQ_DEBUG("New CRYPTO frame on enc level #%u not fully read -- stash",
                                                                enc_level);
        if (0 != imico_stash_stream_frame(conn, enc_level, packet_in,
                                                            &stream_frame))
            return 0;
    }


    return parsed_len;
}


static ptrdiff_t
imico_count_zero_bytes (const unsigned char *p, size_t len)
{
    const unsigned char *const end = p + len;
    while (p < end && 0 == *p)
        ++p;
    return len - (end - p);
}


static unsigned
imico_process_padding_frame (IMICO_PROC_FRAME_ARGS)
{
    len = (size_t) imico_count_zero_bytes(p, len);
    EV_LOG_PADDING_FRAME_IN(LSQUIC_LOG_CONN_ID, len);
    return len;
}


static void
imico_take_rtt_sample (struct ietf_mini_conn *conn,
                        const struct lsquic_packet_out *packet_out,
                        lsquic_time_t now, lsquic_time_t lack_delta)
{
    assert(packet_out->po_sent);
    lsquic_time_t measured_rtt = now - packet_out->po_sent;
    if (lack_delta < measured_rtt)
    {
        lsquic_rtt_stats_update(&conn->imc_rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("srtt: %"PRIu64" usec, var: %"PRIu64,
                    lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats),
                    lsquic_rtt_stats_get_rttvar(&conn->imc_rtt_stats));
    }
}


static unsigned
imico_process_ack_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    unsigned n;
    lsquic_packet_out_t *packet_out, *next;
    struct ack_info *acki;
    lsquic_packno_t packno;
    lsquic_time_t warn_time;
    packno_set_t acked;
    enum packnum_space pns;
    uint8_t ack_exp;

    if (conn->imc_flags & IMC_HAVE_TP)
        ack_exp = conn->imc_ack_exp;
    else
        ack_exp = TP_DEF_ACK_DELAY_EXP; /* Odd: no transport params yet? */
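    /* Parse the frame into the engine-wide scratch ack_info, then fold the
     * acknowledged ranges into the 64-bit bitmask `acked'.  A packet number
     * that was never sent takes us to err_never_sent below.
     */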
    acki = conn->imc_enpub->enp_mm.acki;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_ack_frame(p, len, acki,
                                                                ack_exp);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    acked = 0;

    for (n = 0; n < acki->n_ranges; ++n)
    {
        if (acki->ranges[n].high <= MAX_PACKETS)
        {
            acked |= (1ULL << acki->ranges[n].high)
                        | ((1ULL << acki->ranges[n].high) - 1);
            acked &= ~((1ULL << acki->ranges[n].low) - 1);
        }
        else
        {
            packno = acki->ranges[n].high;
            goto err_never_sent;
        }
    }
    if (acked & ~conn->imc_sent_packnos)
    {
        packno = highest_bit_set(acked & ~conn->imc_sent_packnos);
        goto err_never_sent;
    }

    EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, acki);
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                    packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if ((1ULL << packet_out->po_packno) & acked)
        {
            assert(lsquic_packet_out_pns(packet_out) == pns);
            LSQ_DEBUG("Got ACK for packet %"PRIu64, packet_out->po_packno);
            if (packet_out->po_packno == largest_acked(acki))
                imico_take_rtt_sample(conn, packet_out,
                                packet_in->pi_received, acki->lack_delta);
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
        }
    }

    if (conn->imc_sent_packnos & ~conn->imc_acked_packnos[pns] & acked)
    {
        LSQ_DEBUG("Newly acked packets, reset handshake count");
        conn->imc_hsk_count = 0;
    }

    conn->imc_acked_packnos[pns] |= acked;

    return parsed_len;

  err_never_sent:
    warn_time = lsquic_time_now();
    if (0 == conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
        || conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
                + WARNING_INTERVAL < warn_time)
    {
        conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] = warn_time;
        LSQ_WARN("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    }
    else
        LSQ_DEBUG("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    return 0;
}


static unsigned
imico_process_ping_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("got a PING frame, do nothing");
    return 1;
}


static unsigned
imico_process_connection_close_frame (IMICO_PROC_FRAME_ARGS)
{
    struct lsquic_packet_out *packet_out;
    uint64_t error_code;
    uint16_t reason_len;
    uint8_t reason_off;
    int parsed_len, app_error;

    while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out)))
    {
        TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
        imico_destroy_packet(conn, packet_out);
    }
    conn->imc_flags |= IMC_CLOSE_RECVD;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_connect_close_frame(p, len,
                        &app_error, &error_code, &reason_len, &reason_off);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }
    EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code,
                        (int) reason_len, (const char *) p + reason_off);
    LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; "
        "reason: %.*s)", app_error ? "application" : "transport",
"application" : "transport", 1099 error_code, (int) reason_len, (const char *) p + reason_off); 1100 return 0; /* This shuts down the connection */ 1101} 1102 1103 1104static unsigned 1105imico_process_invalid_frame (IMICO_PROC_FRAME_ARGS) 1106{ 1107 LSQ_DEBUG("invalid frame %u (%s)", p[0], 1108 frame_type_2_str[ conn->imc_conn.cn_pf->pf_parse_frame_type(p, len) ]); 1109 return 0; 1110} 1111 1112 1113static unsigned (*const imico_process_frames[N_QUIC_FRAMES]) 1114 (IMICO_PROC_FRAME_ARGS) = 1115{ 1116 [QUIC_FRAME_PADDING] = imico_process_padding_frame, 1117 [QUIC_FRAME_CRYPTO] = imico_process_crypto_frame, 1118 [QUIC_FRAME_ACK] = imico_process_ack_frame, 1119 [QUIC_FRAME_PING] = imico_process_ping_frame, 1120 [QUIC_FRAME_CONNECTION_CLOSE] = imico_process_connection_close_frame, 1121 /* Some of them are invalid, while others are unexpected. We treat 1122 * them the same: handshake cannot proceed. 1123 */ 1124 [QUIC_FRAME_RST_STREAM] = imico_process_invalid_frame, 1125 [QUIC_FRAME_MAX_DATA] = imico_process_invalid_frame, 1126 [QUIC_FRAME_MAX_STREAM_DATA] = imico_process_invalid_frame, 1127 [QUIC_FRAME_MAX_STREAMS] = imico_process_invalid_frame, 1128 [QUIC_FRAME_BLOCKED] = imico_process_invalid_frame, 1129 [QUIC_FRAME_STREAM_BLOCKED] = imico_process_invalid_frame, 1130 [QUIC_FRAME_STREAMS_BLOCKED] = imico_process_invalid_frame, 1131 [QUIC_FRAME_NEW_CONNECTION_ID] = imico_process_invalid_frame, 1132 [QUIC_FRAME_STOP_SENDING] = imico_process_invalid_frame, 1133 [QUIC_FRAME_PATH_CHALLENGE] = imico_process_invalid_frame, 1134 [QUIC_FRAME_PATH_RESPONSE] = imico_process_invalid_frame, 1135 /* STREAM frame can only come in the App PNS and we delay those packets: */ 1136 [QUIC_FRAME_STREAM] = imico_process_invalid_frame, 1137 [QUIC_FRAME_HANDSHAKE_DONE] = imico_process_invalid_frame, 1138 [QUIC_FRAME_ACK_FREQUENCY] = imico_process_invalid_frame, 1139 [QUIC_FRAME_TIMESTAMP] = imico_process_invalid_frame, 1140}; 1141 1142 1143static unsigned 1144imico_process_packet_frame (struct ietf_mini_conn *conn, 1145 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 1146{ 1147 enum enc_level enc_level; 1148 enum quic_frame_type type; 1149 1150 enc_level = lsquic_packet_in_enc_level(packet_in); 1151 type = conn->imc_conn.cn_pf->pf_parse_frame_type(p, len); 1152 if (lsquic_legal_frames_by_level[conn->imc_conn.cn_version][enc_level] 1153 & (1 << type)) 1154 { 1155 packet_in->pi_frame_types |= 1 << type; 1156 return imico_process_frames[type](conn, packet_in, p, len); 1157 } 1158 else 1159 { 1160 LSQ_DEBUG("invalid frame %u at encryption level %s", type, 1161 lsquic_enclev2str[enc_level]); 1162 return 0; 1163 } 1164} 1165 1166 1167static int 1168imico_parse_regular_packet (struct ietf_mini_conn *conn, 1169 struct lsquic_packet_in *packet_in) 1170{ 1171 const unsigned char *p, *pend; 1172 unsigned len; 1173 1174 p = packet_in->pi_data + packet_in->pi_header_sz; 1175 pend = packet_in->pi_data + packet_in->pi_data_sz; 1176 1177 while (p < pend) 1178 { 1179 len = imico_process_packet_frame(conn, packet_in, p, pend - p); 1180 if (len > 0) 1181 p += len; 1182 else 1183 return -1; 1184 } 1185 1186 return 0; 1187} 1188 1189 1190static unsigned 1191highest_bit_set (unsigned long long sz) 1192{ 1193#if __GNUC__ 1194 unsigned clz = __builtin_clzll(sz); 1195 return 63 - clz; 1196#else 1197 unsigned long y; 1198 unsigned n; 1199 n = 64; 1200 y = sz >> 32; if (y) { n -= 32; sz = y; } 1201 y = sz >> 16; if (y) { n -= 16; sz = y; } 1202 y = sz >> 8; if (y) { n -= 8; sz = y; } 1203 y = sz >> 4; if (y) { n -= 4; 
    y = sz >>  2;     if (y) { n -=  2; sz = y; }
    y = sz >>  1;     if (y) return 63 - n + 2;
    return 63 - n + sz;
#endif
}


static void
ignore_init (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned count;

    conn->imc_flags |= IMC_IGNORE_INIT;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << PNS_INIT);

    count = 0;
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                    packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (PNS_INIT == lsquic_packet_out_pns(packet_out))
        {
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
            ++count;
        }
    }

    LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; "
        "destroyed %u packet%.*s", count, count != 1, "s");
}


static void
imico_maybe_delay_processing (struct ietf_mini_conn *conn,
                                    struct lsquic_packet_in *packet_in)
{
    unsigned max_delayed;

    if (conn->imc_flags & IMC_ADDR_VALIDATED)
        max_delayed = IMICO_MAX_DELAYED_PACKETS_VALIDATED;
    else
        max_delayed = IMICO_MAX_DELAYED_PACKETS_UNVALIDATED;

    if (conn->imc_delayed_packets_count < max_delayed)
    {
        ++conn->imc_delayed_packets_count;
        lsquic_packet_in_upref(packet_in);
        TAILQ_INSERT_TAIL(&conn->imc_app_packets, packet_in, pi_next);
        LSQ_DEBUG("delay processing of packet (now delayed %hhu)",
                                    conn->imc_delayed_packets_count);
    }
    else
        LSQ_DEBUG("drop packet, already delayed %hhu packets",
                                    conn->imc_delayed_packets_count);
}


/* [draft-ietf-quic-transport-30] Section 8.1:
 " Additionally, a server MAY consider the client address validated if
 " the client uses a connection ID chosen by the server and the
 " connection ID contains at least 64 bits of entropy.
 *
 * We use RAND_bytes() to generate SCIDs, so it's all entropy.
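 *
 * Concretely: an incoming DCID of eight or more bytes that matches one of
 * the CIDs this connection generated (CCE_SEQNO is set) marks the peer
 * address as validated in the function below.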
 */
static void
imico_maybe_validate_by_dcid (struct ietf_mini_conn *conn,
                                                const lsquic_cid_t *dcid)
{
    unsigned i;

    if (dcid->len >= 8)
        /* Generic code with unnecessary loop as future-proofing */
        for (i = 0; i < conn->imc_conn.cn_n_cces; ++i)
            if ((conn->imc_conn.cn_cces_mask & (1 << i))
                && (conn->imc_conn.cn_cces[i].cce_flags & CCE_SEQNO)
                && LSQUIC_CIDS_EQ(&conn->imc_conn.cn_cces[i].cce_cid, dcid))
            {
                imico_peer_addr_validated(conn, "dcid/scid + entropy");
                return;
            }
}


static int
imico_received_packet_is_dup (struct ietf_mini_conn *conn,
                        enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return lsquic_trechist_contains(
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems
                                    + TRECHIST_MAX_RANGES * pns, packno);
    else
        return !!(conn->imc_recvd_packnos.bitmasks[pns] & (1ULL << packno));
}


static int
imico_packno_is_largest (struct ietf_mini_conn *conn,
                        enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
        return 0 == conn->imc_recvd_packnos.trechist.hist_masks[pns]
            || packno > lsquic_trechist_max(
                    conn->imc_recvd_packnos.trechist.hist_masks[pns],
                    conn->imc_recvd_packnos.trechist.hist_elems
                                            + TRECHIST_MAX_RANGES * pns);
    else
        return 0 == conn->imc_recvd_packnos.bitmasks[pns]
            || packno > highest_bit_set(conn->imc_recvd_packnos.bitmasks[pns]);
}


static void
imico_record_recvd_packno (struct ietf_mini_conn *conn,
                        enum packnum_space pns, lsquic_packno_t packno)
{
    if (conn->imc_flags & IMC_TRECHIST)
    {
        if (0 != lsquic_trechist_insert(
                    &conn->imc_recvd_packnos.trechist.hist_masks[pns],
                    conn->imc_recvd_packnos.trechist.hist_elems
                                    + TRECHIST_MAX_RANGES * pns, packno))
        {
            LSQ_INFO("too many ranges for trechist to hold or range too wide");
            conn->imc_flags |= IMC_ERROR;
        }
    }
    else
        conn->imc_recvd_packnos.bitmasks[pns] |= 1ULL << packno;
}


static int
imico_switch_to_trechist (struct ietf_mini_conn *conn)
{
    uint32_t masks[IMICO_N_PNS];
    enum packnum_space pns;
    struct trechist_elem *elems;
    struct ietf_mini_rechist iter;

    elems = malloc(TRECHIST_SIZE * N_PNS);
    if (!elems)
    {
        LSQ_WARN("cannot allocate trechist elems");
        return -1;
    }

    for (pns = 0; pns < IMICO_N_PNS; ++pns)
        if (conn->imc_recvd_packnos.bitmasks[pns])
        {
            lsquic_imico_rechist_init(&iter, conn, pns);
            if (0 != lsquic_trechist_copy_ranges(&masks[pns],
                            elems + TRECHIST_MAX_RANGES * pns, &iter,
                            lsquic_imico_rechist_first,
                            lsquic_imico_rechist_next))
            {
                LSQ_WARN("cannot copy ranges from bitmask to trechist");
                free(elems);
                return -1;
            }
        }
        else
            masks[pns] = 0;

    memcpy(conn->imc_recvd_packnos.trechist.hist_masks, masks, sizeof(masks));
    conn->imc_recvd_packnos.trechist.hist_elems = elems;
    conn->imc_flags |= IMC_TRECHIST;
    LSQ_DEBUG("switched to trechist");
    return 0;
}


/* Only a single packet is supported */
static void
ietf_mini_conn_ci_packet_in (struct lsquic_conn *lconn,
                        struct lsquic_packet_in *packet_in)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum dec_packin dec_packin;
    enum packnum_space pns;

    /* Update "bytes in" count as early as possible.  From
     * [draft-ietf-quic-transport-28] Section 8.1:
     "   For the purposes of
     " avoiding amplification prior to address validation, servers MUST
     " count all of the payload bytes received in datagrams that are
     " uniquely attributed to a single connection.  This includes datagrams
     " that contain packets that are successfully processed and datagrams
     " that contain packets that are all discarded.
     */
    conn->imc_bytes_in += packet_in->pi_data_sz;

    if (conn->imc_flags & IMC_ERROR)
    {
        LSQ_DEBUG("ignore incoming packet: connection is in error state");
        return;
    }

    if (!(conn->imc_flags & IMC_ADDR_VALIDATED))
        imico_maybe_validate_by_dcid(conn, &packet_in->pi_dcid);

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    if (pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_DEBUG("ignore init packet");    /* Don't bother decrypting */
        return;
    }

    dec_packin = lconn->cn_esf_c->esf_decrypt_packet(lconn->cn_enc_session,
                            conn->imc_enpub, &conn->imc_conn, packet_in);
    if (dec_packin != DECPI_OK)
    {
        LSQ_DEBUG("could not decrypt packet");
        if (DECPI_NOT_YET == dec_packin)
            imico_maybe_delay_processing(conn, packet_in);
        return;
    }

    EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in);

    if (pns == PNS_APP)
    {
        imico_maybe_delay_processing(conn, packet_in);
        return;
    }
    else if (pns == PNS_HSK)
        imico_peer_addr_validated(conn, "handshake PNS");

    if (((conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3) < pns)
    {
        conn->imc_flags &= ~(3 << IMCBIT_PNS_BIT_SHIFT);
        conn->imc_flags |= pns << IMCBIT_PNS_BIT_SHIFT;
    }

    if (pns == PNS_HSK && !(conn->imc_flags & IMC_IGNORE_INIT))
        ignore_init(conn);

    if (packet_in->pi_packno > MAX_PACKETS
                                && !(conn->imc_flags & IMC_TRECHIST))
    {
        if (0 != imico_switch_to_trechist(conn))
            return;
    }

    if (imico_received_packet_is_dup(conn, pns, packet_in->pi_packno))
    {
        LSQ_DEBUG("duplicate packet %"PRIu64, packet_in->pi_packno);
        return;
    }

    /* Update receive history before processing the packet: if there is an
     * error, the connection is terminated and recording this packet number
     * is helpful when it is printed along with other diagnostics in dtor.
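     * The timestamp stored in imc_largest_recvd[] below is also what
     * imico_rechist_largest_recv() reports when ACK frames are generated.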
     */
    if (imico_packno_is_largest(conn, pns, packet_in->pi_packno))
        conn->imc_largest_recvd[pns] = packet_in->pi_received;
    imico_record_recvd_packno(conn, pns, packet_in->pi_packno);

    if (0 != imico_parse_regular_packet(conn, packet_in))
    {
        LSQ_DEBUG("connection is now in error state");
        conn->imc_flags |= IMC_ERROR;
        return;
    }

    if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)))
        LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]);
    conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns;
    ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ];
    conn->imc_incoming_ecn <<= 1;
    conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT;
}


static void
ietf_mini_conn_ci_packet_sent (struct lsquic_conn *lconn,
                                struct lsquic_packet_out *packet_out)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    conn->imc_sent_packnos |= 1ULL << packet_out->po_packno;
    conn->imc_ecn_packnos |= !!lsquic_packet_out_ecn(packet_out)
                                                << packet_out->po_packno;
#if 0
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        assert(mc->mc_flags & MC_UNSENT_ACK);
        mc->mc_flags &= ~MC_UNSENT_ACK;
    }
#endif
    ++conn->imc_ecn_counts_out[ lsquic_packet_out_pns(packet_out) ]
                              [ lsquic_packet_out_ecn(packet_out) ];
    if (packet_out->po_header_type == HETY_HANDSHAKE)
        conn->imc_flags |= IMC_HSK_PACKET_SENT;
    LSQ_DEBUG("%s: packet %"PRIu64" sent", __func__, packet_out->po_packno);
}


static void
ietf_mini_conn_ci_packet_not_sent (struct lsquic_conn *lconn,
                                struct lsquic_packet_out *packet_out)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    size_t packet_size;

    packet_out->po_flags &= ~PO_SENT;
    packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
    conn->imc_bytes_out -= packet_size;
    LSQ_DEBUG("%s: packet %"PRIu64" not sent", __func__, packet_out->po_packno);
}


static void
imico_return_enc_data (struct ietf_mini_conn *conn,
                                    struct lsquic_packet_out *packet_out)
{
    conn->imc_enpub->enp_pmi->pmi_return(conn->imc_enpub->enp_pmi_ctx,
        conn->imc_path.np_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static int
imico_repackage_packet (struct ietf_mini_conn *conn,
                                    struct lsquic_packet_out *packet_out)
{
    const lsquic_packno_t oldno = packet_out->po_packno;
    const lsquic_packno_t packno = conn->imc_next_packno++;
    if (packno > MAX_PACKETS)
        return -1;

    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
    packet_out->po_packno = packno;
    packet_out->po_flags &= ~PO_SENT;
    lsquic_packet_out_set_ecn(packet_out, imico_get_ecn(conn));
    if (packet_out->po_flags & PO_ENCRYPTED)
        imico_return_enc_data(conn, packet_out);
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    return 0;
}


static int
imico_handle_losses_and_have_unsent (struct ietf_mini_conn *conn,
                                                        lsquic_time_t now)
{
    TAILQ_HEAD(, lsquic_packet_out) lost_packets =
                                    TAILQ_HEAD_INITIALIZER(lost_packets);
    const struct lsquic_conn *const lconn = &conn->imc_conn;
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t retx_to = 0;
    unsigned n_to_send = 0;
    size_t packet_size;
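
    /* Single pass over the outgoing queue: sent packets older than the
     * retransmission timeout are collected on lost_packets; unsent packets
     * that clear the amplification check are counted as sendable.
     */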
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                    packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_flags & PO_SENT)
        {
            if (0 == retx_to)
                retx_to = imico_calc_retx_timeout(conn);
            if (packet_out->po_sent + retx_to < now)
            {
                LSQ_DEBUG("packet %"PRIu64" has been lost (rto: %"PRIu64")",
                                    packet_out->po_packno, retx_to);
                TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
                TAILQ_INSERT_TAIL(&lost_packets, packet_out, po_next);
            }
        }
        else if (packet_size = lsquic_packet_out_total_sz(lconn, packet_out),
                                        imico_can_send(conn, packet_size))
            ++n_to_send;
        else
            break;
    }

    conn->imc_hsk_count += !TAILQ_EMPTY(&lost_packets);

    while ((packet_out = TAILQ_FIRST(&lost_packets)))
    {
        TAILQ_REMOVE(&lost_packets, packet_out, po_next);
        if ((packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK)
                        && 0 == imico_repackage_packet(conn, packet_out))
        {
            packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
            if (imico_can_send(conn, packet_size))
                ++n_to_send;
        }
        else
            imico_destroy_packet(conn, packet_out);
    }

    return n_to_send > 0;
}


static int
imico_have_packets_to_send (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    return imico_handle_losses_and_have_unsent(conn, now);
}


void
lsquic_imico_rechist_init (struct ietf_mini_rechist *rechist,
                const struct ietf_mini_conn *conn, enum packnum_space pns)
{
    assert(pns < IMICO_N_PNS);
    rechist->conn = conn;
    rechist->pns  = pns;
    if (conn->imc_flags & IMC_TRECHIST)
        lsquic_trechist_iter(&rechist->u.trechist_iter,
            conn->imc_recvd_packnos.trechist.hist_masks[pns],
            conn->imc_recvd_packnos.trechist.hist_elems + TRECHIST_MAX_RANGES * pns);
    else
    {
        rechist->u.bitmask.cur_set = 0;
        rechist->u.bitmask.cur_idx = 0;
    }
}


static lsquic_time_t
imico_rechist_largest_recv (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    return rechist->conn->imc_largest_recvd[ rechist->pns ];
}


static const struct lsquic_packno_range *
imico_bitmask_rechist_next (struct ietf_mini_rechist *rechist)
{
    const struct ietf_mini_conn *conn = rechist->conn;
    packno_set_t packnos;
    int i;

    packnos = rechist->u.bitmask.cur_set;
    if (0 == packnos)
        return NULL;

    /* There may be a faster way to do this, but for now, we just want
     * correctness.
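     *
     * For example, with packets 0, 1, 2, 4 and 5 received, iteration yields
     * the range [4, 5], then [0, 2], then NULL.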
1655 */ 1656 for (i = rechist->u.bitmask.cur_idx; i >= 0; --i) 1657 if (packnos & (1ULL << i)) 1658 { 1659 rechist->u.bitmask.range.low = i; 1660 rechist->u.bitmask.range.high = i; 1661 break; 1662 } 1663 assert(i >= 0); /* We must have hit at least one bit */ 1664 --i; 1665 for ( ; i >= 0 && (packnos & (1ULL << i)); --i) 1666 rechist->u.bitmask.range.low = i; 1667 if (i >= 0) 1668 { 1669 rechist->u.bitmask.cur_set = packnos & ((1ULL << i) - 1); 1670 rechist->u.bitmask.cur_idx = i; 1671 } 1672 else 1673 rechist->u.bitmask.cur_set = 0; 1674 LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__, 1675 rechist->u.bitmask.range.low, rechist->u.bitmask.range.high); 1676 return &rechist->u.bitmask.range; 1677} 1678 1679 1680const struct lsquic_packno_range * 1681lsquic_imico_rechist_next (void *rechist_ctx) 1682{ 1683 struct ietf_mini_rechist *rechist = rechist_ctx; 1684 1685 if (rechist->conn->imc_flags & IMC_TRECHIST) 1686 return lsquic_trechist_next(&rechist->u.trechist_iter); 1687 else 1688 return imico_bitmask_rechist_next(rechist); 1689} 1690 1691 1692const struct lsquic_packno_range * 1693lsquic_imico_rechist_first (void *rechist_ctx) 1694{ 1695 struct ietf_mini_rechist *rechist = rechist_ctx; 1696 1697 if (rechist->conn->imc_flags & IMC_TRECHIST) 1698 return lsquic_trechist_first(&rechist->u.trechist_iter); 1699 else 1700 { 1701 rechist->u.bitmask.cur_set 1702 = rechist->conn->imc_recvd_packnos.bitmasks[ rechist->pns ]; 1703 rechist->u.bitmask.cur_idx 1704 = highest_bit_set(rechist->u.bitmask.cur_set); 1705 return lsquic_imico_rechist_next(rechist_ctx); 1706 } 1707} 1708 1709 1710static const enum header_type pns2hety[] = 1711{ 1712 [PNS_INIT] = HETY_INITIAL, 1713 [PNS_HSK] = HETY_HANDSHAKE, 1714 [PNS_APP] = HETY_NOT_SET, 1715}; 1716 1717 1718static int 1719imico_generate_ack (struct ietf_mini_conn *conn, enum packnum_space pns, 1720 lsquic_time_t now) 1721{ 1722 struct lsquic_packet_out *packet_out; 1723 enum header_type header_type; 1724 struct ietf_mini_rechist rechist; 1725 int not_used_has_missing, len; 1726 uint64_t ecn_counts_buf[4]; 1727 const uint64_t *ecn_counts; 1728 1729 header_type = pns2hety[pns]; 1730 1731 if (conn->imc_incoming_ecn) 1732 { 1733 ecn_counts_buf[0] = conn->imc_ecn_counts_in[pns][0]; 1734 ecn_counts_buf[1] = conn->imc_ecn_counts_in[pns][1]; 1735 ecn_counts_buf[2] = conn->imc_ecn_counts_in[pns][2]; 1736 ecn_counts_buf[3] = conn->imc_ecn_counts_in[pns][3]; 1737 ecn_counts = ecn_counts_buf; 1738 } 1739 else 1740 ecn_counts = NULL; 1741 1742 packet_out = imico_get_packet_out(conn, header_type, 0); 1743 if (!packet_out) 1744 return -1; 1745 1746 /* Generate ACK frame */ 1747 lsquic_imico_rechist_init(&rechist, conn, pns); 1748 len = conn->imc_conn.cn_pf->pf_gen_ack_frame( 1749 packet_out->po_data + packet_out->po_data_sz, 1750 lsquic_packet_out_avail(packet_out), lsquic_imico_rechist_first, 1751 lsquic_imico_rechist_next, imico_rechist_largest_recv, &rechist, 1752 now, ¬_used_has_missing, &packet_out->po_ack2ed, ecn_counts); 1753 if (len < 0) 1754 { 1755 LSQ_WARN("could not generate ACK frame"); 1756 return -1; 1757 } 1758 EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->imc_conn.cn_pf, 1759 packet_out->po_data + packet_out->po_data_sz, len); 1760 packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK; 1761 packet_out->po_data_sz += len; 1762 packet_out->po_regen_sz += len; 1763 conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << pns); 1764 LSQ_DEBUG("wrote ACK frame of size %d in %s", len, lsquic_pns2str[pns]); 1765 return 0; 1766} 1767 1768 1769static int 
imico_generate_acks (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    enum packnum_space pns;

    for (pns = PNS_INIT; pns < IMICO_N_PNS; ++pns)
        if (conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)
                && !(pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT)))
            if (0 != imico_generate_ack(conn, pns, now))
                return -1;

    return 0;
}


static void
imico_generate_conn_close (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    enum packnum_space pns, pns_max;
    unsigned error_code;
    const char *reason;
    size_t need;
    int sz, rlen, is_app;
    char reason_buf[0x20];

    if (conn->imc_flags & IMC_ABORT_ERROR)
    {
        is_app = !!(conn->imc_flags & IMC_ABORT_ISAPP);
        error_code = conn->imc_error_code;
        reason = NULL;
        rlen = 0;
    }
    else if (conn->imc_flags & IMC_TLS_ALERT)
    {
        is_app = 0;
        error_code = 0x100 + conn->imc_tls_alert;
        if (ALERT_NO_APPLICATION_PROTOCOL == conn->imc_tls_alert)
            reason = "no suitable application protocol";
        else
        {
            snprintf(reason_buf, sizeof(reason_buf), "TLS alert %"PRIu8,
                                                    conn->imc_tls_alert);
            reason = reason_buf;
        }
        rlen = strlen(reason);
    }
    else if (conn->imc_flags & IMC_BAD_TRANS_PARAMS)
    {
        is_app = 0;
        error_code = TEC_TRANSPORT_PARAMETER_ERROR;
        reason = "bad transport parameters";
        rlen = 24;
    }
    else if (conn->imc_flags & IMC_HSK_FAILED)
    {
        is_app = 0;
        error_code = TEC_NO_ERROR;
        reason = "handshake failed";
        rlen = 16;
    }
    else if (conn->imc_flags & IMC_PARSE_FAILED)
    {
        is_app = 0;
        error_code = TEC_FRAME_ENCODING_ERROR;
        reason = "cannot decode frame";
        rlen = 19;
    }
    else
    {
        is_app = 0;
        error_code = TEC_INTERNAL_ERROR;
        reason = NULL;
        rlen = 0;
    }


/* [draft-ietf-quic-transport-28] Section 10.3.1:
 *
 " A client will always know whether the server has Handshake keys (see
 " Section 17.2.2.1), but it is possible that a server does not know
 " whether the client has Handshake keys.  Under these circumstances, a
 " server SHOULD send a CONNECTION_CLOSE frame in both Handshake and
 " Initial packets to ensure that at least one of them is processable by
 " the client.
--- 8< ---
 " Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake
 " packet could expose application state or be used to alter application
 " state.  A CONNECTION_CLOSE of type 0x1d MUST be replaced by a
 " CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or
 " Handshake packets.  Otherwise, information about the application
 " state might be revealed.  Endpoints MUST clear the value of the
 " Reason Phrase field and SHOULD use the APPLICATION_ERROR code when
 " converting to a CONNECTION_CLOSE of type 0x1c.
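 *
 * The code below follows this: for versions newer than ID-27 an
 * application-level close is downgraded to a transport-level
 * APPLICATION_ERROR with the reason phrase dropped, and the frame is
 * written into Initial and/or Handshake packets depending on what has
 * been sent and received so far.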
 */
    LSQ_DEBUG("sending CONNECTION_CLOSE, is_app: %d, error code: %u, "
        "reason: %.*s", is_app, error_code, rlen, reason);
    if (is_app && conn->imc_conn.cn_version > LSQVER_ID27)
    {
        LSQ_DEBUG("convert to 0x1C, replace code and reason");
        is_app = 0;
        error_code = TEC_APPLICATION_ERROR;
        rlen = 0;
    }

    pns = (conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3;
    switch ((!!(conn->imc_flags & IMC_HSK_PACKET_SENT) << 1)
                    | (pns == PNS_HSK) /* Handshake packet received */)
    {
    case (0 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_INIT;
        break;
    case (1 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_HSK;
        break;
    default:
        pns = PNS_HSK;
        pns_max = PNS_HSK;
        break;
    }

    need = conn->imc_conn.cn_pf->pf_connect_close_frame_size(is_app,
                                                    error_code, 0, rlen);
    LSQ_DEBUG("will generate %u CONNECTION_CLOSE frame%.*s",
                                    pns_max - pns + 1, pns_max > pns, "s");
    do
    {
        header_type = pns2hety[pns];
        packet_out = imico_get_packet_out(conn, header_type, need);
        if (!packet_out)
            return;
        sz = conn->imc_conn.cn_pf->pf_gen_connect_close_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), is_app, error_code, reason,
                rlen);
        if (sz >= 0)
        {
            packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE;
            packet_out->po_data_sz += sz;
            LSQ_DEBUG("generated CONNECTION_CLOSE frame");
        }
        else
            LSQ_WARN("could not generate CONNECTION_CLOSE frame");
        ++pns;
    }
    while (pns <= pns_max);
}


static int
imico_generate_handshake_done (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    unsigned need;
    int sz;

    need = conn->imc_conn.cn_pf->pf_handshake_done_frame_size();
    packet_out = imico_get_packet_out(conn, HETY_NOT_SET, need);
    if (!packet_out)
        return -1;
    sz = conn->imc_conn.cn_pf->pf_gen_handshake_done_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out));
    if (sz < 0)
    {
        LSQ_WARN("could not generate HANDSHAKE_DONE frame");
        return -1;
    }

    packet_out->po_frame_types |= 1 << QUIC_FRAME_HANDSHAKE_DONE;
    packet_out->po_data_sz += sz;
    LSQ_DEBUG("generated HANDSHAKE_DONE frame");
    conn->imc_flags |= IMC_HSK_DONE_SENT;

    return 0;
}


static enum tick_st
ietf_mini_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum tick_st tick;

    if (conn->imc_created + conn->imc_enpub->enp_settings.es_handshake_to < now)
    {
        LSQ_DEBUG("connection expired: closing");
        return TICK_CLOSE;
    }


    if (conn->imc_flags & (IMC_QUEUED_ACK_INIT|IMC_QUEUED_ACK_HSK))
    {
        if (0 != imico_generate_acks(conn, now))
        {
            conn->imc_flags |= IMC_ERROR;
            return TICK_CLOSE;
        }
    }


    tick = 0;

    if (conn->imc_flags & IMC_ERROR)
    {
  close_on_error:
        if (!(conn->imc_flags & IMC_CLOSE_RECVD))
            imico_generate_conn_close(conn);
        tick |= TICK_CLOSE;
    }
    else if (conn->imc_flags & IMC_HSK_OK)
    {
        if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session))
            LSQ_DEBUG("still in init, defer HANDSHAKE_DONE");
        else if (0 != imico_generate_handshake_done(conn))
            goto close_on_error;
        tick |= TICK_PROMOTE;
    }
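
    /* imico_have_packets_to_send() also runs loss detection and repackages
     * retransmittable packets, so TICK_SEND covers retransmissions as well
     * as newly generated packets.
     */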

    if (imico_have_packets_to_send(conn, now))
        tick |= TICK_SEND;
    else
        tick |= TICK_QUIET;

    LSQ_DEBUG("Return TICK %d", tick);
    return tick;
}


static void
ietf_mini_conn_ci_internal_error (struct lsquic_conn *lconn,
                                                    const char *format, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_INFO("internal error reported");
    conn->imc_flags |= IMC_ERROR;
}


static void
ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                                unsigned error_code, const char *fmt, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    va_list ap;
    const char *err_str, *percent;
    char err_buf[0x100];

    percent = strchr(fmt, '%');
    if (percent)
    {
        va_start(ap, fmt);
        vsnprintf(err_buf, sizeof(err_buf), fmt, ap);
        va_end(ap);
        err_str = err_buf;
    }
    else
        err_str = fmt;
    LSQ_INFO("abort error: is_app: %d; error code: %u; error str: %s",
        is_app, error_code, err_str);
    conn->imc_flags |= IMC_ERROR|IMC_ABORT_ERROR;
    if (is_app)
        conn->imc_flags |= IMC_ABORT_ISAPP;
    conn->imc_error_code = error_code;
}


static struct network_path *
ietf_mini_conn_ci_get_path (struct lsquic_conn *lconn,
                                                const struct sockaddr *sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    return &conn->imc_path;
}


static const lsquic_cid_t *
ietf_mini_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    if (conn->imc_path.np_dcid.len)
        return &conn->imc_path.np_dcid;
    else
        return CN_SCID(lconn);
}


static unsigned char
ietf_mini_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct sockaddr *orig_peer_sa;
    struct lsquic_packet_out *packet_out;
    size_t len;
    char path_str[4][INET6_ADDRSTRLEN + sizeof(":65535")];

    if (NP_IS_IPv6(&conn->imc_path) != (AF_INET6 == peer_sa->sa_family))
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if ((packet_out->po_flags & (PO_SENT|PO_ENCRYPTED)) == PO_ENCRYPTED)
                imico_return_enc_data(conn, packet_out);

    orig_peer_sa = NP_PEER_SA(&conn->imc_path);
    if (orig_peer_sa->sa_family == 0)
        LSQ_DEBUG("connection to %s from %s", SA2STR(local_sa, path_str[0]),
                                                SA2STR(peer_sa, path_str[1]));
    else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa)
            && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa)))
    {
        LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)",
            SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]),
            SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]),
            SA2STR(local_sa, path_str[2]),
            SA2STR(peer_sa, path_str[3]));
        conn->imc_flags |= IMC_PATH_CHANGED;
    }

    len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                         : sizeof(struct sockaddr_in6);

    memcpy(conn->imc_path.np_peer_addr, peer_sa, len);
    memcpy(conn->imc_path.np_local_addr, local_sa, len);
    conn->imc_path.np_peer_ctx = peer_ctx;
    return 0;
}


static void
ietf_mini_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    conn->imc_bytes_in += garbage_sz;
    LSQ_DEBUG("count %zu bytes of garbage, new value: %u bytes", garbage_sz,
        conn->imc_bytes_in);
}


static const struct conn_iface mini_conn_ietf_iface = {
    .ci_abort_error = ietf_mini_conn_ci_abort_error,
    .ci_client_call_on_new = ietf_mini_conn_ci_client_call_on_new,
    .ci_count_garbage = ietf_mini_conn_ci_count_garbage,
    .ci_destroy = ietf_mini_conn_ci_destroy,
    .ci_get_engine = ietf_mini_conn_ci_get_engine,
    .ci_get_log_cid = ietf_mini_conn_ci_get_log_cid,
    .ci_get_path = ietf_mini_conn_ci_get_path,
    .ci_hsk_done = ietf_mini_conn_ci_hsk_done,
    .ci_internal_error = ietf_mini_conn_ci_internal_error,
    .ci_is_tickable = ietf_mini_conn_ci_is_tickable,
    .ci_next_packet_to_send = ietf_mini_conn_ci_next_packet_to_send,
    .ci_next_tick_time = ietf_mini_conn_ci_next_tick_time,
    .ci_packet_in = ietf_mini_conn_ci_packet_in,
    .ci_packet_not_sent = ietf_mini_conn_ci_packet_not_sent,
    .ci_packet_sent = ietf_mini_conn_ci_packet_sent,
    .ci_record_addrs = ietf_mini_conn_ci_record_addrs,
    .ci_tick = ietf_mini_conn_ci_tick,
    .ci_tls_alert = ietf_mini_conn_ci_tls_alert,
};
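

/*
 * Illustrative sketch, not part of lsquic: mini_conn_ietf_iface above is a
 * function table, the usual C "vtable" pattern.  The engine holds only a
 * struct lsquic_conn pointer and dispatches through the connection's iface
 * pointer, which is how mini connections and full connections can be driven
 * by the same engine code.  The toy program below demonstrates the same
 * dispatch pattern with hypothetical names (toy_conn, toy_iface, toy_tick,
 * toy_destroy).  It is kept under #if 0 so it is not compiled as part of
 * this file; extracted on its own, it builds and runs.
 */
#if 0
#include <stdio.h>

struct toy_conn;

struct toy_iface
{
    /* Returns non-zero while the connection wants to keep running */
    int  (*ti_tick)    (struct toy_conn *);
    void (*ti_destroy) (struct toy_conn *);
};

struct toy_conn
{
    const struct toy_iface *tc_if;      /* dispatch table */
    int                     tc_ticks;
};

static int
toy_tick (struct toy_conn *conn)
{
    return ++conn->tc_ticks < 3;        /* pretend we are done after 3 ticks */
}

static void
toy_destroy (struct toy_conn *conn)
{
    printf("destroyed after %d ticks\n", conn->tc_ticks);
}

static const struct toy_iface toy_iface = {
    .ti_tick    = toy_tick,
    .ti_destroy = toy_destroy,
};

int
main (void)
{
    struct toy_conn conn = { .tc_if = &toy_iface, .tc_ticks = 0, };

    while (conn.tc_if->ti_tick(&conn))  /* dispatch through the iface */
        ;
    conn.tc_if->ti_destroy(&conn);
    return 0;
}
#endif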