lsquic_mini_conn_ietf.c revision fbc6cc04
1/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc. See LICENSE. */ 2/* 3 * lsquic_mini_conn_ietf.c -- Mini connection used by the IETF QUIC 4 */ 5 6#include <assert.h> 7#include <errno.h> 8#include <inttypes.h> 9#include <stddef.h> 10#include <stdint.h> 11#include <string.h> 12#include <sys/queue.h> 13#include <stdlib.h> 14 15#include "lsquic.h" 16#include "lsquic_int_types.h" 17#include "lsquic_sizes.h" 18#include "lsquic_hash.h" 19#include "lsquic_conn.h" 20#include "lsquic_mm.h" 21#include "lsquic_malo.h" 22#include "lsquic_engine_public.h" 23#include "lsquic_packet_common.h" 24#include "lsquic_packet_in.h" 25#include "lsquic_packet_out.h" 26#include "lsquic_parse.h" 27#include "lsquic_rtt.h" 28#include "lsquic_util.h" 29#include "lsquic_enc_sess.h" 30#include "lsquic_trechist.h" 31#include "lsquic_mini_conn_ietf.h" 32#include "lsquic_ev_log.h" 33#include "lsquic_trans_params.h" 34#include "lsquic_ietf.h" 35#include "lsquic_packet_ietf.h" 36#include "lsquic_attq.h" 37#include "lsquic_alarmset.h" 38#include "lsquic_crand.h" 39 40#define LSQUIC_LOGGER_MODULE LSQLM_MINI_CONN 41#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(&conn->imc_conn) 42#include "lsquic_logger.h" 43 44#define MIN(a, b) ((a) < (b) ? (a) : (b)) 45#define MAX(a, b) ((a) > (b) ? (a) : (b)) 46 47static const struct conn_iface mini_conn_ietf_iface; 48 49static unsigned highest_bit_set (unsigned long long); 50 51static int 52imico_can_send (const struct ietf_mini_conn *, size_t); 53 54 55static const enum header_type el2hety[] = 56{ 57 [ENC_LEV_INIT] = HETY_HANDSHAKE, 58 [ENC_LEV_CLEAR] = HETY_INITIAL, 59 [ENC_LEV_FORW] = HETY_NOT_SET, 60 [ENC_LEV_EARLY] = 0, /* Invalid */ 61}; 62 63 64static void 65imico_destroy_packet (struct ietf_mini_conn *conn, 66 struct lsquic_packet_out *packet_out) 67{ 68 lsquic_packet_out_destroy(packet_out, conn->imc_enpub, 69 conn->imc_path.np_peer_ctx); 70} 71 72 73int 74lsquic_mini_conn_ietf_ecn_ok (const struct ietf_mini_conn *conn) 75{ 76 packno_set_t acked; 77 78 /* First flight has only Initial and Handshake packets */ 79 acked = conn->imc_acked_packnos[PNS_INIT] 80 | conn->imc_acked_packnos[PNS_HSK] 81 ; 82 return 0 != (conn->imc_ecn_packnos & acked); 83} 84 85 86#define imico_ecn_ok lsquic_mini_conn_ietf_ecn_ok 87 88 89static enum ecn 90imico_get_ecn (struct ietf_mini_conn *conn) 91{ 92 if (!conn->imc_enpub->enp_settings.es_ecn) 93 return ECN_NOT_ECT; 94 else if (!conn->imc_sent_packnos /* We set ECT0 in first flight */ 95 || imico_ecn_ok(conn)) 96 return ECN_ECT0; 97 else 98 return ECN_NOT_ECT; 99} 100 101 102static struct lsquic_packet_out * 103imico_get_packet_out (struct ietf_mini_conn *conn, 104 enum header_type header_type, size_t need) 105{ 106 struct lsquic_packet_out *packet_out; 107 enum ecn ecn; 108 109 if (need) 110 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 111 if (!(packet_out->po_flags & PO_SENT) 112 && packet_out->po_header_type == header_type 113 && lsquic_packet_out_avail(packet_out) >= need) 114 return packet_out; 115 116 if (conn->imc_next_packno >= MAX_PACKETS) 117 { 118 LSQ_DEBUG("ran out of outgoing packet numbers, won't allocate packet"); 119 return NULL; 120 } 121 122 packet_out = lsquic_packet_out_new(&conn->imc_enpub->enp_mm, NULL, 1, 123 &conn->imc_conn, IQUIC_PACKNO_LEN_1, NULL, NULL, &conn->imc_path); 124 if (!packet_out) 125 { 126 LSQ_WARN("could not allocate packet: %s", strerror(errno)); 127 return NULL; 128 } 129 130 packet_out->po_header_type = header_type; 131 packet_out->po_packno = conn->imc_next_packno++; 132 
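    /* A fresh packet: mark it as a mini-connection packet, derive its packet
     * number space from the long-header type via lsquic_hety2pns, and stamp
     * it with the ECN codepoint chosen by imico_get_ecn() -- ECT(0) for the
     * first flight and thereafter only while at least one ECN-marked packet
     * has been acknowledged (see lsquic_mini_conn_ietf_ecn_ok()).
     */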
packet_out->po_flags |= PO_MINI; 133 lsquic_packet_out_set_pns(packet_out, lsquic_hety2pns[header_type]); 134 ecn = imico_get_ecn(conn); 135 packet_out->po_lflags |= ecn << POECN_SHIFT; 136 TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next); 137 packet_out->po_loss_chain = packet_out; 138 return packet_out; 139} 140 141 142static struct ietf_mini_conn * 143cryst_get_conn (const struct mini_crypto_stream *cryst) 144{ 145 return (void *) 146 ((unsigned char *) (cryst - cryst->mcs_enc_level) 147 - offsetof(struct ietf_mini_conn, imc_streams)); 148} 149 150 151struct msg_ctx 152{ 153 const unsigned char *buf; 154 const unsigned char *const end; 155}; 156 157 158static size_t 159read_from_msg_ctx (void *ctx, void *buf, size_t len, int *fin) 160{ 161 struct msg_ctx *msg_ctx = ctx; 162 if (len > (uintptr_t) (msg_ctx->end - msg_ctx->buf)) 163 len = msg_ctx->end - msg_ctx->buf; 164 memcpy(buf, msg_ctx->buf, len); 165 msg_ctx->buf += len; 166 return len; 167} 168 169 170static int 171imico_chlo_has_been_consumed (const struct ietf_mini_conn *conn) 172{ 173 return conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off > 3 174 && conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off >= conn->imc_ch_len; 175} 176 177 178static int 179imico_maybe_process_params (struct ietf_mini_conn *conn) 180{ 181 const struct transport_params *params; 182 183 if (imico_chlo_has_been_consumed(conn) 184 && (conn->imc_flags & (IMC_ENC_SESS_INITED|IMC_HAVE_TP)) 185 == IMC_ENC_SESS_INITED) 186 { 187 params = conn->imc_conn.cn_esf.i->esfi_get_peer_transport_params( 188 conn->imc_conn.cn_enc_session); 189 if (params) 190 { 191 conn->imc_flags |= IMC_HAVE_TP; 192 conn->imc_ack_exp = params->tp_ack_delay_exponent; 193 if (params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE)) 194 { 195 if (params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE] 196 < conn->imc_path.np_pack_size) 197 conn->imc_path.np_pack_size = 198 params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]; 199 } 200 LSQ_DEBUG("read transport params, packet size is set to %hu bytes", 201 conn->imc_path.np_pack_size); 202 } 203 else 204 { 205 conn->imc_flags |= IMC_BAD_TRANS_PARAMS; 206 return -1; 207 } 208 } 209 210 return 0; 211} 212 213 214static ssize_t 215imico_stream_write (void *stream, const void *bufp, size_t bufsz) 216{ 217 struct mini_crypto_stream *const cryst = stream; 218 struct ietf_mini_conn *const conn = cryst_get_conn(cryst); 219 struct lsquic_conn *const lconn = &conn->imc_conn; 220 const struct parse_funcs *const pf = lconn->cn_pf; 221 struct msg_ctx msg_ctx = { bufp, (unsigned char *) bufp + bufsz, }; 222 struct lsquic_packet_out *packet_out; 223 size_t header_sz, need; 224 const unsigned char *p; 225 int len; 226 227 if (0 != imico_maybe_process_params(conn)) 228 return -1; 229 230 if (PNS_INIT == lsquic_enclev2pns[ cryst->mcs_enc_level ] 231 && (conn->imc_flags & IMC_IGNORE_INIT)) 232 { 233 LSQ_WARN("trying to write at the ignored Initial level"); 234 return bufsz; 235 } 236 237 while (msg_ctx.buf < msg_ctx.end) 238 { 239 header_sz = lconn->cn_pf->pf_calc_crypto_frame_header_sz( 240 cryst->mcs_write_off, msg_ctx.end - msg_ctx.buf); 241 need = header_sz + 1; 242 packet_out = imico_get_packet_out(conn, 243 el2hety[ cryst->mcs_enc_level ], need); 244 if (!packet_out) 245 return -1; 246 247 p = msg_ctx.buf; 248 len = pf->pf_gen_crypto_frame(packet_out->po_data + packet_out->po_data_sz, 249 lsquic_packet_out_avail(packet_out), 0, cryst->mcs_write_off, 0, 250 msg_ctx.end - msg_ctx.buf, read_from_msg_ctx, &msg_ctx); 251 if (len < 0) 252 return len; 253 
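        /* pf_gen_crypto_frame() pulled as much of the caller's buffer as
         * would fit through read_from_msg_ctx(), which advances msg_ctx.buf;
         * the distance from the saved pointer `p` is therefore how many
         * stream bytes went into the frame, and mcs_write_off is advanced
         * by that amount below.
         */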
EV_LOG_GENERATED_CRYPTO_FRAME(LSQUIC_LOG_CONN_ID, pf, 254 packet_out->po_data + packet_out->po_data_sz, len); 255 packet_out->po_data_sz += len; 256 packet_out->po_frame_types |= 1 << QUIC_FRAME_CRYPTO; 257 packet_out->po_flags |= PO_HELLO; 258 cryst->mcs_write_off += msg_ctx.buf - p; 259 } 260 261 assert(msg_ctx.buf == msg_ctx.end); 262 return bufsz; 263} 264 265 266static int 267imico_stream_flush (void *stream) 268{ 269 return 0; 270} 271 272 273static struct stream_frame * 274imico_find_stream_frame (const struct ietf_mini_conn *conn, 275 enum enc_level enc_level, unsigned read_off) 276{ 277 struct stream_frame *frame; 278 279 if (conn->imc_last_in.frame && enc_level == conn->imc_last_in.enc_level 280 && read_off == DF_ROFF(conn->imc_last_in.frame)) 281 return conn->imc_last_in.frame; 282 283 TAILQ_FOREACH(frame, &conn->imc_crypto_frames, next_frame) 284 if (enc_level == frame->stream_id && read_off == DF_ROFF(frame)) 285 return frame; 286 287 return NULL; 288} 289 290 291static void 292imico_read_chlo_size (struct ietf_mini_conn *conn, const unsigned char *buf, 293 size_t sz) 294{ 295 const unsigned char *const end = buf + sz; 296 297 assert(conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off < 4); 298 switch (conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off) 299 { 300 case 0: 301 if (buf == end) 302 return; 303 if (*buf != 1) 304 { 305 LSQ_DEBUG("Does not begin with ClientHello"); 306 conn->imc_flags |= IMC_ERROR; 307 return; 308 } 309 ++buf; 310 /* fall-through */ 311 case 1: 312 if (buf == end) 313 return; 314 if (*buf != 0) 315 { 316 LSQ_DEBUG("ClientHello larger than 16K"); 317 conn->imc_flags |= IMC_ERROR; 318 return; 319 } 320 ++buf; 321 /* fall-through */ 322 case 2: 323 if (buf == end) 324 return; 325 conn->imc_ch_len = *buf << 8; 326 ++buf; 327 /* fall-through */ 328 default: 329 if (buf == end) 330 return; 331 conn->imc_ch_len |= *buf; 332 } 333} 334 335 336static ssize_t 337imico_stream_readf (void *stream, 338 size_t (*readf)(void *, const unsigned char *, size_t, int), void *ctx) 339{ 340 struct mini_crypto_stream *const cryst = stream; 341 struct ietf_mini_conn *const conn = cryst_get_conn(cryst); 342 struct stream_frame *frame; 343 const unsigned char *buf; 344 size_t nread, total_read; 345 unsigned avail; 346 347 total_read = 0; 348 while ((frame = imico_find_stream_frame(conn, cryst->mcs_enc_level, 349 cryst->mcs_read_off))) 350 { 351 avail = DF_SIZE(frame) - frame->data_frame.df_read_off; 352 buf = frame->data_frame.df_data + frame->data_frame.df_read_off; 353 nread = readf(ctx, buf, avail, DF_FIN(frame)); 354 if (cryst->mcs_enc_level == ENC_LEV_CLEAR && cryst->mcs_read_off < 4) 355 imico_read_chlo_size(conn, buf, nread); 356 total_read += nread; 357 cryst->mcs_read_off += nread; 358 frame->data_frame.df_read_off += nread; 359 LSQ_DEBUG("read %zu bytes at offset %"PRIu64" on enc level %u", nread, 360 DF_ROFF(frame), cryst->mcs_enc_level); 361 if (DF_END(frame) == DF_ROFF(frame)) 362 { 363 if (frame == conn->imc_last_in.frame) 364 conn->imc_last_in.frame = NULL; 365 else 366 { 367 TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame); 368 --conn->imc_n_crypto_frames; 369 conn->imc_crypto_frames_sz -= DF_SIZE(frame); 370 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, 371 frame->packet_in); 372 lsquic_malo_put(frame); 373 } 374 } 375 if (nread < avail) 376 break; 377 } 378 379 if (total_read > 0) 380 return total_read; 381 else 382 { 383 /* CRYPTO streams never end, so zero bytes read always means 384 * EWOULDBLOCK 385 */ 386 errno = EWOULDBLOCK; 387 return -1; 388 } 389} 
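
/* The want-read/want-write helpers below are plain bit toggles on mcs_flags
 * and, like lsquic_stream_wantread(), return the previous setting.  A rough
 * usage sketch (hypothetical caller):
 *
 *     old = imico_stream_wantread(&conn->imc_streams[ENC_LEV_CLEAR], 1);
 *
 * The enc session flips these bits through the crypto_stream_if callbacks,
 * and imico_dispatch_stream_events() later calls on_read()/on_write() for
 * every created stream whose corresponding want bit is set.
 */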
390 391 392static int 393imico_stream_wantX (struct mini_crypto_stream *cryst, int bit, int is_want) 394{ 395 int old; 396 397 old = (cryst->mcs_flags & (1 << bit)) > 0; 398 cryst->mcs_flags &= ~(1 << bit); 399 cryst->mcs_flags |= !!is_want << bit; 400 return old; 401} 402 403 404static int 405imico_stream_wantwrite (void *stream, int is_want) 406{ 407 return imico_stream_wantX(stream, MCSBIT_WANTWRITE, is_want); 408} 409 410 411static int 412imico_stream_wantread (void *stream, int is_want) 413{ 414 return imico_stream_wantX(stream, MCSBIT_WANTREAD, is_want); 415} 416 417 418static enum enc_level 419imico_stream_enc_level (void *stream) 420{ 421 struct mini_crypto_stream *const cryst = stream; 422 return cryst->mcs_enc_level; 423} 424 425 426static const struct crypto_stream_if crypto_stream_if = 427{ 428 .csi_write = imico_stream_write, 429 .csi_flush = imico_stream_flush, 430 .csi_readf = imico_stream_readf, 431 .csi_wantwrite = imico_stream_wantwrite, 432 .csi_wantread = imico_stream_wantread, 433 .csi_enc_level = imico_stream_enc_level, 434}; 435 436 437static int 438is_first_packet_ok (const struct lsquic_packet_in *packet_in, 439 size_t udp_payload_size) 440{ 441 if (udp_payload_size < IQUIC_MIN_INIT_PACKET_SZ) 442 { 443 /* [draft-ietf-quic-transport-24] Section 14 */ 444 LSQ_LOG1(LSQ_LOG_DEBUG, "incoming UDP payload too small: %zu bytes", 445 udp_payload_size); 446 return 0; 447 } 448 /* TODO: Move decryption of the first packet into this function? */ 449 return 1; /* TODO */ 450} 451 452 453static void 454imico_peer_addr_validated (struct ietf_mini_conn *conn, const char *how) 455{ 456 if (!(conn->imc_flags & IMC_ADDR_VALIDATED)) 457 { 458 conn->imc_flags |= IMC_ADDR_VALIDATED; 459 LSQ_DEBUG("peer address validated (%s)", how); 460 } 461} 462 463 464struct lsquic_conn * 465lsquic_mini_conn_ietf_new (struct lsquic_engine_public *enpub, 466 const struct lsquic_packet_in *packet_in, 467 enum lsquic_version version, int is_ipv4, const lsquic_cid_t *odcid, 468 size_t udp_payload_size) 469{ 470 struct ietf_mini_conn *conn; 471 enc_session_t *enc_sess; 472 enum enc_level i; 473 const struct enc_session_funcs_iquic *esfi; 474 unsigned char rand_nybble; 475 476 if (!is_first_packet_ok(packet_in, udp_payload_size)) 477 return NULL; 478 479 conn = lsquic_malo_get(enpub->enp_mm.malo.mini_conn_ietf); 480 if (!conn) 481 { 482 LSQ_LOG1(LSQ_LOG_WARN, "cannot allocate mini connection: %s", 483 strerror(errno)); 484 return NULL; 485 } 486 memset(conn, 0, sizeof(*conn)); 487 conn->imc_conn.cn_if = &mini_conn_ietf_iface; 488 conn->imc_conn.cn_cces = conn->imc_cces; 489 conn->imc_conn.cn_n_cces = sizeof(conn->imc_cces) 490 / sizeof(conn->imc_cces[0]); 491 conn->imc_cces[0].cce_cid = packet_in->pi_dcid; 492 conn->imc_cces[0].cce_flags = CCE_USED; 493 conn->imc_conn.cn_cces_mask = 1; 494 lsquic_scid_from_packet_in(packet_in, &conn->imc_path.np_dcid); 495 LSQ_DEBUGC("recv SCID from client %"CID_FMT, CID_BITS(&conn->imc_cces[0].cce_cid)); 496 LSQ_DEBUGC("recv DCID from client %"CID_FMT, CID_BITS(&conn->imc_path.np_dcid)); 497 498 /* Generate new SCID. Since is not the original SCID, it is given 499 * a sequence number (0) and therefore can be retired by the client. 
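 * (The client's original DCID kept at index 0, by contrast, carries no
 * sequence number, which is why only the generated SCID below gets the
 * CCE_SEQNO flag.)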
500 */ 501 enpub->enp_generate_scid(&conn->imc_conn, 502 &conn->imc_conn.cn_cces[1].cce_cid, enpub->enp_settings.es_scid_len); 503 504 LSQ_DEBUGC("generated SCID %"CID_FMT" at index %u, switching to it", 505 CID_BITS(&conn->imc_conn.cn_cces[1].cce_cid), 1); 506 conn->imc_conn.cn_cces[1].cce_flags = CCE_SEQNO | CCE_USED; 507 conn->imc_conn.cn_cces_mask |= 1u << 1; 508 conn->imc_conn.cn_cur_cce_idx = 1; 509 510 conn->imc_conn.cn_flags = LSCONN_MINI|LSCONN_IETF|LSCONN_SERVER; 511 conn->imc_conn.cn_version = version; 512 513 for (i = 0; i < N_ENC_LEVS; ++i) 514 { 515 conn->imc_streams[i].mcs_enc_level = i; 516 conn->imc_stream_ps[i] = &conn->imc_streams[i]; 517 } 518 519 rand_nybble = lsquic_crand_get_nybble(enpub->enp_crand); 520 if (rand_nybble == 0) 521 { 522 /* Use trechist for about one out of every sixteen connections so 523 * that the code does not grow stale. 524 */ 525 LSQ_DEBUG("using trechist"); 526 conn->imc_flags |= IMC_TRECHIST; 527 conn->imc_recvd_packnos.trechist.hist_elems 528 = malloc(TRECHIST_SIZE * IMICO_N_PNS); 529 if (!conn->imc_recvd_packnos.trechist.hist_elems) 530 { 531 LSQ_WARN("cannot allocate trechist elems"); 532 return NULL; 533 } 534 } 535 536 esfi = select_esf_iquic_by_ver(version); 537 enc_sess = esfi->esfi_create_server(enpub, &conn->imc_conn, 538 &packet_in->pi_dcid, conn->imc_stream_ps, &crypto_stream_if, 539 &conn->imc_cces[0].cce_cid, &conn->imc_path.np_dcid); 540 if (!enc_sess) 541 { 542 lsquic_malo_put(conn); 543 return NULL; 544 } 545 546 conn->imc_enpub = enpub; 547 conn->imc_created = packet_in->pi_received; 548 if (enpub->enp_settings.es_base_plpmtu) 549 conn->imc_path.np_pack_size = enpub->enp_settings.es_base_plpmtu; 550 else if (is_ipv4) 551 conn->imc_path.np_pack_size = IQUIC_MAX_IPv4_PACKET_SZ; 552 else 553 conn->imc_path.np_pack_size = IQUIC_MAX_IPv6_PACKET_SZ; 554 conn->imc_conn.cn_pf = select_pf_by_ver(version); 555 conn->imc_conn.cn_esf.i = esfi; 556 conn->imc_conn.cn_enc_session = enc_sess; 557 conn->imc_conn.cn_esf_c = select_esf_common_by_ver(version); 558 TAILQ_INIT(&conn->imc_packets_out); 559 TAILQ_INIT(&conn->imc_app_packets); 560 TAILQ_INIT(&conn->imc_crypto_frames); 561 if (odcid) 562 imico_peer_addr_validated(conn, "odcid"); 563 564 LSQ_DEBUG("created mini connection object %p; max packet size=%hu", 565 conn, conn->imc_path.np_pack_size); 566 return &conn->imc_conn; 567} 568 569 570static void 571ietf_mini_conn_ci_client_call_on_new (struct lsquic_conn *lconn) 572{ 573 assert(0); 574} 575 576 577static void 578ietf_mini_conn_ci_destroy (struct lsquic_conn *lconn) 579{ 580 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 581 struct lsquic_packet_out *packet_out; 582 struct lsquic_packet_in *packet_in; 583 struct stream_frame *frame; 584 585 while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out))) 586 { 587 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 588 imico_destroy_packet(conn, packet_out); 589 } 590 while ((packet_in = TAILQ_FIRST(&conn->imc_app_packets))) 591 { 592 TAILQ_REMOVE(&conn->imc_app_packets, packet_in, pi_next); 593 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, packet_in); 594 } 595 while ((frame = TAILQ_FIRST(&conn->imc_crypto_frames))) 596 { 597 TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame); 598 lsquic_packet_in_put(&conn->imc_enpub->enp_mm, frame->packet_in); 599 lsquic_malo_put(frame); 600 } 601 if (lconn->cn_enc_session) 602 lconn->cn_esf.i->esfi_destroy(lconn->cn_enc_session); 603 LSQ_DEBUG("ietf_mini_conn_ci_destroyed"); 604 if (conn->imc_flags & IMC_TRECHIST) 605 
free(conn->imc_recvd_packnos.trechist.hist_elems); 606 lsquic_malo_put(conn); 607} 608 609 610static struct lsquic_engine * 611ietf_mini_conn_ci_get_engine (struct lsquic_conn *lconn) 612{ 613 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 614 return conn->imc_enpub->enp_engine; 615} 616 617 618static void 619ietf_mini_conn_ci_hsk_done (struct lsquic_conn *lconn, 620 enum lsquic_hsk_status status) 621{ 622 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 623 624 switch (status) 625 { 626 case LSQ_HSK_OK: 627 case LSQ_HSK_RESUMED_OK: 628 conn->imc_flags |= IMC_HSK_OK; 629 conn->imc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE; 630 LSQ_DEBUG("handshake OK"); 631 break; 632 default: 633 assert(0); 634 /* fall-through */ 635 case LSQ_HSK_FAIL: 636 conn->imc_flags |= IMC_HSK_FAILED|IMC_ERROR; 637 LSQ_INFO("handshake failed"); 638 break; 639 } 640} 641 642 643static void 644ietf_mini_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert) 645{ 646 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 647 LSQ_DEBUG("got TLS alert %"PRIu8, alert); 648 conn->imc_flags |= IMC_ERROR|IMC_TLS_ALERT; 649 conn->imc_tls_alert = alert; 650} 651 652 653/* A mini connection is only tickable if it has unsent packets. This can 654 * occur when packet sending is delayed. 655 * 656 * Otherwise, a mini connection is not tickable: Either there are incoming 657 * packets, in which case, the connection is going to be ticked, or there is 658 * an alarm pending, in which case it will be handled via the attq. 659 */ 660static int 661ietf_mini_conn_ci_is_tickable (struct lsquic_conn *lconn) 662{ 663 struct ietf_mini_conn *const conn = (struct ietf_mini_conn *) lconn; 664 const struct lsquic_packet_out *packet_out; 665 size_t packet_size; 666 667 if (conn->imc_enpub->enp_flags & ENPUB_CAN_SEND) 668 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 669 if (!(packet_out->po_flags & PO_SENT)) 670 { 671 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 672 return imico_can_send(conn, packet_size); 673 } 674 675 return 0; 676} 677 678 679static int 680imico_can_send (const struct ietf_mini_conn *conn, size_t size) 681{ 682 return (conn->imc_flags & IMC_ADDR_VALIDATED) 683 || conn->imc_bytes_in * 3 >= conn->imc_bytes_out + size 684 ; 685} 686 687 688static struct lsquic_packet_out * 689ietf_mini_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, 690 const struct to_coal *to_coal) 691{ 692 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 693 struct lsquic_packet_out *packet_out; 694 size_t packet_size; 695 696 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 697 { 698 if (packet_out->po_flags & PO_SENT) 699 continue; 700 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 701 if (!(to_coal 702 && (packet_size + to_coal->prev_sz_sum 703 > conn->imc_path.np_pack_size 704 || !lsquic_packet_out_equal_dcids(to_coal->prev_packet, packet_out)) 705 )) 706 { 707 if (!imico_can_send(conn, packet_size)) 708 { 709 LSQ_DEBUG("cannot send packet %"PRIu64" of size %zu: client " 710 "address has not been validated", packet_out->po_packno, 711 packet_size); 712 return NULL; 713 } 714 packet_out->po_flags |= PO_SENT; 715 conn->imc_bytes_out += packet_size; 716 if (!to_coal) 717 LSQ_DEBUG("packet_to_send: %"PRIu64, packet_out->po_packno); 718 else 719 LSQ_DEBUG("packet_to_send: %"PRIu64" (coalesced)", 720 packet_out->po_packno); 721 return packet_out; 722 } 723 else 724 return NULL; 725 } 726 727 return NULL; 728} 729 730 731static int 
732imico_calc_retx_timeout (const struct ietf_mini_conn *conn) 733{ 734 lsquic_time_t to; 735 to = lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats); 736 if (to) 737 { 738 to += to / 2; 739 if (to < 10000) 740 to = 10000; 741 } 742 else 743 to = 300000; 744 return to << conn->imc_hsk_count; 745} 746 747 748static lsquic_time_t 749ietf_mini_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why) 750{ 751 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 752 const struct lsquic_packet_out *packet_out; 753 lsquic_time_t exp_time, retx_time; 754 755 exp_time = conn->imc_created + 756 conn->imc_enpub->enp_settings.es_handshake_to; 757 758 TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next) 759 if (packet_out->po_flags & PO_SENT) 760 { 761 retx_time = packet_out->po_sent + imico_calc_retx_timeout(conn); 762 if (retx_time < exp_time) 763 { 764 *why = N_AEWS + AL_RETX_HSK; 765 return retx_time; 766 } 767 else 768 { 769 *why = AEW_MINI_EXPIRE; 770 return exp_time; 771 } 772 } 773 774 *why = AEW_MINI_EXPIRE; 775 return exp_time; 776} 777 778 779#define IMICO_PROC_FRAME_ARGS \ 780 struct ietf_mini_conn *conn, struct lsquic_packet_in *packet_in, \ 781 const unsigned char *p, size_t len 782 783 784static void 785imico_dispatch_stream_events (struct ietf_mini_conn *conn) 786{ 787 enum enc_level i; 788 789 for (i = 0; i < N_ENC_LEVS; ++i) 790 if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTREAD)) 791 == (MCS_CREATED|MCS_WANTREAD)) 792 { 793 LSQ_DEBUG("dispatch read events on level #%u", i); 794 lsquic_mini_cry_sm_if.on_read((void *) &conn->imc_streams[i], 795 conn->imc_conn.cn_enc_session); 796 } 797 798 for (i = 0; i < N_ENC_LEVS; ++i) 799 if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTWRITE)) 800 == (MCS_CREATED|MCS_WANTWRITE)) 801 { 802 LSQ_DEBUG("dispatch write events on level #%u", i); 803 lsquic_mini_cry_sm_if.on_write((void *) &conn->imc_streams[i], 804 conn->imc_conn.cn_enc_session); 805 } 806} 807 808 809static int 810imico_stash_stream_frame (struct ietf_mini_conn *conn, 811 enum enc_level enc_level, struct lsquic_packet_in *packet_in, 812 const struct stream_frame *frame) 813{ 814 struct stream_frame *copy; 815 816 if (conn->imc_n_crypto_frames >= IMICO_MAX_STASHED_FRAMES) 817 { 818 LSQ_INFO("cannot stash more CRYPTO frames, at %hhu already, while max " 819 "is %u", conn->imc_n_crypto_frames, IMICO_MAX_STASHED_FRAMES); 820 return -1; 821 } 822 823 if (conn->imc_crypto_frames_sz + DF_SIZE(frame) > IMICO_MAX_BUFFERED_CRYPTO) 824 { 825 LSQ_INFO("cannot stash more than %u bytes of CRYPTO frames", 826 IMICO_MAX_BUFFERED_CRYPTO); 827 return -1; 828 } 829 830 copy = lsquic_malo_get(conn->imc_enpub->enp_mm.malo.stream_frame); 831 if (!copy) 832 { 833 LSQ_INFO("could not allocate stream frame for stashing"); 834 return -1; 835 } 836 837 *copy = *frame; 838 copy->packet_in = lsquic_packet_in_get(packet_in); 839 copy->stream_id = enc_level; 840 TAILQ_INSERT_TAIL(&conn->imc_crypto_frames, copy, next_frame); 841 ++conn->imc_n_crypto_frames; 842 conn->imc_crypto_frames_sz += DF_SIZE(frame); 843 return 0; 844} 845 846 847static unsigned 848imico_process_crypto_frame (IMICO_PROC_FRAME_ARGS) 849{ 850 int parsed_len; 851 enum enc_level enc_level, i; 852 struct stream_frame stream_frame; 853 854 parsed_len = conn->imc_conn.cn_pf->pf_parse_crypto_frame(p, len, 855 &stream_frame); 856 if (parsed_len < 0) 857 { 858 conn->imc_flags |= IMC_PARSE_FAILED; 859 return 0; 860 } 861 862 enc_level = lsquic_packet_in_enc_level(packet_in); 863 
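    /* Three cases follow: the frame overlaps the current read offset and can
     * be consumed right away; it starts past the read offset and is stashed
     * for later; or it lies entirely below the read offset and is dropped as
     * a duplicate.
     */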
EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, &stream_frame, enc_level); 864 865 if (conn->imc_streams[enc_level].mcs_read_off >= DF_OFF(&stream_frame) 866 && conn->imc_streams[enc_level].mcs_read_off < DF_END(&stream_frame)) 867 LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level); 868 else if (conn->imc_streams[enc_level].mcs_read_off < DF_OFF(&stream_frame)) 869 { 870 LSQ_DEBUG("Can't read CRYPTO frame on enc level #%u at offset %"PRIu64 871 " yet -- stash", enc_level, DF_OFF(&stream_frame)); 872 if (0 == imico_stash_stream_frame(conn, enc_level, packet_in, 873 &stream_frame)) 874 return parsed_len; 875 else 876 return 0; 877 } 878 else 879 { 880 LSQ_DEBUG("Got duplicate CRYPTO frame for enc level #%u -- ignore", 881 enc_level); 882 return parsed_len; 883 } 884 885 if (!(conn->imc_flags & IMC_ENC_SESS_INITED)) 886 { 887 if (0 != conn->imc_conn.cn_esf.i->esfi_init_server( 888 conn->imc_conn.cn_enc_session)) 889 return 0; 890 conn->imc_flags |= IMC_ENC_SESS_INITED; 891 } 892 893 if (!(conn->imc_streams[enc_level].mcs_flags & MCS_CREATED)) 894 { 895 LSQ_DEBUG("creating stream on level #%u", enc_level); 896 conn->imc_streams[enc_level].mcs_flags |= MCS_CREATED; 897 lsquic_mini_cry_sm_if.on_new_stream(conn->imc_conn.cn_enc_session, 898 (void *) &conn->imc_streams[enc_level]); 899 } 900 901 /* Assume that receiving a CRYPTO frame at a higher level means that we 902 * no longer want to read from a lower level. 903 */ 904 for (i = 0; i < enc_level; ++i) 905 conn->imc_streams[i].mcs_flags &= ~MCS_WANTREAD; 906 907 conn->imc_last_in.frame = &stream_frame; 908 conn->imc_last_in.enc_level = enc_level; 909 imico_dispatch_stream_events(conn); 910 conn->imc_last_in.frame = NULL; 911 912 if (DF_ROFF(&stream_frame) < DF_END(&stream_frame)) 913 { 914 /* This is an odd condition, but let's handle it just in case */ 915 LSQ_DEBUG("New CRYPTO frame on enc level #%u not fully read -- stash", 916 enc_level); 917 if (0 != imico_stash_stream_frame(conn, enc_level, packet_in, 918 &stream_frame)) 919 return 0; 920 } 921 922 923 return parsed_len; 924} 925 926 927static ptrdiff_t 928imico_count_zero_bytes (const unsigned char *p, size_t len) 929{ 930 const unsigned char *const end = p + len; 931 while (p < end && 0 == *p) 932 ++p; 933 return len - (end - p); 934} 935 936 937static unsigned 938imico_process_padding_frame (IMICO_PROC_FRAME_ARGS) 939{ 940 len = (size_t) imico_count_zero_bytes(p, len); 941 EV_LOG_PADDING_FRAME_IN(LSQUIC_LOG_CONN_ID, len); 942 return len; 943} 944 945 946static void 947imico_take_rtt_sample (struct ietf_mini_conn *conn, 948 const struct lsquic_packet_out *packet_out, 949 lsquic_time_t now, lsquic_time_t lack_delta) 950{ 951 assert(packet_out->po_sent); 952 lsquic_time_t measured_rtt = now - packet_out->po_sent; 953 if (lack_delta < measured_rtt) 954 { 955 lsquic_rtt_stats_update(&conn->imc_rtt_stats, measured_rtt, lack_delta); 956 LSQ_DEBUG("srtt: %"PRIu64" usec, var: %"PRIu64, 957 lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats), 958 lsquic_rtt_stats_get_rttvar(&conn->imc_rtt_stats)); 959 } 960} 961 962 963static unsigned 964imico_process_ack_frame (IMICO_PROC_FRAME_ARGS) 965{ 966 int parsed_len; 967 unsigned n; 968 lsquic_packet_out_t *packet_out, *next; 969 struct ack_info *acki; 970 lsquic_packno_t packno; 971 lsquic_time_t warn_time; 972 packno_set_t acked; 973 enum packnum_space pns; 974 uint8_t ack_exp; 975 976 if (conn->imc_flags & IMC_HAVE_TP) 977 ack_exp = conn->imc_ack_exp; 978 else 979 ack_exp = TP_DEF_ACK_DELAY_EXP; /* Odd: no transport params yet? 
*/ 980 acki = conn->imc_enpub->enp_mm.acki; 981 parsed_len = conn->imc_conn.cn_pf->pf_parse_ack_frame(p, len, acki, 982 ack_exp); 983 if (parsed_len < 0) 984 { 985 conn->imc_flags |= IMC_PARSE_FAILED; 986 return 0; 987 } 988 989 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 990 acked = 0; 991 992 for (n = 0; n < acki->n_ranges; ++n) 993 { 994 if (acki->ranges[n].high <= MAX_PACKETS) 995 { 996 acked |= (1ULL << acki->ranges[n].high) 997 | ((1ULL << acki->ranges[n].high) - 1); 998 acked &= ~((1ULL << acki->ranges[n].low) - 1); 999 } 1000 else 1001 { 1002 packno = acki->ranges[n].high; 1003 goto err_never_sent; 1004 } 1005 } 1006 if (acked & ~conn->imc_sent_packnos) 1007 { 1008 packno = highest_bit_set(acked & ~conn->imc_sent_packnos); 1009 goto err_never_sent; 1010 } 1011 1012 EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, acki); 1013 for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1014 packet_out = next) 1015 { 1016 next = TAILQ_NEXT(packet_out, po_next); 1017 if ((1ULL << packet_out->po_packno) & acked) 1018 { 1019 assert(lsquic_packet_out_pns(packet_out) == pns); 1020 LSQ_DEBUG("Got ACK for packet %"PRIu64, packet_out->po_packno); 1021 if (packet_out->po_packno == largest_acked(acki)) 1022 imico_take_rtt_sample(conn, packet_out, 1023 packet_in->pi_received, acki->lack_delta); 1024 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1025 imico_destroy_packet(conn, packet_out); 1026 } 1027 } 1028 1029 if (conn->imc_sent_packnos & ~conn->imc_acked_packnos[pns] & acked) 1030 { 1031 LSQ_DEBUG("Newly acked packets, reset handshake count"); 1032 conn->imc_hsk_count = 0; 1033 } 1034 1035 conn->imc_acked_packnos[pns] |= acked; 1036 1037 return parsed_len; 1038 1039 err_never_sent: 1040 warn_time = lsquic_time_now(); 1041 if (0 == conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] 1042 || conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] 1043 + WARNING_INTERVAL < warn_time) 1044 { 1045 conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] = warn_time; 1046 LSQ_WARN("packet %"PRIu64" (pns: %u) was never sent", packno, pns); 1047 } 1048 else 1049 LSQ_DEBUG("packet %"PRIu64" (pns: %u) was never sent", packno, pns); 1050 return 0; 1051} 1052 1053 1054static unsigned 1055imico_process_ping_frame (IMICO_PROC_FRAME_ARGS) 1056{ 1057 LSQ_DEBUG("got a PING frame, do nothing"); 1058 return 1; 1059} 1060 1061 1062static unsigned 1063imico_process_connection_close_frame (IMICO_PROC_FRAME_ARGS) 1064{ 1065 struct lsquic_packet_out *packet_out; 1066 uint64_t error_code; 1067 uint16_t reason_len; 1068 uint8_t reason_off; 1069 int parsed_len, app_error; 1070 1071 while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out))) 1072 { 1073 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1074 imico_destroy_packet(conn, packet_out); 1075 } 1076 conn->imc_flags |= IMC_CLOSE_RECVD; 1077 parsed_len = conn->imc_conn.cn_pf->pf_parse_connect_close_frame(p, len, 1078 &app_error, &error_code, &reason_len, &reason_off); 1079 if (parsed_len < 0) 1080 { 1081 conn->imc_flags |= IMC_PARSE_FAILED; 1082 return 0; 1083 } 1084 EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code, 1085 (int) reason_len, (const char *) p + reason_off); 1086 LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; " 1087 "reason: %.*s)", app_error ? 
"application" : "transport", 1088 error_code, (int) reason_len, (const char *) p + reason_off); 1089 return 0; /* This shuts down the connection */ 1090} 1091 1092 1093static unsigned 1094imico_process_invalid_frame (IMICO_PROC_FRAME_ARGS) 1095{ 1096 LSQ_DEBUG("invalid frame %u (%s)", p[0], 1097 frame_type_2_str[ conn->imc_conn.cn_pf->pf_parse_frame_type(p, len) ]); 1098 return 0; 1099} 1100 1101 1102static unsigned (*const imico_process_frames[N_QUIC_FRAMES]) 1103 (IMICO_PROC_FRAME_ARGS) = 1104{ 1105 [QUIC_FRAME_PADDING] = imico_process_padding_frame, 1106 [QUIC_FRAME_CRYPTO] = imico_process_crypto_frame, 1107 [QUIC_FRAME_ACK] = imico_process_ack_frame, 1108 [QUIC_FRAME_PING] = imico_process_ping_frame, 1109 [QUIC_FRAME_CONNECTION_CLOSE] = imico_process_connection_close_frame, 1110 /* Some of them are invalid, while others are unexpected. We treat 1111 * them the same: handshake cannot proceed. 1112 */ 1113 [QUIC_FRAME_RST_STREAM] = imico_process_invalid_frame, 1114 [QUIC_FRAME_MAX_DATA] = imico_process_invalid_frame, 1115 [QUIC_FRAME_MAX_STREAM_DATA] = imico_process_invalid_frame, 1116 [QUIC_FRAME_MAX_STREAMS] = imico_process_invalid_frame, 1117 [QUIC_FRAME_BLOCKED] = imico_process_invalid_frame, 1118 [QUIC_FRAME_STREAM_BLOCKED] = imico_process_invalid_frame, 1119 [QUIC_FRAME_STREAMS_BLOCKED] = imico_process_invalid_frame, 1120 [QUIC_FRAME_NEW_CONNECTION_ID] = imico_process_invalid_frame, 1121 [QUIC_FRAME_STOP_SENDING] = imico_process_invalid_frame, 1122 [QUIC_FRAME_PATH_CHALLENGE] = imico_process_invalid_frame, 1123 [QUIC_FRAME_PATH_RESPONSE] = imico_process_invalid_frame, 1124 /* STREAM frame can only come in the App PNS and we delay those packets: */ 1125 [QUIC_FRAME_STREAM] = imico_process_invalid_frame, 1126 [QUIC_FRAME_HANDSHAKE_DONE] = imico_process_invalid_frame, 1127 [QUIC_FRAME_ACK_FREQUENCY] = imico_process_invalid_frame, 1128 [QUIC_FRAME_TIMESTAMP] = imico_process_invalid_frame, 1129}; 1130 1131 1132static unsigned 1133imico_process_packet_frame (struct ietf_mini_conn *conn, 1134 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 1135{ 1136 enum enc_level enc_level; 1137 enum quic_frame_type type; 1138 1139 enc_level = lsquic_packet_in_enc_level(packet_in); 1140 type = conn->imc_conn.cn_pf->pf_parse_frame_type(p, len); 1141 if (lsquic_legal_frames_by_level[conn->imc_conn.cn_version][enc_level] 1142 & (1 << type)) 1143 { 1144 packet_in->pi_frame_types |= 1 << type; 1145 return imico_process_frames[type](conn, packet_in, p, len); 1146 } 1147 else 1148 { 1149 LSQ_DEBUG("invalid frame %u at encryption level %s", type, 1150 lsquic_enclev2str[enc_level]); 1151 return 0; 1152 } 1153} 1154 1155 1156static int 1157imico_parse_regular_packet (struct ietf_mini_conn *conn, 1158 struct lsquic_packet_in *packet_in) 1159{ 1160 const unsigned char *p, *pend; 1161 unsigned len; 1162 1163 p = packet_in->pi_data + packet_in->pi_header_sz; 1164 pend = packet_in->pi_data + packet_in->pi_data_sz; 1165 1166 while (p < pend) 1167 { 1168 len = imico_process_packet_frame(conn, packet_in, p, pend - p); 1169 if (len > 0) 1170 p += len; 1171 else 1172 return -1; 1173 } 1174 1175 return 0; 1176} 1177 1178 1179static unsigned 1180highest_bit_set (unsigned long long sz) 1181{ 1182#if __GNUC__ 1183 unsigned clz = __builtin_clzll(sz); 1184 return 63 - clz; 1185#else 1186 unsigned long y; 1187 unsigned n; 1188 n = 64; 1189 y = sz >> 32; if (y) { n -= 32; sz = y; } 1190 y = sz >> 16; if (y) { n -= 16; sz = y; } 1191 y = sz >> 8; if (y) { n -= 8; sz = y; } 1192 y = sz >> 4; if (y) { n -= 4; 
sz = y; } 1193 y = sz >> 2; if (y) { n -= 2; sz = y; } 1194 y = sz >> 1; if (y) return 63 - n + 2; 1195 return 63 - n + sz; 1196#endif 1197} 1198 1199 1200static void 1201ignore_init (struct ietf_mini_conn *conn) 1202{ 1203 struct lsquic_packet_out *packet_out, *next; 1204 unsigned count; 1205 1206 conn->imc_flags |= IMC_IGNORE_INIT; 1207 conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << PNS_INIT); 1208 1209 count = 0; 1210 for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1211 packet_out = next) 1212 { 1213 next = TAILQ_NEXT(packet_out, po_next); 1214 if (PNS_INIT == lsquic_packet_out_pns(packet_out)) 1215 { 1216 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1217 imico_destroy_packet(conn, packet_out); 1218 ++count; 1219 } 1220 } 1221 1222 LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; " 1223 "destroyed %u packet%.*s", count, count != 1, "s"); 1224} 1225 1226 1227static void 1228imico_maybe_delay_processing (struct ietf_mini_conn *conn, 1229 struct lsquic_packet_in *packet_in) 1230{ 1231 unsigned max_delayed; 1232 1233 if (conn->imc_flags & IMC_ADDR_VALIDATED) 1234 max_delayed = IMICO_MAX_DELAYED_PACKETS_VALIDATED; 1235 else 1236 max_delayed = IMICO_MAX_DELAYED_PACKETS_UNVALIDATED; 1237 1238 if (conn->imc_delayed_packets_count < max_delayed) 1239 { 1240 ++conn->imc_delayed_packets_count; 1241 lsquic_packet_in_upref(packet_in); 1242 TAILQ_INSERT_TAIL(&conn->imc_app_packets, packet_in, pi_next); 1243 LSQ_DEBUG("delay processing of packet (now delayed %hhu)", 1244 conn->imc_delayed_packets_count); 1245 } 1246 else 1247 LSQ_DEBUG("drop packet, already delayed %hhu packets", 1248 conn->imc_delayed_packets_count); 1249} 1250 1251 1252/* [draft-ietf-quic-transport-30] Section 8.1: 1253 " Additionally, a server MAY consider the client address validated if 1254 " the client uses a connection ID chosen by the server and the 1255 " connection ID contains at least 64 bits of entropy. 1256 * 1257 * We use RAND_bytes() to generate SCIDs, so it's all entropy. 
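 * (The check below treats a DCID of at least eight bytes (64 bits) that
 * matches one of our sequence-numbered CCEs as server-generated: only the
 * SCID produced by enp_generate_scid() carries the CCE_SEQNO flag.)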
1258 */ 1259static void 1260imico_maybe_validate_by_dcid (struct ietf_mini_conn *conn, 1261 const lsquic_cid_t *dcid) 1262{ 1263 unsigned i; 1264 1265 if (dcid->len >= 8) 1266 /* Generic code with unnecessary loop as future-proofing */ 1267 for (i = 0; i < conn->imc_conn.cn_n_cces; ++i) 1268 if ((conn->imc_conn.cn_cces_mask & (1 << i)) 1269 && (conn->imc_conn.cn_cces[i].cce_flags & CCE_SEQNO) 1270 && LSQUIC_CIDS_EQ(&conn->imc_conn.cn_cces[i].cce_cid, dcid)) 1271 { 1272 imico_peer_addr_validated(conn, "dcid/scid + entropy"); 1273 return; 1274 } 1275} 1276 1277 1278static int 1279imico_received_packet_is_dup (struct ietf_mini_conn *conn, 1280 enum packnum_space pns, lsquic_packno_t packno) 1281{ 1282 if (conn->imc_flags & IMC_TRECHIST) 1283 return lsquic_trechist_contains( 1284 conn->imc_recvd_packnos.trechist.hist_masks[pns], 1285 conn->imc_recvd_packnos.trechist.hist_elems 1286 + TRECHIST_MAX_RANGES * pns, packno); 1287 else 1288 return !!(conn->imc_recvd_packnos.bitmasks[pns] & (1ULL << packno)); 1289} 1290 1291 1292static int 1293imico_packno_is_largest (struct ietf_mini_conn *conn, 1294 enum packnum_space pns, lsquic_packno_t packno) 1295{ 1296 if (conn->imc_flags & IMC_TRECHIST) 1297 return 0 == conn->imc_recvd_packnos.trechist.hist_masks[pns] 1298 || packno > lsquic_trechist_max( 1299 conn->imc_recvd_packnos.trechist.hist_masks[pns], 1300 conn->imc_recvd_packnos.trechist.hist_elems 1301 + TRECHIST_MAX_RANGES * pns); 1302 else 1303 return 0 == conn->imc_recvd_packnos.bitmasks[pns] 1304 || packno > highest_bit_set(conn->imc_recvd_packnos.bitmasks[pns]); 1305} 1306 1307 1308static void 1309imico_record_recvd_packno (struct ietf_mini_conn *conn, 1310 enum packnum_space pns, lsquic_packno_t packno) 1311{ 1312 if (conn->imc_flags & IMC_TRECHIST) 1313 { 1314 if (0 != lsquic_trechist_insert( 1315 &conn->imc_recvd_packnos.trechist.hist_masks[pns], 1316 conn->imc_recvd_packnos.trechist.hist_elems 1317 + TRECHIST_MAX_RANGES * pns, packno)) 1318 { 1319 LSQ_INFO("too many ranges for trechist to hold or range too wide"); 1320 conn->imc_flags |= IMC_ERROR; 1321 } 1322 } 1323 else 1324 conn->imc_recvd_packnos.bitmasks[pns] |= 1ULL << packno; 1325} 1326 1327 1328static int 1329imico_switch_to_trechist (struct ietf_mini_conn *conn) 1330{ 1331 uint32_t masks[IMICO_N_PNS]; 1332 enum packnum_space pns; 1333 struct trechist_elem *elems; 1334 struct ietf_mini_rechist iter; 1335 1336 elems = malloc(TRECHIST_SIZE * N_PNS); 1337 if (!elems) 1338 { 1339 LSQ_WARN("cannot allocate trechist elems"); 1340 return -1; 1341 } 1342 1343 for (pns = 0; pns < IMICO_N_PNS; ++pns) 1344 if (conn->imc_recvd_packnos.bitmasks[pns]) 1345 { 1346 lsquic_imico_rechist_init(&iter, conn, pns); 1347 if (0 != lsquic_trechist_copy_ranges(&masks[pns], 1348 elems + TRECHIST_MAX_RANGES * pns, &iter, 1349 lsquic_imico_rechist_first, 1350 lsquic_imico_rechist_next)) 1351 { 1352 LSQ_WARN("cannot copy ranges from bitmask to trechist"); 1353 free(elems); 1354 return -1; 1355 } 1356 } 1357 else 1358 masks[pns] = 0; 1359 1360 memcpy(conn->imc_recvd_packnos.trechist.hist_masks, masks, sizeof(masks)); 1361 conn->imc_recvd_packnos.trechist.hist_elems = elems; 1362 conn->imc_flags |= IMC_TRECHIST; 1363 LSQ_DEBUG("switched to trechist"); 1364 return 0; 1365} 1366 1367 1368/* Only a single packet is supported */ 1369static void 1370ietf_mini_conn_ci_packet_in (struct lsquic_conn *lconn, 1371 struct lsquic_packet_in *packet_in) 1372{ 1373 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1374 enum dec_packin dec_packin; 1375 enum 
packnum_space pns; 1376 1377 /* Update "bytes in" count as early as possible. From 1378 * [draft-ietf-quic-transport-28] Section 8.1: 1379 " For the purposes of 1380 " avoiding amplification prior to address validation, servers MUST 1381 " count all of the payload bytes received in datagrams that are 1382 " uniquely attributed to a single connection. This includes datagrams 1383 " that contain packets that are successfully processed and datagrams 1384 " that contain packets that are all discarded. 1385 */ 1386 conn->imc_bytes_in += packet_in->pi_data_sz; 1387 1388 if (conn->imc_flags & IMC_ERROR) 1389 { 1390 LSQ_DEBUG("ignore incoming packet: connection is in error state"); 1391 return; 1392 } 1393 1394 if (!(conn->imc_flags & IMC_ADDR_VALIDATED)) 1395 imico_maybe_validate_by_dcid(conn, &packet_in->pi_dcid); 1396 1397 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 1398 if (pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT)) 1399 { 1400 LSQ_DEBUG("ignore init packet"); /* Don't bother decrypting */ 1401 return; 1402 } 1403 1404 dec_packin = lconn->cn_esf_c->esf_decrypt_packet(lconn->cn_enc_session, 1405 conn->imc_enpub, &conn->imc_conn, packet_in); 1406 if (dec_packin != DECPI_OK) 1407 { 1408 LSQ_DEBUG("could not decrypt packet"); 1409 if (DECPI_NOT_YET == dec_packin) 1410 imico_maybe_delay_processing(conn, packet_in); 1411 return; 1412 } 1413 1414 EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in); 1415 1416 if (pns == PNS_APP) 1417 { 1418 imico_maybe_delay_processing(conn, packet_in); 1419 return; 1420 } 1421 else if (pns == PNS_HSK) 1422 imico_peer_addr_validated(conn, "handshake PNS"); 1423 1424 if (((conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3) < pns) 1425 { 1426 conn->imc_flags &= ~(3 << IMCBIT_PNS_BIT_SHIFT); 1427 conn->imc_flags |= pns << IMCBIT_PNS_BIT_SHIFT; 1428 } 1429 1430 if (pns == PNS_HSK && !(conn->imc_flags & IMC_IGNORE_INIT)) 1431 ignore_init(conn); 1432 1433 if (packet_in->pi_packno > MAX_PACKETS 1434 && !(conn->imc_flags & IMC_TRECHIST)) 1435 { 1436 if (0 != imico_switch_to_trechist(conn)) 1437 return; 1438 } 1439 1440 if (imico_received_packet_is_dup(conn, pns, packet_in->pi_packno)) 1441 { 1442 LSQ_DEBUG("duplicate packet %"PRIu64, packet_in->pi_packno); 1443 return; 1444 } 1445 1446 /* Update receive history before processing the packet: if there is an 1447 * error, the connection is terminated and recording this packet number 1448 * is helpful when it is printed along with other diagnostics in dtor. 
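 * (imico_packno_is_largest() must be consulted before the number is added
 * to the history, which is why the largest-received timestamp is taken
 * before imico_record_recvd_packno() is called.)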
1449 */ 1450 if (imico_packno_is_largest(conn, pns, packet_in->pi_packno)) 1451 conn->imc_largest_recvd[pns] = packet_in->pi_received; 1452 imico_record_recvd_packno(conn, pns, packet_in->pi_packno); 1453 1454 if (0 != imico_parse_regular_packet(conn, packet_in)) 1455 { 1456 LSQ_DEBUG("connection is now in error state"); 1457 conn->imc_flags |= IMC_ERROR; 1458 return; 1459 } 1460 1461 if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns))) 1462 LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]); 1463 conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns; 1464 ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ]; 1465 conn->imc_incoming_ecn <<= 1; 1466 conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT; 1467} 1468 1469 1470static void 1471ietf_mini_conn_ci_packet_sent (struct lsquic_conn *lconn, 1472 struct lsquic_packet_out *packet_out) 1473{ 1474 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1475 conn->imc_sent_packnos |= 1ULL << packet_out->po_packno; 1476 conn->imc_ecn_packnos |= !!lsquic_packet_out_ecn(packet_out) 1477 << packet_out->po_packno; 1478#if 0 1479 if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)) 1480 { 1481 assert(mc->mc_flags & MC_UNSENT_ACK); 1482 mc->mc_flags &= ~MC_UNSENT_ACK; 1483 } 1484#endif 1485 ++conn->imc_ecn_counts_out[ lsquic_packet_out_pns(packet_out) ] 1486 [ lsquic_packet_out_ecn(packet_out) ]; 1487 if (packet_out->po_header_type == HETY_HANDSHAKE) 1488 conn->imc_flags |= IMC_HSK_PACKET_SENT; 1489 LSQ_DEBUG("%s: packet %"PRIu64" sent", __func__, packet_out->po_packno); 1490} 1491 1492 1493static void 1494ietf_mini_conn_ci_packet_not_sent (struct lsquic_conn *lconn, 1495 struct lsquic_packet_out *packet_out) 1496{ 1497 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1498 size_t packet_size; 1499 1500 packet_out->po_flags &= ~PO_SENT; 1501 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 1502 conn->imc_bytes_out -= packet_size; 1503 LSQ_DEBUG("%s: packet %"PRIu64" not sent", __func__, packet_out->po_packno); 1504} 1505 1506 1507static void 1508imico_return_enc_data (struct ietf_mini_conn *conn, 1509 struct lsquic_packet_out *packet_out) 1510{ 1511 conn->imc_enpub->enp_pmi->pmi_return(conn->imc_enpub->enp_pmi_ctx, 1512 conn->imc_path.np_peer_ctx, packet_out->po_enc_data, 1513 lsquic_packet_out_ipv6(packet_out)); 1514 packet_out->po_flags &= ~PO_ENCRYPTED; 1515 packet_out->po_enc_data = NULL; 1516} 1517 1518 1519static int 1520imico_repackage_packet (struct ietf_mini_conn *conn, 1521 struct lsquic_packet_out *packet_out) 1522{ 1523 const lsquic_packno_t oldno = packet_out->po_packno; 1524 const lsquic_packno_t packno = conn->imc_next_packno++; 1525 if (packno > MAX_PACKETS) 1526 return -1; 1527 1528 LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64, 1529 oldno, packno); 1530 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for " 1531 "resending as packet %"PRIu64, oldno, packno); 1532 packet_out->po_packno = packno; 1533 packet_out->po_flags &= ~PO_SENT; 1534 lsquic_packet_out_set_ecn(packet_out, imico_get_ecn(conn)); 1535 if (packet_out->po_flags & PO_ENCRYPTED) 1536 imico_return_enc_data(conn, packet_out); 1537 TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next); 1538 return 0; 1539} 1540 1541 1542static int 1543imico_handle_losses_and_have_unsent (struct ietf_mini_conn *conn, 1544 lsquic_time_t now) 1545{ 1546 TAILQ_HEAD(, lsquic_packet_out) lost_packets = 1547 TAILQ_HEAD_INITIALIZER(lost_packets); 1548 const struct lsquic_conn *const 
lconn = &conn->imc_conn; 1549 lsquic_packet_out_t *packet_out, *next; 1550 lsquic_time_t retx_to = 0; 1551 unsigned n_to_send = 0; 1552 size_t packet_size; 1553 1554 for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out; 1555 packet_out = next) 1556 { 1557 next = TAILQ_NEXT(packet_out, po_next); 1558 if (packet_out->po_flags & PO_SENT) 1559 { 1560 if (0 == retx_to) 1561 retx_to = imico_calc_retx_timeout(conn); 1562 if (packet_out->po_sent + retx_to < now) 1563 { 1564 LSQ_DEBUG("packet %"PRIu64" has been lost (rto: %"PRIu64")", 1565 packet_out->po_packno, retx_to); 1566 TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next); 1567 TAILQ_INSERT_TAIL(&lost_packets, packet_out, po_next); 1568 } 1569 } 1570 else if (packet_size = lsquic_packet_out_total_sz(lconn, packet_out), 1571 imico_can_send(conn, packet_size)) 1572 ++n_to_send; 1573 else 1574 break; 1575 } 1576 1577 conn->imc_hsk_count += !TAILQ_EMPTY(&lost_packets); 1578 1579 while ((packet_out = TAILQ_FIRST(&lost_packets))) 1580 { 1581 TAILQ_REMOVE(&lost_packets, packet_out, po_next); 1582 if ((packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK) 1583 && 0 == imico_repackage_packet(conn, packet_out)) 1584 { 1585 packet_size = lsquic_packet_out_total_sz(lconn, packet_out); 1586 if (imico_can_send(conn, packet_size)) 1587 ++n_to_send; 1588 } 1589 else 1590 imico_destroy_packet(conn, packet_out); 1591 } 1592 1593 return n_to_send > 0; 1594} 1595 1596 1597static int 1598imico_have_packets_to_send (struct ietf_mini_conn *conn, lsquic_time_t now) 1599{ 1600 return imico_handle_losses_and_have_unsent(conn, now); 1601} 1602 1603 1604void 1605lsquic_imico_rechist_init (struct ietf_mini_rechist *rechist, 1606 const struct ietf_mini_conn *conn, enum packnum_space pns) 1607{ 1608 rechist->conn = conn; 1609 rechist->pns = pns; 1610 if (conn->imc_flags & IMC_TRECHIST) 1611 lsquic_trechist_iter(&rechist->u.trechist_iter, 1612 conn->imc_recvd_packnos.trechist.hist_masks[pns], 1613 conn->imc_recvd_packnos.trechist.hist_elems + TRECHIST_MAX_RANGES * pns); 1614 else 1615 { 1616 rechist->u.bitmask.cur_set = 0; 1617 rechist->u.bitmask.cur_idx = 0; 1618 } 1619} 1620 1621 1622static lsquic_time_t 1623imico_rechist_largest_recv (void *rechist_ctx) 1624{ 1625 struct ietf_mini_rechist *rechist = rechist_ctx; 1626 return rechist->conn->imc_largest_recvd[ rechist->pns ]; 1627} 1628 1629 1630static const struct lsquic_packno_range * 1631imico_bitmask_rechist_next (struct ietf_mini_rechist *rechist) 1632{ 1633 const struct ietf_mini_conn *conn = rechist->conn; 1634 packno_set_t packnos; 1635 int i; 1636 1637 packnos = rechist->u.bitmask.cur_set; 1638 if (0 == packnos) 1639 return NULL; 1640 1641 /* There may be a faster way to do this, but for now, we just want 1642 * correctness. 
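 * The scan below walks cur_idx downward: the first set bit found becomes the
 * high end of the range, the run of consecutive set bits beneath it extends
 * the low end, and the remaining lower bits are saved in cur_set for the
 * next call.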
1643 */ 1644 for (i = rechist->u.bitmask.cur_idx; i >= 0; --i) 1645 if (packnos & (1ULL << i)) 1646 { 1647 rechist->u.bitmask.range.low = i; 1648 rechist->u.bitmask.range.high = i; 1649 break; 1650 } 1651 assert(i >= 0); /* We must have hit at least one bit */ 1652 --i; 1653 for ( ; i >= 0 && (packnos & (1ULL << i)); --i) 1654 rechist->u.bitmask.range.low = i; 1655 if (i >= 0) 1656 { 1657 rechist->u.bitmask.cur_set = packnos & ((1ULL << i) - 1); 1658 rechist->u.bitmask.cur_idx = i; 1659 } 1660 else 1661 rechist->u.bitmask.cur_set = 0; 1662 LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__, 1663 rechist->u.bitmask.range.low, rechist->u.bitmask.range.high); 1664 return &rechist->u.bitmask.range; 1665} 1666 1667 1668const struct lsquic_packno_range * 1669lsquic_imico_rechist_next (void *rechist_ctx) 1670{ 1671 struct ietf_mini_rechist *rechist = rechist_ctx; 1672 1673 if (rechist->conn->imc_flags & IMC_TRECHIST) 1674 return lsquic_trechist_next(&rechist->u.trechist_iter); 1675 else 1676 return imico_bitmask_rechist_next(rechist); 1677} 1678 1679 1680const struct lsquic_packno_range * 1681lsquic_imico_rechist_first (void *rechist_ctx) 1682{ 1683 struct ietf_mini_rechist *rechist = rechist_ctx; 1684 1685 if (rechist->conn->imc_flags & IMC_TRECHIST) 1686 return lsquic_trechist_first(&rechist->u.trechist_iter); 1687 else 1688 { 1689 rechist->u.bitmask.cur_set 1690 = rechist->conn->imc_recvd_packnos.bitmasks[ rechist->pns ]; 1691 rechist->u.bitmask.cur_idx 1692 = highest_bit_set(rechist->u.bitmask.cur_set); 1693 return lsquic_imico_rechist_next(rechist_ctx); 1694 } 1695} 1696 1697 1698static const enum header_type pns2hety[] = 1699{ 1700 [PNS_INIT] = HETY_INITIAL, 1701 [PNS_HSK] = HETY_HANDSHAKE, 1702 [PNS_APP] = HETY_NOT_SET, 1703}; 1704 1705 1706static int 1707imico_generate_ack (struct ietf_mini_conn *conn, enum packnum_space pns, 1708 lsquic_time_t now) 1709{ 1710 struct lsquic_packet_out *packet_out; 1711 enum header_type header_type; 1712 struct ietf_mini_rechist rechist; 1713 int not_used_has_missing, len; 1714 uint64_t ecn_counts_buf[4]; 1715 const uint64_t *ecn_counts; 1716 1717 header_type = pns2hety[pns]; 1718 1719 if (conn->imc_incoming_ecn) 1720 { 1721 ecn_counts_buf[0] = conn->imc_ecn_counts_in[pns][0]; 1722 ecn_counts_buf[1] = conn->imc_ecn_counts_in[pns][1]; 1723 ecn_counts_buf[2] = conn->imc_ecn_counts_in[pns][2]; 1724 ecn_counts_buf[3] = conn->imc_ecn_counts_in[pns][3]; 1725 ecn_counts = ecn_counts_buf; 1726 } 1727 else 1728 ecn_counts = NULL; 1729 1730 packet_out = imico_get_packet_out(conn, header_type, 0); 1731 if (!packet_out) 1732 return -1; 1733 1734 /* Generate ACK frame */ 1735 lsquic_imico_rechist_init(&rechist, conn, pns); 1736 len = conn->imc_conn.cn_pf->pf_gen_ack_frame( 1737 packet_out->po_data + packet_out->po_data_sz, 1738 lsquic_packet_out_avail(packet_out), lsquic_imico_rechist_first, 1739 lsquic_imico_rechist_next, imico_rechist_largest_recv, &rechist, 1740 now, ¬_used_has_missing, &packet_out->po_ack2ed, ecn_counts); 1741 if (len < 0) 1742 { 1743 LSQ_WARN("could not generate ACK frame"); 1744 return -1; 1745 } 1746 EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->imc_conn.cn_pf, 1747 packet_out->po_data + packet_out->po_data_sz, len); 1748 packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK; 1749 packet_out->po_data_sz += len; 1750 packet_out->po_regen_sz += len; 1751 conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << pns); 1752 LSQ_DEBUG("wrote ACK frame of size %d in %s", len, lsquic_pns2str[pns]); 1753 return 0; 1754} 1755 1756 1757static int 
1758imico_generate_acks (struct ietf_mini_conn *conn, lsquic_time_t now) 1759{ 1760 enum packnum_space pns; 1761 1762 for (pns = PNS_INIT; pns < IMICO_N_PNS; ++pns) 1763 if (conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns) 1764 && !(pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT))) 1765 if (0 != imico_generate_ack(conn, pns, now)) 1766 return -1; 1767 1768 return 0; 1769} 1770 1771 1772static void 1773imico_generate_conn_close (struct ietf_mini_conn *conn) 1774{ 1775 struct lsquic_packet_out *packet_out; 1776 enum header_type header_type; 1777 enum packnum_space pns, pns_max; 1778 unsigned error_code; 1779 const char *reason; 1780 size_t need; 1781 int sz, rlen, is_app; 1782 char reason_buf[0x20]; 1783 1784 if (conn->imc_flags & IMC_ABORT_ERROR) 1785 { 1786 is_app = !!(conn->imc_flags & IMC_ABORT_ISAPP); 1787 error_code = conn->imc_error_code; 1788 reason = NULL; 1789 rlen = 0; 1790 } 1791 else if (conn->imc_flags & IMC_TLS_ALERT) 1792 { 1793 is_app = 0; 1794 error_code = 0x100 + conn->imc_tls_alert; 1795 if (ALERT_NO_APPLICATION_PROTOCOL == conn->imc_tls_alert) 1796 reason = "no suitable application protocol"; 1797 else 1798 { 1799 snprintf(reason_buf, sizeof(reason_buf), "TLS alert %"PRIu8, 1800 conn->imc_tls_alert); 1801 reason = reason_buf; 1802 } 1803 rlen = strlen(reason); 1804 } 1805 else if (conn->imc_flags & IMC_BAD_TRANS_PARAMS) 1806 { 1807 is_app = 0; 1808 error_code = TEC_TRANSPORT_PARAMETER_ERROR; 1809 reason = "bad transport parameters"; 1810 rlen = 24; 1811 } 1812 else if (conn->imc_flags & IMC_HSK_FAILED) 1813 { 1814 is_app = 0; 1815 error_code = TEC_NO_ERROR; 1816 reason = "handshake failed"; 1817 rlen = 16; 1818 } 1819 else if (conn->imc_flags & IMC_PARSE_FAILED) 1820 { 1821 is_app = 0; 1822 error_code = TEC_FRAME_ENCODING_ERROR; 1823 reason = "cannot decode frame"; 1824 rlen = 19; 1825 } 1826 else 1827 { 1828 is_app = 0; 1829 error_code = TEC_INTERNAL_ERROR; 1830 reason = NULL; 1831 rlen = 0; 1832 } 1833 1834 1835/* [draft-ietf-quic-transport-28] Section 10.3.1: 1836 * 1837 " A client will always know whether the server has Handshake keys (see 1838 " Section 17.2.2.1), but it is possible that a server does not know 1839 " whether the client has Handshake keys. Under these circumstances, a 1840 " server SHOULD send a CONNECTION_CLOSE frame in both Handshake and 1841 " Initial packets to ensure that at least one of them is processable by 1842 " the client. 1843--- 8< --- 1844 " Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake 1845 " packet could expose application state or be used to alter application 1846 " state. A CONNECTION_CLOSE of type 0x1d MUST be replaced by a 1847 " CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or 1848 " Handshake packets. Otherwise, information about the application 1849 " state might be revealed. Endpoints MUST clear the value of the 1850 " Reason Phrase field and SHOULD use the APPLICATION_ERROR code when 1851 " converting to a CONNECTION_CLOSE of type 0x1c. 
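 *
 * The version check below performs exactly this conversion: for versions
 * newer than ID-27, an application-level close is rewritten as a
 * transport-level close with code APPLICATION_ERROR and an empty reason
 * phrase.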
1852 */ 1853 LSQ_DEBUG("sending CONNECTION_CLOSE, is_app: %d, error code: %u, " 1854 "reason: %.*s", is_app, error_code, rlen, reason); 1855 if (is_app && conn->imc_conn.cn_version > LSQVER_ID27) 1856 { 1857 LSQ_DEBUG("convert to 0x1C, replace code and reason"); 1858 is_app = 0; 1859 error_code = TEC_APPLICATION_ERROR; 1860 rlen = 0; 1861 } 1862 1863 pns = (conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3; 1864 switch ((!!(conn->imc_flags & IMC_HSK_PACKET_SENT) << 1) 1865 | (pns == PNS_HSK) /* Handshake packet received */) 1866 { 1867 case (0 << 1) | 0: 1868 pns = PNS_INIT; 1869 pns_max = PNS_INIT; 1870 break; 1871 case (1 << 1) | 0: 1872 pns = PNS_INIT; 1873 pns_max = PNS_HSK; 1874 break; 1875 default: 1876 pns = PNS_HSK; 1877 pns_max = PNS_HSK; 1878 break; 1879 } 1880 1881 need = conn->imc_conn.cn_pf->pf_connect_close_frame_size(is_app, 1882 error_code, 0, rlen); 1883 LSQ_DEBUG("will generate %u CONNECTION_CLOSE frame%.*s", 1884 pns_max - pns + 1, pns_max > pns, "s"); 1885 do 1886 { 1887 header_type = pns2hety[pns]; 1888 packet_out = imico_get_packet_out(conn, header_type, need); 1889 if (!packet_out) 1890 return; 1891 sz = conn->imc_conn.cn_pf->pf_gen_connect_close_frame( 1892 packet_out->po_data + packet_out->po_data_sz, 1893 lsquic_packet_out_avail(packet_out), is_app, error_code, reason, 1894 rlen); 1895 if (sz >= 0) 1896 { 1897 packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE; 1898 packet_out->po_data_sz += sz; 1899 LSQ_DEBUG("generated CONNECTION_CLOSE frame"); 1900 } 1901 else 1902 LSQ_WARN("could not generate CONNECTION_CLOSE frame"); 1903 ++pns; 1904 } 1905 while (pns <= pns_max); 1906} 1907 1908 1909static int 1910imico_generate_handshake_done (struct ietf_mini_conn *conn) 1911{ 1912 struct lsquic_packet_out *packet_out; 1913 unsigned need; 1914 int sz; 1915 1916 need = conn->imc_conn.cn_pf->pf_handshake_done_frame_size(); 1917 packet_out = imico_get_packet_out(conn, HETY_NOT_SET, need); 1918 if (!packet_out) 1919 return -1; 1920 sz = conn->imc_conn.cn_pf->pf_gen_handshake_done_frame( 1921 packet_out->po_data + packet_out->po_data_sz, 1922 lsquic_packet_out_avail(packet_out)); 1923 if (sz < 0) 1924 { 1925 LSQ_WARN("could not generate HANDSHAKE_DONE frame"); 1926 return -1; 1927 } 1928 1929 packet_out->po_frame_types |= 1 << QUIC_FRAME_HANDSHAKE_DONE; 1930 packet_out->po_data_sz += sz; 1931 LSQ_DEBUG("generated HANDSHAKE_DONE frame"); 1932 conn->imc_flags |= IMC_HSK_DONE_SENT; 1933 1934 return 0; 1935} 1936 1937 1938static enum tick_st 1939ietf_mini_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now) 1940{ 1941 struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn; 1942 enum tick_st tick; 1943 1944 if (conn->imc_created + conn->imc_enpub->enp_settings.es_handshake_to < now) 1945 { 1946 LSQ_DEBUG("connection expired: closing"); 1947 return TICK_CLOSE; 1948 } 1949 1950 1951 if (conn->imc_flags & (IMC_QUEUED_ACK_INIT|IMC_QUEUED_ACK_HSK)) 1952 { 1953 if (0 != imico_generate_acks(conn, now)) 1954 { 1955 conn->imc_flags |= IMC_ERROR; 1956 return TICK_CLOSE; 1957 } 1958 } 1959 1960 1961 tick = 0; 1962 1963 if (conn->imc_flags & IMC_ERROR) 1964 { 1965 close_on_error: 1966 if (!(conn->imc_flags & IMC_CLOSE_RECVD)) 1967 imico_generate_conn_close(conn); 1968 tick |= TICK_CLOSE; 1969 } 1970 else if (conn->imc_flags & IMC_HSK_OK) 1971 { 1972 if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session)) 1973 LSQ_DEBUG("still in init, defer HANDSHAKE_DONE"); 1974 else if (0 != imico_generate_handshake_done(conn)) 1975 goto close_on_error; 1976 tick |= TICK_PROMOTE; 1977 } 
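
    /* Whatever else happened on this tick, detect losses and count unsent
     * packets: TICK_SEND tells the engine this connection has packets to
     * hand out via ci_next_packet_to_send(), while TICK_QUIET means there
     * is nothing to transmit right now.
     */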
    if (imico_have_packets_to_send(conn, now))
        tick |= TICK_SEND;
    else
        tick |= TICK_QUIET;

    LSQ_DEBUG("Return TICK %d", tick);
    return tick;
}


static void
ietf_mini_conn_ci_internal_error (struct lsquic_conn *lconn,
                                                    const char *format, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_INFO("internal error reported");
    conn->imc_flags |= IMC_ERROR;
}


static void
ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                                unsigned error_code, const char *fmt, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    va_list ap;
    const char *err_str, *percent;
    char err_buf[0x100];

    percent = strchr(fmt, '%');
    if (percent)
    {
        va_start(ap, fmt);
        vsnprintf(err_buf, sizeof(err_buf), fmt, ap);
        va_end(ap);
        err_str = err_buf;
    }
    else
        err_str = fmt;
    LSQ_INFO("abort error: is_app: %d; error code: %u; error str: %s",
        is_app, error_code, err_str);
    conn->imc_flags |= IMC_ERROR|IMC_ABORT_ERROR;
    if (is_app)
        conn->imc_flags |= IMC_ABORT_ISAPP;
    conn->imc_error_code = error_code;
}


static struct network_path *
ietf_mini_conn_ci_get_path (struct lsquic_conn *lconn,
                                            const struct sockaddr *sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    return &conn->imc_path;
}


static const lsquic_cid_t *
ietf_mini_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    if (conn->imc_path.np_dcid.len)
        return &conn->imc_path.np_dcid;
    else
        return CN_SCID(lconn);
}


static unsigned char
ietf_mini_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct sockaddr *orig_peer_sa;
    struct lsquic_packet_out *packet_out;
    size_t len;
    char path_str[4][INET6_ADDRSTRLEN + sizeof(":65535")];

    if (NP_IS_IPv6(&conn->imc_path) != (AF_INET6 == peer_sa->sa_family))
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if ((packet_out->po_flags & (PO_SENT|PO_ENCRYPTED)) == PO_ENCRYPTED)
                imico_return_enc_data(conn, packet_out);

    orig_peer_sa = NP_PEER_SA(&conn->imc_path);
    if (orig_peer_sa->sa_family == 0)
        LSQ_DEBUG("connection to %s from %s", SA2STR(local_sa, path_str[0]),
                                                SA2STR(peer_sa, path_str[1]));
    else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa)
            && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa)))
    {
        LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)",
            SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]),
            SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]),
            SA2STR(local_sa, path_str[2]),
            SA2STR(peer_sa, path_str[3]));
        conn->imc_flags |= IMC_PATH_CHANGED;
    }
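
    /* Store the local and peer addresses.  Only as many bytes are copied
     * as the address family requires: a sockaddr_in for IPv4, or a
     * sockaddr_in6 for IPv6.
     */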
    len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                         : sizeof(struct sockaddr_in6);

    memcpy(conn->imc_path.np_peer_addr, peer_sa, len);
    memcpy(conn->imc_path.np_local_addr, local_sa, len);
    conn->imc_path.np_peer_ctx = peer_ctx;
    return 0;
}


static void
ietf_mini_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    conn->imc_bytes_in += garbage_sz;
    LSQ_DEBUG("count %zu bytes of garbage, new value: %u bytes", garbage_sz,
        conn->imc_bytes_in);
}


static const struct conn_iface mini_conn_ietf_iface = {
    .ci_abort_error = ietf_mini_conn_ci_abort_error,
    .ci_client_call_on_new = ietf_mini_conn_ci_client_call_on_new,
    .ci_count_garbage = ietf_mini_conn_ci_count_garbage,
    .ci_destroy = ietf_mini_conn_ci_destroy,
    .ci_get_engine = ietf_mini_conn_ci_get_engine,
    .ci_get_log_cid = ietf_mini_conn_ci_get_log_cid,
    .ci_get_path = ietf_mini_conn_ci_get_path,
    .ci_hsk_done = ietf_mini_conn_ci_hsk_done,
    .ci_internal_error = ietf_mini_conn_ci_internal_error,
    .ci_is_tickable = ietf_mini_conn_ci_is_tickable,
    .ci_next_packet_to_send = ietf_mini_conn_ci_next_packet_to_send,
    .ci_next_tick_time = ietf_mini_conn_ci_next_tick_time,
    .ci_packet_in = ietf_mini_conn_ci_packet_in,
    .ci_packet_not_sent = ietf_mini_conn_ci_packet_not_sent,
    .ci_packet_sent = ietf_mini_conn_ci_packet_sent,
    .ci_record_addrs = ietf_mini_conn_ci_record_addrs,
    .ci_tick = ietf_mini_conn_ci_tick,
    .ci_tls_alert = ietf_mini_conn_ci_tls_alert,
};