lsquic_mini_conn_ietf.c revision cd35ff02
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_mini_conn_ietf.c -- Mini connection used by the IETF QUIC
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>
#include <stdlib.h>

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_mm.h"
#include "lsquic_malo.h"
#include "lsquic_engine_public.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_rtt.h"
#include "lsquic_util.h"
#include "lsquic_enc_sess.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_ev_log.h"
#include "lsquic_trans_params.h"
#include "lsquic_ietf.h"
#include "lsquic_packet_ietf.h"
#include "lsquic_attq.h"
#include "lsquic_alarmset.h"

#define LSQUIC_LOGGER_MODULE LSQLM_MINI_CONN
#define LSQUIC_LOG_CONN_ID lsquic_conn_log_cid(&conn->imc_conn)
#include "lsquic_logger.h"

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static const struct conn_iface mini_conn_ietf_iface;

static unsigned highest_bit_set (unsigned long long);

static int
imico_can_send (const struct ietf_mini_conn *, size_t);


static const enum header_type el2hety[] =
{
    [ENC_LEV_INIT]  = HETY_HANDSHAKE,
    [ENC_LEV_CLEAR] = HETY_INITIAL,
    [ENC_LEV_FORW]  = HETY_NOT_SET,
    [ENC_LEV_EARLY] = 0,    /* Invalid */
};


static void
imico_destroy_packet (struct ietf_mini_conn *conn,
                      struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, conn->imc_enpub,
                              conn->imc_path.np_peer_ctx);
}


int
lsquic_mini_conn_ietf_ecn_ok (const struct ietf_mini_conn *conn)
{
    packno_set_t acked;

    /* First flight has only Initial and Handshake packets */
    acked = conn->imc_acked_packnos[PNS_INIT]
          | conn->imc_acked_packnos[PNS_HSK]
          ;
    return 0 != (conn->imc_ecn_packnos & acked);
}


#define imico_ecn_ok lsquic_mini_conn_ietf_ecn_ok


static enum ecn
imico_get_ecn (struct ietf_mini_conn *conn)
{
    if (!conn->imc_enpub->enp_settings.es_ecn)
        return ECN_NOT_ECT;
    else if (!conn->imc_sent_packnos /* We set ECT0 in first flight */
             || imico_ecn_ok(conn))
        return ECN_ECT0;
    else
        return ECN_NOT_ECT;
}


static struct lsquic_packet_out *
imico_get_packet_out (struct ietf_mini_conn *conn,
                      enum header_type header_type, size_t need)
{
    struct lsquic_packet_out *packet_out;
    enum ecn ecn;

    if (need)
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if (!(packet_out->po_flags & PO_SENT)
                    && packet_out->po_header_type == header_type
                    && lsquic_packet_out_avail(packet_out) >= need)
                return packet_out;

    if (conn->imc_next_packno >= MAX_PACKETS)
    {
        LSQ_DEBUG("ran out of outgoing packet numbers, won't allocate packet");
        return NULL;
    }

    packet_out = lsquic_packet_out_new(&conn->imc_enpub->enp_mm, NULL, 1,
            &conn->imc_conn, IQUIC_PACKNO_LEN_1, NULL, NULL, &conn->imc_path);
    if (!packet_out)
    {
        LSQ_WARN("could not allocate packet: %s", strerror(errno));
        return NULL;
    }

    packet_out->po_header_type = header_type;
    packet_out->po_packno = conn->imc_next_packno++;
    packet_out->po_flags |= PO_MINI;
    lsquic_packet_out_set_pns(packet_out, lsquic_hety2pns[header_type]);
    ecn = imico_get_ecn(conn);
    packet_out->po_lflags |= ecn << POECN_SHIFT;
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    packet_out->po_loss_chain = packet_out;
    return packet_out;
}


static struct ietf_mini_conn *
cryst_get_conn (const struct mini_crypto_stream *cryst)
{
    return (void *)
        ((unsigned char *) (cryst - cryst->mcs_enc_level)
            - offsetof(struct ietf_mini_conn, imc_streams));
}


struct msg_ctx
{
    const unsigned char       *buf;
    const unsigned char *const end;
};


static size_t
read_from_msg_ctx (void *ctx, void *buf, size_t len, int *fin)
{
    struct msg_ctx *msg_ctx = ctx;
    if (len > (uintptr_t) (msg_ctx->end - msg_ctx->buf))
        len = msg_ctx->end - msg_ctx->buf;
    memcpy(buf, msg_ctx->buf, len);
    msg_ctx->buf += len;
    return len;
}


static int
imico_chlo_has_been_consumed (const struct ietf_mini_conn *conn)
{
    return conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off > 3
        && conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off >= conn->imc_ch_len;
}


static int
imico_maybe_process_params (struct ietf_mini_conn *conn)
{
    const struct transport_params *params;

    if (imico_chlo_has_been_consumed(conn)
        && (conn->imc_flags & (IMC_ENC_SESS_INITED|IMC_HAVE_TP))
                                                    == IMC_ENC_SESS_INITED)
    {
        params = conn->imc_conn.cn_esf.i->esfi_get_peer_transport_params(
                                            conn->imc_conn.cn_enc_session);
        if (params)
        {
            conn->imc_flags |= IMC_HAVE_TP;
            conn->imc_ack_exp = params->tp_ack_delay_exponent;
            if (params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE))
            {
                if (params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]
                                            < conn->imc_path.np_pack_size)
                    conn->imc_path.np_pack_size =
                                params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE];
            }
            LSQ_DEBUG("read transport params, packet size is set to %hu bytes",
                      conn->imc_path.np_pack_size);
        }
        else
        {
            conn->imc_flags |= IMC_BAD_TRANS_PARAMS;
            return -1;
        }
    }

    return 0;
}


static ssize_t
imico_stream_write (void *stream, const void *bufp, size_t bufsz)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct lsquic_conn *const lconn = &conn->imc_conn;
    const struct parse_funcs *const pf = lconn->cn_pf;
    struct msg_ctx msg_ctx = { bufp, (unsigned char *) bufp + bufsz, };
    struct lsquic_packet_out *packet_out;
    size_t header_sz, need;
    const unsigned char *p;
    int len;

    if (0 != imico_maybe_process_params(conn))
        return -1;

    if (PNS_INIT == lsquic_enclev2pns[ cryst->mcs_enc_level ]
                                    && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_WARN("trying to write at the ignored Initial level");
        return bufsz;
    }

    while (msg_ctx.buf < msg_ctx.end)
    {
        header_sz = lconn->cn_pf->pf_calc_crypto_frame_header_sz(
                            cryst->mcs_write_off, msg_ctx.end - msg_ctx.buf);
        need = header_sz + 1;
        packet_out = imico_get_packet_out(conn,
                                    el2hety[ cryst->mcs_enc_level ], need);
        if (!packet_out)
            return -1;

        p = msg_ctx.buf;
        len = pf->pf_gen_crypto_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), 0, cryst->mcs_write_off, 0,
                msg_ctx.end - msg_ctx.buf, read_from_msg_ctx, &msg_ctx);
        if (len < 0)
            return len;
        EV_LOG_GENERATED_CRYPTO_FRAME(LSQUIC_LOG_CONN_ID, pf,
                packet_out->po_data + packet_out->po_data_sz, len);
        packet_out->po_data_sz += len;
        packet_out->po_frame_types |= 1 << QUIC_FRAME_CRYPTO;
        packet_out->po_flags |= PO_HELLO;
        cryst->mcs_write_off += msg_ctx.buf - p;
    }

    assert(msg_ctx.buf == msg_ctx.end);
    return bufsz;
}


static int
imico_stream_flush (void *stream)
{
    return 0;
}


static struct stream_frame *
imico_find_stream_frame (const struct ietf_mini_conn *conn,
                         enum enc_level enc_level, unsigned read_off)
{
    struct stream_frame *frame;

    if (conn->imc_last_in.frame && enc_level == conn->imc_last_in.enc_level
            && read_off == DF_ROFF(conn->imc_last_in.frame))
        return conn->imc_last_in.frame;

    TAILQ_FOREACH(frame, &conn->imc_crypto_frames, next_frame)
        if (enc_level == frame->stream_id && read_off == DF_ROFF(frame))
            return frame;

    return NULL;
}


static void
imico_read_chlo_size (struct ietf_mini_conn *conn, const unsigned char *buf,
                      size_t sz)
{
    const unsigned char *const end = buf + sz;

    assert(conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off < 4);
    switch (conn->imc_streams[ENC_LEV_CLEAR].mcs_read_off)
    {
    case 0:
        if (buf == end)
            return;
        if (*buf != 1)
        {
            LSQ_DEBUG("Does not begin with ClientHello");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 1:
        if (buf == end)
            return;
        if (*buf != 0)
        {
            LSQ_DEBUG("ClientHello larger than 16K");
            conn->imc_flags |= IMC_ERROR;
            return;
        }
        ++buf;
        /* fall-through */
    case 2:
        if (buf == end)
            return;
        conn->imc_ch_len = *buf << 8;
        ++buf;
        /* fall-through */
    default:
        if (buf == end)
            return;
        conn->imc_ch_len |= *buf;
    }
}


static ssize_t
imico_stream_readf (void *stream,
        size_t (*readf)(void *, const unsigned char *, size_t, int), void *ctx)
{
    struct mini_crypto_stream *const cryst = stream;
    struct ietf_mini_conn *const conn = cryst_get_conn(cryst);
    struct stream_frame *frame;
    const unsigned char *buf;
    size_t nread, total_read;
    unsigned avail;

    total_read = 0;
    while ((frame = imico_find_stream_frame(conn, cryst->mcs_enc_level,
                                            cryst->mcs_read_off)))
    {
        avail = DF_SIZE(frame) - frame->data_frame.df_read_off;
        buf = frame->data_frame.df_data + frame->data_frame.df_read_off;
        nread = readf(ctx, buf, avail, DF_FIN(frame));
        if (cryst->mcs_enc_level == ENC_LEV_CLEAR && cryst->mcs_read_off < 4)
            imico_read_chlo_size(conn, buf, nread);
        total_read += nread;
        cryst->mcs_read_off += nread;
        frame->data_frame.df_read_off += nread;
        LSQ_DEBUG("read %zu bytes at offset %"PRIu64" on enc level %u", nread,
                  DF_ROFF(frame), cryst->mcs_enc_level);
        if (DF_END(frame) == DF_ROFF(frame))
        {
            if (frame == conn->imc_last_in.frame)
                conn->imc_last_in.frame = NULL;
            else
            {
                TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame);
                --conn->imc_n_crypto_frames;
                conn->imc_crypto_frames_sz -= DF_SIZE(frame);
                lsquic_packet_in_put(&conn->imc_enpub->enp_mm,
                                     frame->packet_in);
                lsquic_malo_put(frame);
            }
        }
        if (nread < avail)
            break;
    }

    if (total_read > 0)
        return total_read;
    else
    {
        /* CRYPTO streams never end, so zero bytes read always means
         * EWOULDBLOCK
         */
        errno = EWOULDBLOCK;
        return -1;
    }
}

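/* Set or clear the "want read"/"want write" flag given by `bit' and return
 * the previous value of that flag.
 */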
static int
imico_stream_wantX (struct mini_crypto_stream *cryst, int bit, int is_want)
{
    int old;

    old = (cryst->mcs_flags & (1 << bit)) > 0;
    cryst->mcs_flags &= ~(1 << bit);
    cryst->mcs_flags |= !!is_want << bit;
    return old;
}


static int
imico_stream_wantwrite (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTWRITE, is_want);
}


static int
imico_stream_wantread (void *stream, int is_want)
{
    return imico_stream_wantX(stream, MCSBIT_WANTREAD, is_want);
}


static enum enc_level
imico_stream_enc_level (void *stream)
{
    struct mini_crypto_stream *const cryst = stream;
    return cryst->mcs_enc_level;
}


static const struct crypto_stream_if crypto_stream_if =
{
    .csi_write      = imico_stream_write,
    .csi_flush      = imico_stream_flush,
    .csi_readf      = imico_stream_readf,
    .csi_wantwrite  = imico_stream_wantwrite,
    .csi_wantread   = imico_stream_wantread,
    .csi_enc_level  = imico_stream_enc_level,
};


static int
is_first_packet_ok (const struct lsquic_packet_in *packet_in,
                    size_t udp_payload_size)
{
    if (udp_payload_size < IQUIC_MIN_INIT_PACKET_SZ)
    {
        /* [draft-ietf-quic-transport-24] Section 14 */
        LSQ_LOG1(LSQ_LOG_DEBUG, "incoming UDP payload too small: %zu bytes",
                 udp_payload_size);
        return 0;
    }
    /* TODO: Move decryption of the first packet into this function? */
    return 1;   /* TODO */
}


static void
imico_peer_addr_validated (struct ietf_mini_conn *conn, const char *how)
{
    if (!(conn->imc_flags & IMC_ADDR_VALIDATED))
    {
        conn->imc_flags |= IMC_ADDR_VALIDATED;
        LSQ_DEBUG("peer address validated (%s)", how);
    }
}


struct lsquic_conn *
lsquic_mini_conn_ietf_new (struct lsquic_engine_public *enpub,
        const struct lsquic_packet_in *packet_in,
        enum lsquic_version version, int is_ipv4, const lsquic_cid_t *odcid,
        size_t udp_payload_size)
{
    struct ietf_mini_conn *conn;
    enc_session_t *enc_sess;
    enum enc_level i;
    const struct enc_session_funcs_iquic *esfi;

    if (!is_first_packet_ok(packet_in, udp_payload_size))
        return NULL;

    conn = lsquic_malo_get(enpub->enp_mm.malo.mini_conn_ietf);
    if (!conn)
    {
        LSQ_LOG1(LSQ_LOG_WARN, "cannot allocate mini connection: %s",
                 strerror(errno));
        return NULL;
    }
    memset(conn, 0, sizeof(*conn));
    conn->imc_conn.cn_if = &mini_conn_ietf_iface;
    conn->imc_conn.cn_cces = conn->imc_cces;
    conn->imc_conn.cn_n_cces = sizeof(conn->imc_cces)
                             / sizeof(conn->imc_cces[0]);
    conn->imc_cces[0].cce_cid = packet_in->pi_dcid;
    conn->imc_cces[0].cce_flags = CCE_USED;
    conn->imc_conn.cn_cces_mask = 1;
    lsquic_scid_from_packet_in(packet_in, &conn->imc_path.np_dcid);
    LSQ_DEBUGC("recv SCID from client %"CID_FMT,
               CID_BITS(&conn->imc_cces[0].cce_cid));
    LSQ_DEBUGC("recv DCID from client %"CID_FMT,
               CID_BITS(&conn->imc_path.np_dcid));

    /* Generate new SCID.  Since it is not the original SCID, it is given
     * a sequence number (0) and therefore can be retired by the client.
     */
    lsquic_generate_cid(&conn->imc_conn.cn_cces[1].cce_cid,
                        enpub->enp_settings.es_scid_len);
    LSQ_DEBUGC("generated SCID %"CID_FMT" at index %u, switching to it",
               CID_BITS(&conn->imc_conn.cn_cces[1].cce_cid), 1);
    conn->imc_conn.cn_cces[1].cce_flags = CCE_SEQNO | CCE_USED;
    conn->imc_conn.cn_cces_mask |= 1u << 1;
    conn->imc_conn.cn_cur_cce_idx = 1;

    conn->imc_conn.cn_flags = LSCONN_MINI|LSCONN_IETF|LSCONN_SERVER;
    conn->imc_conn.cn_version = version;

    for (i = 0; i < N_ENC_LEVS; ++i)
    {
        conn->imc_streams[i].mcs_enc_level = i;
        conn->imc_stream_ps[i] = &conn->imc_streams[i];
    }

    esfi = select_esf_iquic_by_ver(version);
    enc_sess = esfi->esfi_create_server(enpub, &conn->imc_conn,
                &packet_in->pi_dcid, conn->imc_stream_ps, &crypto_stream_if,
                &conn->imc_cces[0].cce_cid, &conn->imc_path.np_dcid);
    if (!enc_sess)
    {
        lsquic_malo_put(conn);
        return NULL;
    }

    conn->imc_enpub = enpub;
    conn->imc_created = packet_in->pi_received;
    if (enpub->enp_settings.es_base_plpmtu)
        conn->imc_path.np_pack_size = enpub->enp_settings.es_base_plpmtu;
    else if (is_ipv4)
        conn->imc_path.np_pack_size = IQUIC_MAX_IPv4_PACKET_SZ;
    else
        conn->imc_path.np_pack_size = IQUIC_MAX_IPv6_PACKET_SZ;
    conn->imc_conn.cn_pf = select_pf_by_ver(version);
    conn->imc_conn.cn_esf.i = esfi;
    conn->imc_conn.cn_enc_session = enc_sess;
    conn->imc_conn.cn_esf_c = select_esf_common_by_ver(version);
    TAILQ_INIT(&conn->imc_packets_out);
    TAILQ_INIT(&conn->imc_app_packets);
    TAILQ_INIT(&conn->imc_crypto_frames);
    if (odcid)
        imico_peer_addr_validated(conn, "odcid");

    LSQ_DEBUG("created mini connection object %p; max packet size=%hu",
              conn, conn->imc_path.np_pack_size);
    return &conn->imc_conn;
}


static void
ietf_mini_conn_ci_client_call_on_new (struct lsquic_conn *lconn)
{
    assert(0);
}


static void
ietf_mini_conn_ci_destroy (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    struct lsquic_packet_out *packet_out;
    struct lsquic_packet_in *packet_in;
    struct stream_frame *frame;

    while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out)))
    {
        TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
        imico_destroy_packet(conn, packet_out);
    }
    while ((packet_in = TAILQ_FIRST(&conn->imc_app_packets)))
    {
        TAILQ_REMOVE(&conn->imc_app_packets, packet_in, pi_next);
        lsquic_packet_in_put(&conn->imc_enpub->enp_mm, packet_in);
    }
    while ((frame = TAILQ_FIRST(&conn->imc_crypto_frames)))
    {
        TAILQ_REMOVE(&conn->imc_crypto_frames, frame, next_frame);
        lsquic_packet_in_put(&conn->imc_enpub->enp_mm, frame->packet_in);
        lsquic_malo_put(frame);
    }
    if (lconn->cn_enc_session)
        lconn->cn_esf.i->esfi_destroy(lconn->cn_enc_session);
    LSQ_DEBUG("ietf_mini_conn_ci_destroyed");
    lsquic_malo_put(conn);
}


static struct lsquic_engine *
ietf_mini_conn_ci_get_engine (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    return conn->imc_enpub->enp_engine;
}


static void
ietf_mini_conn_ci_hsk_done (struct lsquic_conn *lconn,
                            enum lsquic_hsk_status status)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    switch (status)
    {
    case LSQ_HSK_OK:
    case LSQ_HSK_RESUMED_OK:
        conn->imc_flags |= IMC_HSK_OK;
        conn->imc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE;
        LSQ_DEBUG("handshake OK");
        break;
    default:
        assert(0);
        /* fall-through */
    case LSQ_HSK_FAIL:
        conn->imc_flags |= IMC_HSK_FAILED|IMC_ERROR;
        LSQ_INFO("handshake failed");
        break;
    }
}


static void
ietf_mini_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_DEBUG("got TLS alert %"PRIu8, alert);
    conn->imc_flags |= IMC_ERROR|IMC_TLS_ALERT;
    conn->imc_tls_alert = alert;
}


/* A mini connection is only tickable if it has unsent packets.  This can
 * occur when packet sending is delayed.
 *
 * Otherwise, a mini connection is not tickable:  Either there are incoming
 * packets, in which case, the connection is going to be ticked, or there is
 * an alarm pending, in which case it will be handled via the attq.
 */
static int
ietf_mini_conn_ci_is_tickable (struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *const conn = (struct ietf_mini_conn *) lconn;
    const struct lsquic_packet_out *packet_out;
    size_t packet_size;

    if (conn->imc_enpub->enp_flags & ENPUB_CAN_SEND)
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if (!(packet_out->po_flags & PO_SENT))
            {
                packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
                return imico_can_send(conn, packet_size);
            }

    return 0;
}


static int
imico_can_send (const struct ietf_mini_conn *conn, size_t size)
{
    return (conn->imc_flags & IMC_ADDR_VALIDATED)
        || conn->imc_bytes_in * 3 >= conn->imc_bytes_out + size
        ;
}


static struct lsquic_packet_out *
ietf_mini_conn_ci_next_packet_to_send (struct lsquic_conn *lconn,
                                       const struct to_coal *to_coal)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    struct lsquic_packet_out *packet_out;
    size_t packet_size;

    TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
    {
        if (packet_out->po_flags & PO_SENT)
            continue;
        packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
        if (!(to_coal
                && (packet_size + to_coal->prev_sz_sum
                                            > conn->imc_path.np_pack_size
                    || !lsquic_packet_out_equal_dcids(to_coal->prev_packet,
                                                      packet_out))
            ))
        {
            if (!imico_can_send(conn, packet_size))
            {
                LSQ_DEBUG("cannot send packet %"PRIu64" of size %zu: client "
                    "address has not been validated", packet_out->po_packno,
                    packet_size);
                return NULL;
            }
            packet_out->po_flags |= PO_SENT;
            conn->imc_bytes_out += packet_size;
            if (!to_coal)
                LSQ_DEBUG("packet_to_send: %"PRIu64, packet_out->po_packno);
            else
                LSQ_DEBUG("packet_to_send: %"PRIu64" (coalesced)",
                          packet_out->po_packno);
            return packet_out;
        }
        else
            return NULL;
    }

    return NULL;
}


static int
imico_calc_retx_timeout (const struct ietf_mini_conn *conn)
{
    lsquic_time_t to;
    to = lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats);
    if (to)
    {
        to += to / 2;
        if (to < 10000)
            to = 10000;
    }
    else
        to = 300000;
    return to << conn->imc_hsk_count;
}

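/* The next tick is due either when the retransmission timeout of the first
 * outstanding (sent) packet fires or when the handshake window expires,
 * whichever comes first.
 */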
static lsquic_time_t
ietf_mini_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct lsquic_packet_out *packet_out;
    lsquic_time_t exp_time, retx_time;

    exp_time = conn->imc_created +
                        conn->imc_enpub->enp_settings.es_handshake_to;

    TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
        if (packet_out->po_flags & PO_SENT)
        {
            retx_time = packet_out->po_sent + imico_calc_retx_timeout(conn);
            if (retx_time < exp_time)
            {
                *why = N_AEWS + AL_RETX_HSK;
                return retx_time;
            }
            else
            {
                *why = AEW_MINI_EXPIRE;
                return exp_time;
            }
        }

    *why = AEW_MINI_EXPIRE;
    return exp_time;
}


#define IMICO_PROC_FRAME_ARGS                                           \
    struct ietf_mini_conn *conn, struct lsquic_packet_in *packet_in,    \
    const unsigned char *p, size_t len


static void
imico_dispatch_stream_events (struct ietf_mini_conn *conn)
{
    enum enc_level i;

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTREAD))
                                            == (MCS_CREATED|MCS_WANTREAD))
        {
            LSQ_DEBUG("dispatch read events on level #%u", i);
            lsquic_mini_cry_sm_if.on_read((void *) &conn->imc_streams[i],
                                          conn->imc_conn.cn_enc_session);
        }

    for (i = 0; i < N_ENC_LEVS; ++i)
        if ((conn->imc_streams[i].mcs_flags & (MCS_CREATED|MCS_WANTWRITE))
                                            == (MCS_CREATED|MCS_WANTWRITE))
        {
            LSQ_DEBUG("dispatch write events on level #%u", i);
            lsquic_mini_cry_sm_if.on_write((void *) &conn->imc_streams[i],
                                           conn->imc_conn.cn_enc_session);
        }
}


static int
imico_stash_stream_frame (struct ietf_mini_conn *conn,
        enum enc_level enc_level, struct lsquic_packet_in *packet_in,
        const struct stream_frame *frame)
{
    struct stream_frame *copy;

    if (conn->imc_n_crypto_frames >= IMICO_MAX_STASHED_FRAMES)
    {
        LSQ_INFO("cannot stash more CRYPTO frames, at %hhu already, while max "
            "is %u", conn->imc_n_crypto_frames, IMICO_MAX_STASHED_FRAMES);
        return -1;
    }

    if (conn->imc_crypto_frames_sz + DF_SIZE(frame) > IMICO_MAX_BUFFERED_CRYPTO)
    {
        LSQ_INFO("cannot stash more than %u bytes of CRYPTO frames",
            IMICO_MAX_BUFFERED_CRYPTO);
        return -1;
    }

    copy = lsquic_malo_get(conn->imc_enpub->enp_mm.malo.stream_frame);
    if (!copy)
    {
        LSQ_INFO("could not allocate stream frame for stashing");
        return -1;
    }

    *copy = *frame;
    copy->packet_in = lsquic_packet_in_get(packet_in);
    copy->stream_id = enc_level;
    TAILQ_INSERT_TAIL(&conn->imc_crypto_frames, copy, next_frame);
    ++conn->imc_n_crypto_frames;
    conn->imc_crypto_frames_sz += DF_SIZE(frame);
    return 0;
}


static unsigned
imico_process_crypto_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    enum enc_level enc_level, i;
    struct stream_frame stream_frame;

    parsed_len = conn->imc_conn.cn_pf->pf_parse_crypto_frame(p, len,
                                                             &stream_frame);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    enc_level = lsquic_packet_in_enc_level(packet_in);
    EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, &stream_frame, enc_level);

    if (conn->imc_streams[enc_level].mcs_read_off >= DF_OFF(&stream_frame)
        && conn->imc_streams[enc_level].mcs_read_off < DF_END(&stream_frame))
        LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level);
    else if (conn->imc_streams[enc_level].mcs_read_off < DF_OFF(&stream_frame))
    {
        LSQ_DEBUG("Can't read CRYPTO frame on enc level #%u at offset %"PRIu64
            " yet -- stash", enc_level, DF_OFF(&stream_frame));
        if (0 == imico_stash_stream_frame(conn, enc_level, packet_in,
                                          &stream_frame))
            return parsed_len;
        else
            return 0;
    }
    else
    {
        LSQ_DEBUG("Got duplicate CRYPTO frame for enc level #%u -- ignore",
                  enc_level);
        return parsed_len;
    }

    if (!(conn->imc_flags & IMC_ENC_SESS_INITED))
    {
        if (0 != conn->imc_conn.cn_esf.i->esfi_init_server(
                                            conn->imc_conn.cn_enc_session))
            return 0;
        conn->imc_flags |= IMC_ENC_SESS_INITED;
    }

    if (!(conn->imc_streams[enc_level].mcs_flags & MCS_CREATED))
    {
        LSQ_DEBUG("creating stream on level #%u", enc_level);
        conn->imc_streams[enc_level].mcs_flags |= MCS_CREATED;
        lsquic_mini_cry_sm_if.on_new_stream(conn->imc_conn.cn_enc_session,
                                    (void *) &conn->imc_streams[enc_level]);
    }

    /* Assume that receiving a CRYPTO frame at a higher level means that we
     * no longer want to read from a lower level.
     */
    for (i = 0; i < enc_level; ++i)
        conn->imc_streams[i].mcs_flags &= ~MCS_WANTREAD;

    conn->imc_last_in.frame = &stream_frame;
    conn->imc_last_in.enc_level = enc_level;
    imico_dispatch_stream_events(conn);
    conn->imc_last_in.frame = NULL;

    if (DF_ROFF(&stream_frame) < DF_END(&stream_frame))
    {
        /* This is an odd condition, but let's handle it just in case */
        LSQ_DEBUG("New CRYPTO frame on enc level #%u not fully read -- stash",
                  enc_level);
        if (0 != imico_stash_stream_frame(conn, enc_level, packet_in,
                                          &stream_frame))
            return 0;
    }


    return parsed_len;
}


static ptrdiff_t
imico_count_zero_bytes (const unsigned char *p, size_t len)
{
    const unsigned char *const end = p + len;
    while (p < end && 0 == *p)
        ++p;
    return len - (end - p);
}


static unsigned
imico_process_padding_frame (IMICO_PROC_FRAME_ARGS)
{
    len = (size_t) imico_count_zero_bytes(p, len);
    EV_LOG_PADDING_FRAME_IN(LSQUIC_LOG_CONN_ID, len);
    return len;
}


static void
imico_take_rtt_sample (struct ietf_mini_conn *conn,
        const struct lsquic_packet_out *packet_out,
        lsquic_time_t now, lsquic_time_t lack_delta)
{
    assert(packet_out->po_sent);
    lsquic_time_t measured_rtt = now - packet_out->po_sent;
    if (lack_delta < measured_rtt)
    {
        lsquic_rtt_stats_update(&conn->imc_rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("srtt: %"PRIu64" usec, var: %"PRIu64,
            lsquic_rtt_stats_get_srtt(&conn->imc_rtt_stats),
            lsquic_rtt_stats_get_rttvar(&conn->imc_rtt_stats));
    }
}

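/* Process an incoming ACK frame: verify that only packet numbers we actually
 * sent are acknowledged, take an RTT sample off the largest acknowledged
 * packet, and destroy acknowledged packets.
 */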
static unsigned
imico_process_ack_frame (IMICO_PROC_FRAME_ARGS)
{
    int parsed_len;
    unsigned n;
    lsquic_packet_out_t *packet_out, *next;
    struct ack_info *acki;
    lsquic_packno_t packno;
    lsquic_time_t warn_time;
    packno_set_t acked;
    enum packnum_space pns;
    uint8_t ack_exp;

    if (conn->imc_flags & IMC_HAVE_TP)
        ack_exp = conn->imc_ack_exp;
    else
        ack_exp = TP_DEF_ACK_DELAY_EXP; /* Odd: no transport params yet? */
    acki = conn->imc_enpub->enp_mm.acki;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_ack_frame(p, len, acki,
                                                          ack_exp);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    acked = 0;

    for (n = 0; n < acki->n_ranges; ++n)
    {
        if (acki->ranges[n].high <= MAX_PACKETS)
        {
            acked |= (1ULL << acki->ranges[n].high)
                  | ((1ULL << acki->ranges[n].high) - 1);
            acked &= ~((1ULL << acki->ranges[n].low) - 1);
        }
        else
        {
            packno = acki->ranges[n].high;
            goto err_never_sent;
        }
    }
    if (acked & ~conn->imc_sent_packnos)
    {
        packno = highest_bit_set(acked & ~conn->imc_sent_packnos);
        goto err_never_sent;
    }

    EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, acki);
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if ((1ULL << packet_out->po_packno) & acked)
        {
            assert(lsquic_packet_out_pns(packet_out) == pns);
            LSQ_DEBUG("Got ACK for packet %"PRIu64, packet_out->po_packno);
            if (packet_out->po_packno == largest_acked(acki))
                imico_take_rtt_sample(conn, packet_out,
                                packet_in->pi_received, acki->lack_delta);
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
        }
    }

    if (conn->imc_sent_packnos & ~conn->imc_acked_packnos[pns] & acked)
    {
        LSQ_DEBUG("Newly acked packets, reset handshake count");
        conn->imc_hsk_count = 0;
    }

    conn->imc_acked_packnos[pns] |= acked;

    return parsed_len;

  err_never_sent:
    warn_time = lsquic_time_now();
    if (0 == conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
        || conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI]
                                        + WARNING_INTERVAL < warn_time)
    {
        conn->imc_enpub->enp_last_warning[WT_ACKPARSE_MINI] = warn_time;
        LSQ_WARN("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    }
    else
        LSQ_DEBUG("packet %"PRIu64" (pns: %u) was never sent", packno, pns);
    return 0;
}


static unsigned
imico_process_ping_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("got a PING frame, do nothing");
    return 1;
}


static unsigned
imico_process_connection_close_frame (IMICO_PROC_FRAME_ARGS)
{
    struct lsquic_packet_out *packet_out;
    uint64_t error_code;
    uint16_t reason_len;
    uint8_t reason_off;
    int parsed_len, app_error;

    while ((packet_out = TAILQ_FIRST(&conn->imc_packets_out)))
    {
        TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
        imico_destroy_packet(conn, packet_out);
    }
    conn->imc_flags |= IMC_CLOSE_RECVD;
    parsed_len = conn->imc_conn.cn_pf->pf_parse_connect_close_frame(p, len,
                        &app_error, &error_code, &reason_len, &reason_off);
    if (parsed_len < 0)
    {
        conn->imc_flags |= IMC_PARSE_FAILED;
        return 0;
    }
    EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code,
                            (int) reason_len, (const char *) p + reason_off);
    LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; "
             "reason: %.*s)", app_error ? "application" : "transport",
             error_code, (int) reason_len, (const char *) p + reason_off);
    return 0;   /* This shuts down the connection */
}


static unsigned
imico_process_invalid_frame (IMICO_PROC_FRAME_ARGS)
{
    LSQ_DEBUG("invalid frame %u (%s)", p[0],
        frame_type_2_str[ conn->imc_conn.cn_pf->pf_parse_frame_type(p, len) ]);
    return 0;
}


static unsigned (*const imico_process_frames[N_QUIC_FRAMES])
                                                (IMICO_PROC_FRAME_ARGS) =
{
    [QUIC_FRAME_PADDING]            = imico_process_padding_frame,
    [QUIC_FRAME_CRYPTO]             = imico_process_crypto_frame,
    [QUIC_FRAME_ACK]                = imico_process_ack_frame,
    [QUIC_FRAME_PING]               = imico_process_ping_frame,
    [QUIC_FRAME_CONNECTION_CLOSE]   = imico_process_connection_close_frame,
    /* Some of them are invalid, while others are unexpected.  We treat
     * them the same: handshake cannot proceed.
     */
    [QUIC_FRAME_RST_STREAM]         = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_DATA]           = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAM_DATA]    = imico_process_invalid_frame,
    [QUIC_FRAME_MAX_STREAMS]        = imico_process_invalid_frame,
    [QUIC_FRAME_BLOCKED]            = imico_process_invalid_frame,
    [QUIC_FRAME_STREAM_BLOCKED]     = imico_process_invalid_frame,
    [QUIC_FRAME_STREAMS_BLOCKED]    = imico_process_invalid_frame,
    [QUIC_FRAME_NEW_CONNECTION_ID]  = imico_process_invalid_frame,
    [QUIC_FRAME_STOP_SENDING]       = imico_process_invalid_frame,
    [QUIC_FRAME_PATH_CHALLENGE]     = imico_process_invalid_frame,
    [QUIC_FRAME_PATH_RESPONSE]      = imico_process_invalid_frame,
    /* STREAM frame can only come in the App PNS and we delay those packets: */
    [QUIC_FRAME_STREAM]             = imico_process_invalid_frame,
    [QUIC_FRAME_HANDSHAKE_DONE]     = imico_process_invalid_frame,
    [QUIC_FRAME_ACK_FREQUENCY]      = imico_process_invalid_frame,
    [QUIC_FRAME_TIMESTAMP]          = imico_process_invalid_frame,
};


static unsigned
imico_process_packet_frame (struct ietf_mini_conn *conn,
        struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len)
{
    enum enc_level enc_level;
    enum quic_frame_type type;

    enc_level = lsquic_packet_in_enc_level(packet_in);
    type = conn->imc_conn.cn_pf->pf_parse_frame_type(p, len);
    if (lsquic_legal_frames_by_level[conn->imc_conn.cn_version][enc_level]
                                                            & (1 << type))
    {
        packet_in->pi_frame_types |= 1 << type;
        return imico_process_frames[type](conn, packet_in, p, len);
    }
    else
    {
        LSQ_DEBUG("invalid frame %u at encryption level %s", type,
                  lsquic_enclev2str[enc_level]);
        return 0;
    }
}


static int
imico_parse_regular_packet (struct ietf_mini_conn *conn,
                            struct lsquic_packet_in *packet_in)
{
    const unsigned char *p, *pend;
    unsigned len;

    p = packet_in->pi_data + packet_in->pi_header_sz;
    pend = packet_in->pi_data + packet_in->pi_data_sz;

    while (p < pend)
    {
        len = imico_process_packet_frame(conn, packet_in, p, pend - p);
        if (len > 0)
            p += len;
        else
            return -1;
    }

    return 0;
}


static unsigned
highest_bit_set (unsigned long long sz)
{
#if __GNUC__
    unsigned clz = __builtin_clzll(sz);
    return 63 - clz;
#else
    unsigned long y;
    unsigned n;
    n = 64;
    y = sz >> 32;     if (y) { n -= 32; sz = y; }
    y = sz >> 16;     if (y) { n -= 16; sz = y; }
    y = sz >>  8;     if (y) { n -=  8; sz = y; }
    y = sz >>  4;     if (y) { n -=  4; sz = y; }
    y = sz >>  2;     if (y) { n -=  2; sz = y; }
    y = sz >>  1;     if (y) return 63 - n + 2;
    return 63 - n + sz;
#endif
}


static void
ignore_init (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned count;

    conn->imc_flags |= IMC_IGNORE_INIT;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << PNS_INIT);

    count = 0;
    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (PNS_INIT == lsquic_packet_out_pns(packet_out))
        {
            TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
            imico_destroy_packet(conn, packet_out);
            ++count;
        }
    }

    LSQ_DEBUG("henceforth, no Initial packets shall be sent or received; "
        "destroyed %u packet%.*s", count, count != 1, "s");
}


static void
imico_maybe_delay_processing (struct ietf_mini_conn *conn,
                              struct lsquic_packet_in *packet_in)
{
    unsigned max_delayed;

    if (conn->imc_flags & IMC_ADDR_VALIDATED)
        max_delayed = IMICO_MAX_DELAYED_PACKETS_VALIDATED;
    else
        max_delayed = IMICO_MAX_DELAYED_PACKETS_UNVALIDATED;

    if (conn->imc_delayed_packets_count < max_delayed)
    {
        ++conn->imc_delayed_packets_count;
        lsquic_packet_in_upref(packet_in);
        TAILQ_INSERT_TAIL(&conn->imc_app_packets, packet_in, pi_next);
        LSQ_DEBUG("delay processing of packet (now delayed %hhu)",
                  conn->imc_delayed_packets_count);
    }
    else
        LSQ_DEBUG("drop packet, already delayed %hhu packets",
                  conn->imc_delayed_packets_count);
}


/* [draft-ietf-quic-transport-30] Section 8.1:
 " Additionally, a server MAY consider the client address validated if
 " the client uses a connection ID chosen by the server and the
 " connection ID contains at least 64 bits of entropy.
 *
 * We use RAND_bytes() to generate SCIDs, so it's all entropy.
 */
static void
imico_maybe_validate_by_dcid (struct ietf_mini_conn *conn,
                              const lsquic_cid_t *dcid)
{
    unsigned i;

    if (dcid->len >= 8)
        /* Generic code with unnecessary loop as future-proofing */
        for (i = 0; i < conn->imc_conn.cn_n_cces; ++i)
            if ((conn->imc_conn.cn_cces_mask & (1 << i))
                && (conn->imc_conn.cn_cces[i].cce_flags & CCE_SEQNO)
                && LSQUIC_CIDS_EQ(&conn->imc_conn.cn_cces[i].cce_cid, dcid))
            {
                imico_peer_addr_validated(conn, "dcid/scid + entropy");
                return;
            }
}


/* Only a single packet is supported */
static void
ietf_mini_conn_ci_packet_in (struct lsquic_conn *lconn,
                             struct lsquic_packet_in *packet_in)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum dec_packin dec_packin;
    enum packnum_space pns;

    /* Update "bytes in" count as early as possible.  From
     * [draft-ietf-quic-transport-28] Section 8.1:
     " For the purposes of
     " avoiding amplification prior to address validation, servers MUST
     " count all of the payload bytes received in datagrams that are
     " uniquely attributed to a single connection.  This includes datagrams
     " that contain packets that are successfully processed and datagrams
     " that contain packets that are all discarded.
     */
    conn->imc_bytes_in += packet_in->pi_data_sz;

    if (conn->imc_flags & IMC_ERROR)
    {
        LSQ_DEBUG("ignore incoming packet: connection is in error state");
        return;
    }

    if (!(conn->imc_flags & IMC_ADDR_VALIDATED))
        imico_maybe_validate_by_dcid(conn, &packet_in->pi_dcid);

    pns = lsquic_hety2pns[ packet_in->pi_header_type ];
    if (pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT))
    {
        LSQ_DEBUG("ignore init packet");    /* Don't bother decrypting */
        return;
    }

    dec_packin = lconn->cn_esf_c->esf_decrypt_packet(lconn->cn_enc_session,
                                conn->imc_enpub, &conn->imc_conn, packet_in);
    if (dec_packin != DECPI_OK)
    {
        LSQ_DEBUG("could not decrypt packet");
        if (DECPI_NOT_YET == dec_packin)
            imico_maybe_delay_processing(conn, packet_in);
        return;
    }

    EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in);

    if (pns == PNS_APP)
    {
        imico_maybe_delay_processing(conn, packet_in);
        return;
    }
    else if (pns == PNS_HSK)
        imico_peer_addr_validated(conn, "handshake PNS");

    if (((conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3) < pns)
    {
        conn->imc_flags &= ~(3 << IMCBIT_PNS_BIT_SHIFT);
        conn->imc_flags |= pns << IMCBIT_PNS_BIT_SHIFT;
    }

    if (pns == PNS_HSK && !(conn->imc_flags & IMC_IGNORE_INIT))
        ignore_init(conn);

    if (conn->imc_recvd_packnos[pns] & (1ULL << packet_in->pi_packno))
    {
        LSQ_DEBUG("duplicate packet %"PRIu64, packet_in->pi_packno);
        return;
    }

    /* Update receive history before processing the packet: if there is an
     * error, the connection is terminated and recording this packet number
     * is helpful when it is printed along with other diagnostics in dtor.
     */
    if (0 == conn->imc_recvd_packnos[pns] ||
            packet_in->pi_packno > highest_bit_set(conn->imc_recvd_packnos[pns]))
        conn->imc_largest_recvd[pns] = packet_in->pi_received;
    conn->imc_recvd_packnos[pns] |= 1ULL << packet_in->pi_packno;

    if (0 != imico_parse_regular_packet(conn, packet_in))
    {
        LSQ_DEBUG("connection is now in error state");
        conn->imc_flags |= IMC_ERROR;
        return;
    }

    if (!(conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)))
        LSQ_DEBUG("queued ACK in %s", lsquic_pns2str[pns]);
    conn->imc_flags |= IMC_QUEUED_ACK_INIT << pns;
    ++conn->imc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ];
    conn->imc_incoming_ecn <<= 1;
    conn->imc_incoming_ecn |= lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT;
}


static void
ietf_mini_conn_ci_packet_sent (struct lsquic_conn *lconn,
                               struct lsquic_packet_out *packet_out)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    conn->imc_sent_packnos |= 1ULL << packet_out->po_packno;
    conn->imc_ecn_packnos |= !!lsquic_packet_out_ecn(packet_out)
                          << packet_out->po_packno;
#if 0
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        assert(mc->mc_flags & MC_UNSENT_ACK);
        mc->mc_flags &= ~MC_UNSENT_ACK;
    }
#endif
    ++conn->imc_ecn_counts_out[ lsquic_packet_out_pns(packet_out) ]
                              [ lsquic_packet_out_ecn(packet_out) ];
    if (packet_out->po_header_type == HETY_HANDSHAKE)
        conn->imc_flags |= IMC_HSK_PACKET_SENT;
    LSQ_DEBUG("%s: packet %"PRIu64" sent", __func__, packet_out->po_packno);
}


static void
ietf_mini_conn_ci_packet_not_sent (struct lsquic_conn *lconn,
                                   struct lsquic_packet_out *packet_out)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    size_t packet_size;

    packet_out->po_flags &= ~PO_SENT;
    packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
    conn->imc_bytes_out -= packet_size;
    LSQ_DEBUG("%s: packet %"PRIu64" not sent", __func__, packet_out->po_packno);
}


static void
imico_return_enc_data (struct ietf_mini_conn *conn,
                       struct lsquic_packet_out *packet_out)
{
    conn->imc_enpub->enp_pmi->pmi_return(conn->imc_enpub->enp_pmi_ctx,
        conn->imc_path.np_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static int
imico_repackage_packet (struct ietf_mini_conn *conn,
                        struct lsquic_packet_out *packet_out)
{
    const lsquic_packno_t oldno = packet_out->po_packno;
    const lsquic_packno_t packno = conn->imc_next_packno++;
    if (packno > MAX_PACKETS)
        return -1;

    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
              oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
    packet_out->po_packno = packno;
    packet_out->po_flags &= ~PO_SENT;
    lsquic_packet_out_set_ecn(packet_out, imico_get_ecn(conn));
    if (packet_out->po_flags & PO_ENCRYPTED)
        imico_return_enc_data(conn, packet_out);
    TAILQ_INSERT_TAIL(&conn->imc_packets_out, packet_out, po_next);
    return 0;
}

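/* Mark sent packets that have exceeded the retransmission timeout as lost,
 * repackage the retransmittable ones under new packet numbers, and return
 * true if there are unsent packets that may be sent now.
 */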
static int
imico_handle_losses_and_have_unsent (struct ietf_mini_conn *conn,
                                     lsquic_time_t now)
{
    TAILQ_HEAD(, lsquic_packet_out) lost_packets =
                                    TAILQ_HEAD_INITIALIZER(lost_packets);
    const struct lsquic_conn *const lconn = &conn->imc_conn;
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t retx_to = 0;
    unsigned n_to_send = 0;
    size_t packet_size;

    for (packet_out = TAILQ_FIRST(&conn->imc_packets_out); packet_out;
                                                        packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_flags & PO_SENT)
        {
            if (0 == retx_to)
                retx_to = imico_calc_retx_timeout(conn);
            if (packet_out->po_sent + retx_to < now)
            {
                LSQ_DEBUG("packet %"PRIu64" has been lost (rto: %"PRIu64")",
                          packet_out->po_packno, retx_to);
                TAILQ_REMOVE(&conn->imc_packets_out, packet_out, po_next);
                TAILQ_INSERT_TAIL(&lost_packets, packet_out, po_next);
            }
        }
        else if (packet_size = lsquic_packet_out_total_sz(lconn, packet_out),
                                        imico_can_send(conn, packet_size))
            ++n_to_send;
        else
            break;
    }

    conn->imc_hsk_count += !TAILQ_EMPTY(&lost_packets);

    while ((packet_out = TAILQ_FIRST(&lost_packets)))
    {
        TAILQ_REMOVE(&lost_packets, packet_out, po_next);
        if ((packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK)
                        && 0 == imico_repackage_packet(conn, packet_out))
        {
            packet_size = lsquic_packet_out_total_sz(lconn, packet_out);
            if (imico_can_send(conn, packet_size))
                ++n_to_send;
        }
        else
            imico_destroy_packet(conn, packet_out);
    }

    return n_to_send > 0;
}


static int
imico_have_packets_to_send (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    return imico_handle_losses_and_have_unsent(conn, now);
}


struct ietf_mini_rechist
{
    const struct ietf_mini_conn *conn;
    packno_set_t                 cur_set;
    struct lsquic_packno_range   range;  /* We return a pointer to this */
    int                          cur_idx;
    enum packnum_space           pns;
};


static void
imico_rechist_init (struct ietf_mini_rechist *rechist,
                    const struct ietf_mini_conn *conn, enum packnum_space pns)
{
    rechist->conn = conn;
    rechist->pns = pns;
    rechist->cur_set = 0;
    rechist->cur_idx = 0;
}


static lsquic_time_t
imico_rechist_largest_recv (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    return rechist->conn->imc_largest_recvd[ rechist->pns ];
}


static const struct lsquic_packno_range *
imico_rechist_next (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    const struct ietf_mini_conn *conn = rechist->conn;
    packno_set_t packnos;
    int i;

    packnos = rechist->cur_set;
    if (0 == packnos)
        return NULL;

    /* There may be a faster way to do this, but for now, we just want
     * correctness.
     */
    for (i = rechist->cur_idx; i >= 0; --i)
        if (packnos & (1ULL << i))
        {
            rechist->range.low  = i;
            rechist->range.high = i;
            break;
        }
    assert(i >= 0); /* We must have hit at least one bit */
    --i;
    for ( ; i >= 0 && (packnos & (1ULL << i)); --i)
        rechist->range.low = i;
    if (i >= 0)
    {
        rechist->cur_set = packnos & ((1ULL << i) - 1);
        rechist->cur_idx = i;
    }
    else
        rechist->cur_set = 0;
    LSQ_DEBUG("%s: return [%"PRIu64", %"PRIu64"]", __func__,
              rechist->range.low, rechist->range.high);
    return &rechist->range;
}


static const struct lsquic_packno_range *
imico_rechist_first (void *rechist_ctx)
{
    struct ietf_mini_rechist *rechist = rechist_ctx;
    rechist->cur_set = rechist->conn->imc_recvd_packnos[ rechist->pns ];
    rechist->cur_idx = highest_bit_set(rechist->cur_set);
    return imico_rechist_next(rechist_ctx);
}


static const enum header_type pns2hety[] =
{
    [PNS_INIT]  = HETY_INITIAL,
    [PNS_HSK]   = HETY_HANDSHAKE,
    [PNS_APP]   = HETY_NOT_SET,
};


static int
imico_generate_ack (struct ietf_mini_conn *conn, enum packnum_space pns,
                    lsquic_time_t now)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    struct ietf_mini_rechist rechist;
    int not_used_has_missing, len;
    uint64_t ecn_counts_buf[4];
    const uint64_t *ecn_counts;

    header_type = pns2hety[pns];

    if (conn->imc_incoming_ecn)
    {
        ecn_counts_buf[0] = conn->imc_ecn_counts_in[pns][0];
        ecn_counts_buf[1] = conn->imc_ecn_counts_in[pns][1];
        ecn_counts_buf[2] = conn->imc_ecn_counts_in[pns][2];
        ecn_counts_buf[3] = conn->imc_ecn_counts_in[pns][3];
        ecn_counts = ecn_counts_buf;
    }
    else
        ecn_counts = NULL;

    packet_out = imico_get_packet_out(conn, header_type, 0);
    if (!packet_out)
        return -1;

    /* Generate ACK frame */
    imico_rechist_init(&rechist, conn, pns);
    len = conn->imc_conn.cn_pf->pf_gen_ack_frame(
            packet_out->po_data + packet_out->po_data_sz,
            lsquic_packet_out_avail(packet_out), imico_rechist_first,
            imico_rechist_next, imico_rechist_largest_recv, &rechist,
            now, &not_used_has_missing, &packet_out->po_ack2ed, ecn_counts);
    if (len < 0)
    {
        LSQ_WARN("could not generate ACK frame");
        return -1;
    }
    EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->imc_conn.cn_pf,
                        packet_out->po_data + packet_out->po_data_sz, len);
    packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK;
    packet_out->po_data_sz += len;
    packet_out->po_regen_sz += len;
    conn->imc_flags &= ~(IMC_QUEUED_ACK_INIT << pns);
    LSQ_DEBUG("wrote ACK frame of size %d in %s", len, lsquic_pns2str[pns]);
    return 0;
}


static int
imico_generate_acks (struct ietf_mini_conn *conn, lsquic_time_t now)
{
    enum packnum_space pns;

    for (pns = PNS_INIT; pns < N_PNS; ++pns)
        if (conn->imc_flags & (IMC_QUEUED_ACK_INIT << pns)
                && !(pns == PNS_INIT && (conn->imc_flags & IMC_IGNORE_INIT)))
            if (0 != imico_generate_ack(conn, pns, now))
                return -1;

    return 0;
}

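/* Write a CONNECTION_CLOSE frame into an outgoing packet.  Depending on
 * which packet number spaces are usable at this point, the frame may be
 * generated in both Initial and Handshake packets (see the draft text
 * quoted below).
 */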
static void
imico_generate_conn_close (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    enum header_type header_type;
    enum packnum_space pns, pns_max;
    unsigned error_code;
    const char *reason;
    size_t need;
    int sz, rlen, is_app;
    char reason_buf[0x20];

    if (conn->imc_flags & IMC_ABORT_ERROR)
    {
        is_app = !!(conn->imc_flags & IMC_ABORT_ISAPP);
        error_code = conn->imc_error_code;
        reason = NULL;
        rlen = 0;
    }
    else if (conn->imc_flags & IMC_TLS_ALERT)
    {
        is_app = 0;
        error_code = 0x100 + conn->imc_tls_alert;
        if (ALERT_NO_APPLICATION_PROTOCOL == conn->imc_tls_alert)
            reason = "no suitable application protocol";
        else
        {
            snprintf(reason_buf, sizeof(reason_buf), "TLS alert %"PRIu8,
                     conn->imc_tls_alert);
            reason = reason_buf;
        }
        rlen = strlen(reason);
    }
    else if (conn->imc_flags & IMC_BAD_TRANS_PARAMS)
    {
        is_app = 0;
        error_code = TEC_TRANSPORT_PARAMETER_ERROR;
        reason = "bad transport parameters";
        rlen = 24;
    }
    else if (conn->imc_flags & IMC_HSK_FAILED)
    {
        is_app = 0;
        error_code = TEC_NO_ERROR;
        reason = "handshake failed";
        rlen = 16;
    }
    else if (conn->imc_flags & IMC_PARSE_FAILED)
    {
        is_app = 0;
        error_code = TEC_FRAME_ENCODING_ERROR;
        reason = "cannot decode frame";
        rlen = 19;
    }
    else
    {
        is_app = 0;
        error_code = TEC_INTERNAL_ERROR;
        reason = NULL;
        rlen = 0;
    }


/* [draft-ietf-quic-transport-28] Section 10.3.1:
 *
 " A client will always know whether the server has Handshake keys (see
 " Section 17.2.2.1), but it is possible that a server does not know
 " whether the client has Handshake keys.  Under these circumstances, a
 " server SHOULD send a CONNECTION_CLOSE frame in both Handshake and
 " Initial packets to ensure that at least one of them is processable by
 " the client.
--- 8< ---
 " Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake
 " packet could expose application state or be used to alter application
 " state.  A CONNECTION_CLOSE of type 0x1d MUST be replaced by a
 " CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or
 " Handshake packets.  Otherwise, information about the application
 " state might be revealed.  Endpoints MUST clear the value of the
 " Reason Phrase field and SHOULD use the APPLICATION_ERROR code when
 " converting to a CONNECTION_CLOSE of type 0x1c.
 */
    LSQ_DEBUG("sending CONNECTION_CLOSE, is_app: %d, error code: %u, "
              "reason: %.*s", is_app, error_code, rlen, reason);
    if (is_app && conn->imc_conn.cn_version > LSQVER_ID27)
    {
        LSQ_DEBUG("convert to 0x1C, replace code and reason");
        is_app = 0;
        error_code = TEC_APPLICATION_ERROR;
        rlen = 0;
    }

    pns = (conn->imc_flags >> IMCBIT_PNS_BIT_SHIFT) & 3;
    switch ((!!(conn->imc_flags & IMC_HSK_PACKET_SENT) << 1)
                    | (pns == PNS_HSK) /* Handshake packet received */)
    {
    case (0 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_INIT;
        break;
    case (1 << 1) | 0:
        pns = PNS_INIT;
        pns_max = PNS_HSK;
        break;
    default:
        pns = PNS_HSK;
        pns_max = PNS_HSK;
        break;
    }

    need = conn->imc_conn.cn_pf->pf_connect_close_frame_size(is_app,
                                                    error_code, 0, rlen);
    LSQ_DEBUG("will generate %u CONNECTION_CLOSE frame%.*s",
              pns_max - pns + 1, pns_max > pns, "s");
    do
    {
        header_type = pns2hety[pns];
        packet_out = imico_get_packet_out(conn, header_type, need);
        if (!packet_out)
            return;
        sz = conn->imc_conn.cn_pf->pf_gen_connect_close_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out), is_app, error_code, reason,
                rlen);
        if (sz >= 0)
        {
            packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE;
            packet_out->po_data_sz += sz;
            LSQ_DEBUG("generated CONNECTION_CLOSE frame");
        }
        else
            LSQ_WARN("could not generate CONNECTION_CLOSE frame");
        ++pns;
    }
    while (pns <= pns_max);
}


static int
imico_generate_handshake_done (struct ietf_mini_conn *conn)
{
    struct lsquic_packet_out *packet_out;
    unsigned need;
    int sz;

    need = conn->imc_conn.cn_pf->pf_handshake_done_frame_size();
    packet_out = imico_get_packet_out(conn, HETY_NOT_SET, need);
    if (!packet_out)
        return -1;
    sz = conn->imc_conn.cn_pf->pf_gen_handshake_done_frame(
                packet_out->po_data + packet_out->po_data_sz,
                lsquic_packet_out_avail(packet_out));
    if (sz < 0)
    {
        LSQ_WARN("could not generate HANDSHAKE_DONE frame");
        return -1;
    }

    packet_out->po_frame_types |= 1 << QUIC_FRAME_HANDSHAKE_DONE;
    packet_out->po_data_sz += sz;
    LSQ_DEBUG("generated HANDSHAKE_DONE frame");
    conn->imc_flags |= IMC_HSK_DONE_SENT;

    return 0;
}


static enum tick_st
ietf_mini_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    enum tick_st tick;

    if (conn->imc_created + conn->imc_enpub->enp_settings.es_handshake_to < now)
    {
        LSQ_DEBUG("connection expired: closing");
        return TICK_CLOSE;
    }


    if (conn->imc_flags &
            (IMC_QUEUED_ACK_INIT|IMC_QUEUED_ACK_HSK|IMC_QUEUED_ACK_APP))
    {
        if (0 != imico_generate_acks(conn, now))
        {
            conn->imc_flags |= IMC_ERROR;
            return TICK_CLOSE;
        }
    }


    tick = 0;

    if (conn->imc_flags & IMC_ERROR)
    {
  close_on_error:
        if (!(conn->imc_flags & IMC_CLOSE_RECVD))
            imico_generate_conn_close(conn);
        tick |= TICK_CLOSE;
    }
    else if (conn->imc_flags & IMC_HSK_OK)
    {
        if (lconn->cn_esf.i->esfi_in_init(lconn->cn_enc_session))
            LSQ_DEBUG("still in init, defer HANDSHAKE_DONE");
        else if (0 != imico_generate_handshake_done(conn))
            goto close_on_error;
        tick |= TICK_PROMOTE;
    }

    if (imico_have_packets_to_send(conn, now))
        tick |= TICK_SEND;
    else
        tick |= TICK_QUIET;

    LSQ_DEBUG("Return TICK %d", tick);
    return tick;
}


static void
ietf_mini_conn_ci_internal_error (struct lsquic_conn *lconn,
                                  const char *format, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    LSQ_INFO("internal error reported");
    conn->imc_flags |= IMC_ERROR;
}


static void
ietf_mini_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                               unsigned error_code, const char *fmt, ...)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    va_list ap;
    const char *err_str, *percent;
    char err_buf[0x100];

    percent = strchr(fmt, '%');
    if (percent)
    {
        va_start(ap, fmt);
        vsnprintf(err_buf, sizeof(err_buf), fmt, ap);
        va_end(ap);
        err_str = err_buf;
    }
    else
        err_str = fmt;
    LSQ_INFO("abort error: is_app: %d; error code: %u; error str: %s",
             is_app, error_code, err_str);
    conn->imc_flags |= IMC_ERROR|IMC_ABORT_ERROR;
    if (is_app)
        conn->imc_flags |= IMC_ABORT_ISAPP;
    conn->imc_error_code = error_code;
}


static struct network_path *
ietf_mini_conn_ci_get_path (struct lsquic_conn *lconn,
                            const struct sockaddr *sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    return &conn->imc_path;
}


static const lsquic_cid_t *
ietf_mini_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    if (conn->imc_path.np_dcid.len)
        return &conn->imc_path.np_dcid;
    else
        return CN_SCID(lconn);
}


static unsigned char
ietf_mini_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
        const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;
    const struct sockaddr *orig_peer_sa;
    struct lsquic_packet_out *packet_out;
    size_t len;
    char path_str[4][INET6_ADDRSTRLEN + sizeof(":65535")];

    if (NP_IS_IPv6(&conn->imc_path) != (AF_INET6 == peer_sa->sa_family))
        TAILQ_FOREACH(packet_out, &conn->imc_packets_out, po_next)
            if ((packet_out->po_flags & (PO_SENT|PO_ENCRYPTED)) == PO_ENCRYPTED)
                imico_return_enc_data(conn, packet_out);

    orig_peer_sa = NP_PEER_SA(&conn->imc_path);
    if (orig_peer_sa->sa_family == 0)
        LSQ_DEBUG("connection to %s from %s", SA2STR(local_sa, path_str[0]),
                  SA2STR(peer_sa, path_str[1]));
    else if (!(lsquic_sockaddr_eq(NP_PEER_SA(&conn->imc_path), peer_sa)
            && lsquic_sockaddr_eq(NP_LOCAL_SA(&conn->imc_path), local_sa)))
    {
        LSQ_DEBUG("path changed from (%s - %s) to (%s - %s)",
                  SA2STR(NP_LOCAL_SA(&conn->imc_path), path_str[0]),
                  SA2STR(NP_PEER_SA(&conn->imc_path), path_str[1]),
                  SA2STR(local_sa, path_str[2]),
                  SA2STR(peer_sa, path_str[3]));
        conn->imc_flags |= IMC_PATH_CHANGED;
    }

    len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                         : sizeof(struct sockaddr_in6);

    memcpy(conn->imc_path.np_peer_addr, peer_sa, len);
    memcpy(conn->imc_path.np_local_addr, local_sa, len);
    conn->imc_path.np_peer_ctx = peer_ctx;
    return 0;
}


void
ietf_mini_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz)
{
    struct ietf_mini_conn *conn = (struct ietf_mini_conn *) lconn;

    conn->imc_bytes_in += garbage_sz;
    LSQ_DEBUG("count %zd bytes of garbage, new value: %u bytes", garbage_sz,
              conn->imc_bytes_in);
}


static const struct conn_iface mini_conn_ietf_iface = {
    .ci_abort_error          = ietf_mini_conn_ci_abort_error,
    .ci_client_call_on_new   = ietf_mini_conn_ci_client_call_on_new,
    .ci_count_garbage        = ietf_mini_conn_ci_count_garbage,
    .ci_destroy              = ietf_mini_conn_ci_destroy,
    .ci_get_engine           = ietf_mini_conn_ci_get_engine,
    .ci_get_log_cid          = ietf_mini_conn_ci_get_log_cid,
    .ci_get_path             = ietf_mini_conn_ci_get_path,
    .ci_hsk_done             = ietf_mini_conn_ci_hsk_done,
    .ci_internal_error       = ietf_mini_conn_ci_internal_error,
    .ci_is_tickable          = ietf_mini_conn_ci_is_tickable,
    .ci_next_packet_to_send  = ietf_mini_conn_ci_next_packet_to_send,
    .ci_next_tick_time       = ietf_mini_conn_ci_next_tick_time,
    .ci_packet_in            = ietf_mini_conn_ci_packet_in,
    .ci_packet_not_sent      = ietf_mini_conn_ci_packet_not_sent,
    .ci_packet_sent          = ietf_mini_conn_ci_packet_sent,
    .ci_record_addrs         = ietf_mini_conn_ci_record_addrs,
    .ci_tick                 = ietf_mini_conn_ci_tick,
    .ci_tls_alert            = ietf_mini_conn_ci_tls_alert,
};