/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_full_conn_ietf.c -- IETF QUIC connection.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <openssl/aead.h>
#include <openssl/rand.h>

#include "fiu-local.h"

#include "lsquic.h"
#include "lsxpack_header.h"
#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic_attq.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_ietf.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_rechist.h"
#include "lsquic_senhist.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_sfcw.h"
#include "lsquic_conn_flow.h"
#include "lsquic_varint.h"
#include "lsquic_hq.h"
#include "lsquic_stream.h"
#include "lsquic_rtt.h"
#include "lsquic_conn_public.h"
#include "lsquic_bw_sampler.h"
#include "lsquic_minmax.h"
#include "lsquic_bbr.h"
#include "lsquic_send_ctl.h"
#include "lsquic_alarmset.h"
#include "lsquic_ver_neg.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_set.h"
#include "lsquic_sizes.h"
#include "lsquic_trans_params.h"
#include "lsquic_version.h"
#include "lsquic_parse.h"
#include "lsquic_util.h"
#include "lsquic_enc_sess.h"
#include "lsquic_ev_log.h"
#include "lsquic_malo.h"
#include "lsquic_frab_list.h"
#include "lsquic_hcso_writer.h"
#include "lsquic_hcsi_reader.h"
#include "lsqpack.h"
#include "lsquic_http1x_if.h"
#include "lsquic_qenc_hdl.h"
#include "lsquic_qdec_hdl.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_tokgen.h"
#include "lsquic_full_conn.h"
#include "lsquic_spi.h"
#include "lsquic_ietf.h"
#include "lsquic_push_promise.h"
#include "lsquic_headers.h"
#include "lsquic_crand.h"

#define LSQUIC_LOGGER_MODULE LSQLM_CONN
#define LSQUIC_LOG_CONN_ID ietf_full_conn_ci_get_log_cid(&conn->ifc_conn)
#include "lsquic_logger.h"

#define MAX_RETR_PACKETS_SINCE_LAST_ACK 2
#define MAX_ANY_PACKETS_SINCE_LAST_ACK 20
#define ACK_TIMEOUT             (TP_DEF_MAX_ACK_DELAY * 1000)
#define INITIAL_CHAL_TIMEOUT    25000

/* Retire original CID after this much time has elapsed: */
#define RET_CID_TIMEOUT         2000000
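/* Added orientation note, assuming the usual microsecond lsquic_time_t units:
 * with TP_DEF_MAX_ACK_DELAY at the transport-parameter default of 25 (ms),
 * ACK_TIMEOUT works out to 25,000 us, and RET_CID_TIMEOUT above is
 * 2,000,000 us, i.e. two seconds after promotion from the mini connection
 * (see where AL_RET_CIDS is set in lsquic_ietf_full_conn_server_new()).
 */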
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* IETF QUIC push promise does not contain stream ID.  This means that, unlike
 * in GQUIC, one cannot create a stream immediately and pass it to the client.
 * We may have to add a special API for IETF push promises.  That's in the
 * future: right now, we punt it.
 */
#define CLIENT_PUSH_SUPPORT 0


/* IMPORTANT: Keep values of IFC_SERVER and IFC_HTTP same as LSENG_SERVER
 * and LSENG_HTTP.
 */
enum ifull_conn_flags
{
    IFC_SERVER        = LSENG_SERVER,   /* Server mode */
    IFC_HTTP          = LSENG_HTTP,     /* HTTP mode */
    IFC_ACK_HAD_MISS  = 1 << 2,
#define IFC_BIT_ERROR 3
    IFC_ERROR         = 1 << IFC_BIT_ERROR,
    IFC_TIMED_OUT     = 1 << 4,
    IFC_ABORTED       = 1 << 5,
    IFC_HSK_FAILED    = 1 << 6,
    IFC_GOING_AWAY    = 1 << 7,
    IFC_CLOSING       = 1 << 8,     /* Closing */
    IFC_RECV_CLOSE    = 1 << 9,     /* Received CONNECTION_CLOSE frame */
    IFC_TICK_CLOSE    = 1 << 10,    /* We returned TICK_CLOSE */
    IFC_CREATED_OK    = 1 << 11,
    IFC_HAVE_SAVED_ACK= 1 << 12,
    IFC_ABORT_COMPLAINED
                      = 1 << 13,
    IFC_DCID_SET      = 1 << 14,
#define IFCBIT_ACK_QUED_SHIFT 15
    IFC_ACK_QUED_INIT = 1 << 15,
    IFC_ACK_QUED_HSK  = IFC_ACK_QUED_INIT << PNS_HSK,
    IFC_ACK_QUED_APP  = IFC_ACK_QUED_INIT << PNS_APP,
#define IFC_ACK_QUEUED (IFC_ACK_QUED_INIT|IFC_ACK_QUED_HSK|IFC_ACK_QUED_APP)
    IFC_HAVE_PEER_SET = 1 << 18,
    IFC_GOT_PRST      = 1 << 19,
    IFC_IGNORE_INIT   = 1 << 20,
    IFC_RETRIED       = 1 << 21,
    IFC_SWITCH_DCID   = 1 << 22, /* Perform DCID switch when a new CID becomes available */
    IFC_GOAWAY_CLOSE  = 1 << 23,
    IFC_FIRST_TICK    = 1 << 24,
    IFC_IGNORE_HSK    = 1 << 25,
    IFC_PROC_CRYPTO   = 1 << 26,
    IFC_MIGRA         = 1 << 27,
    IFC_SPIN          = 1 << 28,    /* Spin bits are enabled */
    IFC_DELAYED_ACKS  = 1 << 29,    /* Delayed ACKs are enabled */
    IFC_TIMESTAMPS    = 1 << 30,    /* Timestamps are enabled */
};


enum more_flags
{
    MF_VALIDATE_PATH    = 1 << 0,
    MF_NOPROG_TIMEOUT   = 1 << 1,
    MF_CHECK_MTU_PROBE  = 1 << 2,
};


#define N_PATHS 4

enum send
{
    /* PATH_CHALLENGE and PATH_RESPONSE frames are not retransmittable.  They
     * are positioned first in the enum to optimize packetization.
     */
    SEND_PATH_CHAL,
    SEND_PATH_CHAL_PATH_0 = SEND_PATH_CHAL + 0,
    SEND_PATH_CHAL_PATH_1 = SEND_PATH_CHAL + 1,
    SEND_PATH_CHAL_PATH_2 = SEND_PATH_CHAL + 2,
    SEND_PATH_CHAL_PATH_3 = SEND_PATH_CHAL + 3,
    SEND_PATH_RESP,
    SEND_PATH_RESP_PATH_0 = SEND_PATH_RESP + 0,
    SEND_PATH_RESP_PATH_1 = SEND_PATH_RESP + 1,
    SEND_PATH_RESP_PATH_2 = SEND_PATH_RESP + 2,
    SEND_PATH_RESP_PATH_3 = SEND_PATH_RESP + 3,
    SEND_MAX_DATA,
    SEND_PING,
    SEND_NEW_CID,
    SEND_RETIRE_CID,
    SEND_CONN_CLOSE,
    SEND_STREAMS_BLOCKED,
    SEND_STREAMS_BLOCKED_BIDI = SEND_STREAMS_BLOCKED + SD_BIDI,
    SEND_STREAMS_BLOCKED_UNI = SEND_STREAMS_BLOCKED + SD_UNI,
    SEND_MAX_STREAMS,
    SEND_MAX_STREAMS_BIDI = SEND_MAX_STREAMS + SD_BIDI,
    SEND_MAX_STREAMS_UNI = SEND_MAX_STREAMS + SD_UNI,
    SEND_STOP_SENDING,
    SEND_HANDSHAKE_DONE,
    SEND_ACK_FREQUENCY,
    N_SEND
};

enum send_flags
{
    SF_SEND_MAX_DATA            = 1 << SEND_MAX_DATA,
    SF_SEND_PING                = 1 << SEND_PING,
    SF_SEND_PATH_CHAL           = 1 << SEND_PATH_CHAL,
    SF_SEND_PATH_CHAL_PATH_0    = 1 << SEND_PATH_CHAL_PATH_0,
    SF_SEND_PATH_CHAL_PATH_1    = 1 << SEND_PATH_CHAL_PATH_1,
    SF_SEND_PATH_CHAL_PATH_2    = 1 << SEND_PATH_CHAL_PATH_2,
    SF_SEND_PATH_CHAL_PATH_3    = 1 << SEND_PATH_CHAL_PATH_3,
    SF_SEND_PATH_RESP           = 1 << SEND_PATH_RESP,
    SF_SEND_PATH_RESP_PATH_0    = 1 << SEND_PATH_RESP_PATH_0,
    SF_SEND_PATH_RESP_PATH_1    = 1 << SEND_PATH_RESP_PATH_1,
    SF_SEND_PATH_RESP_PATH_2    = 1 << SEND_PATH_RESP_PATH_2,
    SF_SEND_PATH_RESP_PATH_3    = 1 << SEND_PATH_RESP_PATH_3,
    SF_SEND_NEW_CID             = 1 << SEND_NEW_CID,
    SF_SEND_RETIRE_CID          = 1 << SEND_RETIRE_CID,
    SF_SEND_CONN_CLOSE          = 1 << SEND_CONN_CLOSE,
    SF_SEND_STREAMS_BLOCKED     = 1 << SEND_STREAMS_BLOCKED,
    SF_SEND_STREAMS_BLOCKED_BIDI= 1 << SEND_STREAMS_BLOCKED_BIDI,
    SF_SEND_STREAMS_BLOCKED_UNI = 1 << SEND_STREAMS_BLOCKED_UNI,
    SF_SEND_MAX_STREAMS         = 1 << SEND_MAX_STREAMS,
    SF_SEND_MAX_STREAMS_BIDI    = 1 << SEND_MAX_STREAMS_BIDI,
    SF_SEND_MAX_STREAMS_UNI     = 1 << SEND_MAX_STREAMS_UNI,
    SF_SEND_STOP_SENDING        = 1 << SEND_STOP_SENDING,
    SF_SEND_HANDSHAKE_DONE      = 1 << SEND_HANDSHAKE_DONE,
    SF_SEND_ACK_FREQUENCY       = 1 << SEND_ACK_FREQUENCY,
};

#define SF_SEND_PATH_CHAL_ALL \
            (((SF_SEND_PATH_CHAL << N_PATHS) - 1) & ~(SF_SEND_PATH_CHAL - 1))
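/* Worked example (added for clarity): SEND_PATH_CHAL is the first enumerator,
 * so SF_SEND_PATH_CHAL == 1 << 0.  With N_PATHS == 4 the macro above expands
 * to ((1 << 4) - 1) & ~0 == 0x0F -- the four per-path PATH_CHALLENGE bits.
 * Per-path flags are addressed as SF_SEND_PATH_CHAL << path_id elsewhere in
 * this file (see migra_begin() and path_chal_alarm_expired()).
 */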
#define IFC_IMMEDIATE_CLOSE_FLAGS \
            (IFC_TIMED_OUT|IFC_ERROR|IFC_ABORTED|IFC_HSK_FAILED|IFC_GOT_PRST)

#define MAX_ERRMSG 256

#define MAX_SCID 8

#define SET_ERRMSG(conn, ...) do {                                          \
    if (!(conn)->ifc_errmsg)                                                \
    {                                                                       \
        (conn)->ifc_errmsg = malloc(MAX_ERRMSG);                            \
        if ((conn)->ifc_errmsg)                                             \
            snprintf((conn)->ifc_errmsg, MAX_ERRMSG, __VA_ARGS__);          \
    }                                                                       \
} while (0)

#define ABORT_WITH_FLAG(conn, log_level, flag, ...) do {                    \
    SET_ERRMSG(conn, __VA_ARGS__);                                          \
    if (!((conn)->ifc_flags & IFC_ABORT_COMPLAINED))                        \
        LSQ_LOG(log_level, "Abort connection: " __VA_ARGS__);               \
    (conn)->ifc_flags |= flag|IFC_ABORT_COMPLAINED;                         \
} while (0)

#define ABORT_ERROR(...) \
    ABORT_WITH_FLAG(conn, LSQ_LOG_ERROR, IFC_ERROR, __VA_ARGS__)
#define ABORT_WARN(...) \
    ABORT_WITH_FLAG(conn, LSQ_LOG_WARN, IFC_ERROR, __VA_ARGS__)

#define CONN_ERR(app_error_, code_) (struct conn_err) { \
                            .app_error = (app_error_), .u.err = (code_), }

/* Use this for protocol errors; they do not need to be as loud as our own
 * internal errors.
 */
#define ABORT_QUIETLY(app_error, code, ...) do {                            \
    conn->ifc_error = CONN_ERR(app_error, code);                            \
    ABORT_WITH_FLAG(conn, LSQ_LOG_INFO, IFC_ERROR, __VA_ARGS__);            \
} while (0)


static enum stream_id_type
gen_sit (unsigned server, enum stream_dir sd)
{
    return (server > 0) | ((sd > 0) << SD_SHIFT);
}
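/* Added note: the two low bits of an IETF QUIC stream ID encode its type --
 * bit 0 is the initiator (0 = client, 1 = server) and bit 1 the
 * directionality (0 = bidirectional, 1 = unidirectional) -- which is what
 * gen_sit() assembles from `server' and `sd', assuming the usual SD_SHIFT == 1
 * and SIT_SHIFT == 2 values defined elsewhere in lsquic.  For example, a
 * server-initiated unidirectional stream has type 3, and the third such
 * stream gets ID (2 << SIT_SHIFT) | 3 == 11 -- see generate_stream_id() below.
 */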
struct stream_id_to_ss
{
    STAILQ_ENTRY(stream_id_to_ss)   sits_next;
    lsquic_stream_id_t              sits_stream_id;
    enum http_error_code            sits_error_code;
};

struct http_ctl_stream_in
{
    struct hcsi_reader  reader;
};

struct conn_err
{
    int                         app_error;
    union
    {
        enum trans_error_code   tec;
        enum http_error_code    hec;
        unsigned                err;
    }                           u;
};


struct dplpmtud_state
{
    lsquic_packno_t     ds_probe_packno;
#ifndef NDEBUG
    lsquic_time_t       ds_probe_sent;
#endif
    enum {
        DS_PROBE_SENT   = 1 << 0,
    }                   ds_flags;
    unsigned short      ds_probed_size,
                        ds_failed_size;     /* If non-zero, defines ceiling */
    unsigned char       ds_probe_count;
};


struct conn_path
{
    struct network_path         cop_path;
    uint64_t                    cop_path_chals[8];  /* Arbitrary number */
    uint64_t                    cop_inc_chal;       /* Incoming challenge */
    enum {
        /* Initialized covers cop_path.np_pack_size and cop_path.np_dcid */
        COP_INITIALIZED = 1 << 0,
        /* This flag is set when we received a response to one of path
         * challenges we sent on this path.
         */
        COP_VALIDATED   = 1 << 1,
        /* Received non-probing frames.  This flag is not set for the
         * original path.
         */
        COP_GOT_NONPROB = 1 << 2,
    }                           cop_flags;
    unsigned short              cop_max_plpmtu;
    unsigned char               cop_n_chals;
    unsigned char               cop_cce_idx;
    struct dplpmtud_state       cop_dplpmtud;
};


struct inc_ack_stats    /* Incoming ACK stats */
{
    unsigned        n_acks;     /* Number of ACKs between ticks */
    float           avg_acked;  /* Packets acked between ticks */
    float           avg_n_acks; /* Average number of ACKs */
};


struct ietf_full_conn
{
    struct lsquic_conn          ifc_conn;
    struct conn_cid_elem        ifc_cces[MAX_SCID];
    struct lsquic_rechist       ifc_rechist[N_PNS];
    struct lsquic_send_ctl      ifc_send_ctl;
    struct lsquic_stream       *ifc_stream_hcsi;    /* HTTP Control Stream Incoming */
    struct lsquic_stream       *ifc_stream_hcso;    /* HTTP Control Stream Outgoing */
    struct lsquic_conn_public   ifc_pub;
    lsquic_alarmset_t           ifc_alset;
    struct lsquic_set64         ifc_closed_stream_ids[N_SITS];
    lsquic_stream_id_t          ifc_n_created_streams[N_SDS];
    /* Not including the value stored in ifc_max_allowed_stream_id: */
    lsquic_stream_id_t          ifc_max_allowed_stream_id[N_SITS];
    uint64_t                    ifc_closed_peer_streams[N_SDS];
    /* Maximum number of open streams initiated by peer: */
    unsigned                    ifc_max_streams_in[N_SDS];
    uint64_t                    ifc_max_stream_data_uni;
    enum ifull_conn_flags       ifc_flags;
    enum more_flags             ifc_mflags;
    enum send_flags             ifc_send_flags;
    enum send_flags             ifc_delayed_send;
    struct {
        uint64_t    streams_blocked[N_SDS];
    }                           ifc_send;
    struct conn_err             ifc_error;
    unsigned                    ifc_n_delayed_streams;
    unsigned                    ifc_n_cons_unretx;
    int                         ifc_spin_bit;
    const struct lsquic_stream_if
                               *ifc_stream_if;
    void                       *ifc_stream_ctx;
    char                       *ifc_errmsg;
    struct lsquic_engine_public
                               *ifc_enpub;
    const struct lsquic_engine_settings
                               *ifc_settings;
    lsquic_conn_ctx_t          *ifc_conn_ctx;
    STAILQ_HEAD(, stream_id_to_ss)
                                ifc_stream_ids_to_ss;
    lsquic_time_t               ifc_created;
    lsquic_time_t               ifc_saved_ack_received;
    lsquic_packno_t             ifc_max_ack_packno[N_PNS];
    lsquic_packno_t             ifc_max_non_probing;
    struct {
        uint64_t    max_stream_send;
        uint8_t     ack_exp;
    }                           ifc_cfg;
    int                       (*ifc_process_incoming_packet)(
                                                struct ietf_full_conn *,
                                                struct lsquic_packet_in *);
    /* Number of ackable packets received since last ACK was sent: */
    unsigned                    ifc_n_slack_akbl[N_PNS];
    unsigned                    ifc_n_slack_all;    /* App PNS only */
    unsigned                    ifc_max_retx_since_last_ack;
    uint64_t                    ifc_ecn_counts_in[N_PNS][4];
    uint64_t                    ifc_ecn_counts_out[N_PNS][4];
    lsquic_stream_id_t          ifc_max_req_id;
    struct hcso_writer          ifc_hcso;
    struct http_ctl_stream_in   ifc_hcsi;
    struct qpack_enc_hdl        ifc_qeh;
    struct qpack_dec_hdl        ifc_qdh;
    struct {
        uint64_t    header_table_size,
                    num_placeholders,
                    max_header_list_size,
                    qpack_blocked_streams;
    }                           ifc_peer_hq_settings;
    struct dcid_elem           *ifc_dces[MAX_IETF_CONN_DCIDS];
    TAILQ_HEAD(, dcid_elem)     ifc_to_retire;
    unsigned                    ifc_scid_seqno;
    lsquic_time_t               ifc_scid_timestamp[MAX_SCID];
    /* Last 8 packets had ECN markings? */
    uint8_t                     ifc_incoming_ecn;
    unsigned char               ifc_cur_path_id;    /* Indexes ifc_paths */
    unsigned char               ifc_used_paths;     /* Bitmask */
    unsigned char               ifc_mig_path_id;
    /* ifc_active_cids_limit is the maximum number of CIDs at any one time this
     * endpoint is allowed to issue to peer.  If the TP value exceeds cn_n_cces,
     * it is reduced to it.  ifc_active_cids_count tracks how many CIDs have
     * been issued.  It is decremented each time a CID is retired.
     */
    unsigned char               ifc_active_cids_limit;
    unsigned char               ifc_active_cids_count;
    unsigned char               ifc_first_active_cid_seqno;
    unsigned char               ifc_ping_unretx_thresh;
    unsigned                    ifc_last_retire_prior_to;
    unsigned                    ifc_ack_freq_seqno;
    unsigned                    ifc_last_pack_tol;
    unsigned                    ifc_max_ack_freq_seqno; /* Incoming */
    unsigned                    ifc_max_peer_ack_usec;
    unsigned short              ifc_max_udp_payload;    /* Cached TP */
    lsquic_time_t               ifc_last_live_update;
    struct conn_path            ifc_paths[N_PATHS];
    union {
        struct {
            struct lsquic_stream   *crypto_streams[N_ENC_LEVS];
            struct ver_neg
                        ifcli_ver_neg;
            uint64_t    ifcli_max_push_id;
            uint64_t    ifcli_min_goaway_stream_id;
            enum {
                IFCLI_PUSH_ENABLED    = 1 << 0,
                IFCLI_HSK_SENT_OR_DEL = 1 << 1,
            }           ifcli_flags;
            unsigned    ifcli_packets_out;
        }                       cli;
        struct {
            uint64_t    ifser_max_push_id;
            uint64_t    ifser_next_push_id;
            enum {
                IFSER_PUSH_ENABLED = 1 << 0,
                IFSER_MAX_PUSH_ID  = 1 << 1,    /* ifser_max_push_id is set */
            }           ifser_flags;
        }                       ser;
    }                           ifc_u;
    lsquic_time_t               ifc_idle_to;
    lsquic_time_t               ifc_ping_period;
    uint64_t                    ifc_last_max_data_off_sent;
    struct inc_ack_stats        ifc_ias;
    struct ack_info             ifc_ack;
};

#define CUR_CPATH(conn_) (&(conn_)->ifc_paths[(conn_)->ifc_cur_path_id])
#define CUR_NPATH(conn_) (&(CUR_CPATH(conn_)->cop_path))
#define CUR_DCID(conn_) (&(CUR_NPATH(conn_)->np_dcid))

#define DCES_END(conn_) ((conn_)->ifc_dces + (sizeof((conn_)->ifc_dces) \
                            / sizeof((conn_)->ifc_dces[0])))

static const struct ver_neg server_ver_neg;

static const struct conn_iface *ietf_full_conn_iface_ptr;
static const struct conn_iface *ietf_full_conn_prehsk_iface_ptr;

static int
process_incoming_packet_verneg (struct ietf_full_conn *,
                                                struct lsquic_packet_in *);

static int
process_incoming_packet_fast (struct ietf_full_conn *,
                                                struct lsquic_packet_in *);

static void
ietf_full_conn_ci_packet_in (struct lsquic_conn *, struct lsquic_packet_in *);

static int
handshake_ok (struct lsquic_conn *);

static void
ignore_init (struct ietf_full_conn *);

static void
ignore_hsk (struct ietf_full_conn *);

static unsigned
ietf_full_conn_ci_n_avail_streams (const struct lsquic_conn *);

static const lsquic_cid_t *
ietf_full_conn_ci_get_log_cid (const struct lsquic_conn *);

static void
ietf_full_conn_ci_destroy (struct lsquic_conn *);

static int
insert_new_dcid (struct ietf_full_conn *, uint64_t seqno,
    const lsquic_cid_t *, const unsigned char *token, int update_cur_dcid);

static struct conn_cid_elem *
find_cce_by_cid (struct ietf_full_conn *, const lsquic_cid_t *);

static void
mtu_probe_too_large (struct ietf_full_conn *, const struct lsquic_packet_out *);

static unsigned
highest_bit_set (unsigned sz)
{
#if __GNUC__
    unsigned clz = __builtin_clz(sz);
    return 31 - clz;
#else
    unsigned n, y;
    n = 32;
    y = sz >> 16;   if (y) { n -= 16; sz = y; }
    y = sz >>  8;   if (y) { n -=  8; sz = y; }
    y = sz >>  4;   if (y) { n -=  4; sz = y; }
    y = sz >>  2;   if (y) { n -=  2; sz = y; }
    y = sz >>  1;   if (y) return 31 - n + 2;
    return 31 - n + sz;
#endif
}
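/* Added example: highest_bit_set() returns the index of the most significant
 * set bit, e.g. highest_bit_set(0x2C) == 5.  set_versions() below relies on
 * this to pick the highest-numbered enum lsquic_version present in the
 * `versions' bitmask when the caller does not specify a version explicitly.
 */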
static void
set_versions (struct ietf_full_conn *conn, unsigned versions,
                                                    enum lsquic_version *ver)
{
    conn->ifc_u.cli.ifcli_ver_neg.vn_supp = versions;
    conn->ifc_u.cli.ifcli_ver_neg.vn_ver  = (ver) ? *ver : highest_bit_set(versions);
    conn->ifc_u.cli.ifcli_ver_neg.vn_buf  = lsquic_ver2tag(conn->ifc_u.cli.ifcli_ver_neg.vn_ver);
    conn->ifc_conn.cn_version = conn->ifc_u.cli.ifcli_ver_neg.vn_ver;
}


static void
init_ver_neg (struct ietf_full_conn *conn, unsigned versions,
                                                    enum lsquic_version *ver)
{
    set_versions(conn, versions, ver);
    conn->ifc_u.cli.ifcli_ver_neg.vn_tag   = &conn->ifc_u.cli.ifcli_ver_neg.vn_buf;
    conn->ifc_u.cli.ifcli_ver_neg.vn_state = VN_START;
}


static void
ack_alarm_expired (enum alarm_id al_id, void *ctx, lsquic_time_t expiry,
                                                            lsquic_time_t now)
{
    struct ietf_full_conn *conn = ctx;
    assert(al_id == AL_ACK_APP);
    LSQ_DEBUG("%s ACK timer expired (%"PRIu64" < %"PRIu64"): ACK queued",
        lsquic_pns2str[PNS_APP], expiry, now);
    conn->ifc_flags |= IFC_ACK_QUED_APP;
}


static void
idle_alarm_expired (enum alarm_id al_id, void *ctx, lsquic_time_t expiry,
                                                            lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;

    if ((conn->ifc_mflags & MF_NOPROG_TIMEOUT)
        && conn->ifc_pub.last_prog + conn->ifc_enpub->enp_noprog_timeout < now)
    {
        EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "connection timed out due to "
                                                        "lack of progress");
        /* Different flag so that CONNECTION_CLOSE frame is sent */
        ABORT_QUIETLY(0, TEC_APPLICATION_ERROR,
                            "connection timed out due to lack of progress");
    }
    else
    {
        LSQ_DEBUG("connection timed out");
        EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "connection timed out");
        conn->ifc_flags |= IFC_TIMED_OUT;
    }
}


static void
handshake_alarm_expired (enum alarm_id al_id, void *ctx,
                                    lsquic_time_t expiry, lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    LSQ_DEBUG("connection timed out: handshake timed out");
    conn->ifc_flags |= IFC_TIMED_OUT;
}


/*
 * When this alarm expires, at least one SCID slot should be available
 * for generation.
 */
static void
cid_throt_alarm_expired (enum alarm_id al_id, void *ctx,
                                    lsquic_time_t expiry, lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    LSQ_DEBUG("%s", __func__);
    conn->ifc_send_flags |= SF_SEND_NEW_CID;
    return;
}


static void
wipe_path (struct ietf_full_conn *conn, unsigned path_id)
{
    memset(&conn->ifc_paths[path_id], 0, sizeof(conn->ifc_paths[0]));
    conn->ifc_paths[path_id].cop_path.np_path_id = path_id;
}


static void
path_chal_alarm_expired (enum alarm_id al_id, void *ctx,
                                    lsquic_time_t expiry, lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    const unsigned path_id = al_id - AL_PATH_CHAL;
    struct conn_path *const copath = &conn->ifc_paths[path_id];

    if (copath->cop_n_chals < sizeof(copath->cop_path_chals)
                                    / sizeof(copath->cop_path_chals[0]))
    {
        LSQ_DEBUG("path #%u challenge expired, schedule another one", path_id);
        conn->ifc_send_flags |= SF_SEND_PATH_CHAL << path_id;
    }
    else if (conn->ifc_cur_path_id != path_id)
    {
        LSQ_INFO("migration to path #%u failed after none of %u path "
            "challenges received responses", path_id, copath->cop_n_chals);
        wipe_path(conn, path_id);
    }
    else
        LSQ_INFO("no path challenge responses on current path %u, stop "
            "sending path challenges", path_id);
}


/* Sending DATA_BLOCKED and STREAM_DATA_BLOCKED frames is a way to elicit
 * incoming packets from peer when it is too slow to read data.  This is
 * recommended by [draft-ietf-quic-transport-25] Section 4.1.
 *
 * If we are still in the blocked state, we schedule a blocked frame to
 * be sent.
 */
static void
blocked_ka_alarm_expired (enum alarm_id al_id, void *ctx,
                                    lsquic_time_t expiry, lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    struct lsquic_stream *stream;
    struct lsquic_hash_elem *el;

    if (lsquic_conn_cap_avail(&conn->ifc_pub.conn_cap) == 0)
    {
        LSQ_DEBUG("set SEND_BLOCKED flag on connection");
        conn->ifc_conn.cn_flags |= LSCONN_SEND_BLOCKED;
        return;
    }

    for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el;
                        el = lsquic_hash_next(conn->ifc_pub.all_streams))
    {
        stream = lsquic_hashelem_getdata(el);
        if (lsquic_stream_is_blocked(stream))
        {
            if (!(stream->sm_qflags & SMQF_SENDING_FLAGS))
                TAILQ_INSERT_TAIL(&conn->ifc_pub.sending_streams, stream,
                                                        next_send_stream);
            stream->sm_qflags |= SMQF_SEND_BLOCKED;
            LSQ_DEBUG("set SEND_BLOCKED flag on stream %"PRIu64, stream->id);
            return;
        }
    }
}


static void
mtu_probe_alarm_expired (enum alarm_id al_id, void *ctx,
                                    lsquic_time_t expiry, lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;

    LSQ_DEBUG("MTU probe alarm expired: set `check MTU probe' flag");
    assert(!(conn->ifc_mflags & MF_CHECK_MTU_PROBE));
    conn->ifc_mflags |= MF_CHECK_MTU_PROBE;
}


static int
migra_is_on (const struct ietf_full_conn *conn, unsigned path_id)
{
    return (conn->ifc_send_flags & (SF_SEND_PATH_CHAL << path_id))
        || lsquic_alarmset_is_set(&conn->ifc_alset, AL_PATH_CHAL + path_id);
}


#define TRANSPORT_OVERHEAD(is_ipv6) (((is_ipv6) ? 40 : 20) + 8 /* UDP */)
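/* Added arithmetic note: TRANSPORT_OVERHEAD() is the IP-plus-UDP header cost,
 * i.e. 20 + 8 == 28 bytes for IPv4 and 40 + 8 == 48 bytes for IPv6.  For
 * example, with es_base_plpmtu set to 1500, calc_base_packet_size() below
 * would use 1472 (IPv4) or 1452 (IPv6) bytes for the QUIC packet itself.
 */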
static unsigned short
calc_base_packet_size (const struct ietf_full_conn *conn, int is_ipv6)
{
    unsigned short size;

    if (conn->ifc_settings->es_base_plpmtu)
        size = conn->ifc_settings->es_base_plpmtu - TRANSPORT_OVERHEAD(is_ipv6);
    else if (is_ipv6)
        size = IQUIC_MAX_IPv6_PACKET_SZ;
    else
        size = IQUIC_MAX_IPv4_PACKET_SZ;

    return size;
}


static void
migra_begin (struct ietf_full_conn *conn, struct conn_path *copath,
            struct dcid_elem *dce, const struct sockaddr *dest_sa,
            const struct transport_params *params)
{
    assert(!(migra_is_on(conn, copath - conn->ifc_paths)));

    dce->de_flags |= DE_ASSIGNED;
    copath->cop_flags |= COP_INITIALIZED;
    copath->cop_path.np_dcid = dce->de_cid;
    copath->cop_path.np_peer_ctx = CUR_NPATH(conn)->np_peer_ctx;
    copath->cop_path.np_pack_size
                = calc_base_packet_size(conn, NP_IS_IPv6(CUR_NPATH(conn)));
    if (conn->ifc_max_udp_payload < copath->cop_path.np_pack_size)
        copath->cop_path.np_pack_size = conn->ifc_max_udp_payload;
    memcpy(&copath->cop_path.np_local_addr, NP_LOCAL_SA(CUR_NPATH(conn)),
                                    sizeof(copath->cop_path.np_local_addr));
    memcpy(&copath->cop_path.np_peer_addr, dest_sa,
                                    sizeof(copath->cop_path.np_peer_addr));

    conn->ifc_mig_path_id = copath - conn->ifc_paths;
    conn->ifc_used_paths |= 1 << conn->ifc_mig_path_id;
    conn->ifc_send_flags |= SF_SEND_PATH_CHAL << conn->ifc_mig_path_id;
    LSQ_DEBUG("Schedule migration to path %hhu: will send PATH_CHALLENGE",
                                                    conn->ifc_mig_path_id);
}


static void
ping_alarm_expired (enum alarm_id al_id, void *ctx, lsquic_time_t expiry,
                                                            lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    LSQ_DEBUG("Ping alarm rang: schedule PING frame to be generated");
    conn->ifc_send_flags |= SF_SEND_PING;
}


static void
retire_cid (struct ietf_full_conn *, struct conn_cid_elem *, lsquic_time_t);


static void
log_scids (const struct ietf_full_conn *conn)
{
    const struct lsquic_conn *const lconn = &conn->ifc_conn;
    const struct conn_cid_elem *cce;
    char flags[5];
    unsigned idx;
    int fi;

    LSQ_DEBUG("Log SCID array: (n_cces %hhu; mask: 0x%hhX; "
        "active: %hhu; limit: %hhu)",
        conn->ifc_conn.cn_n_cces, conn->ifc_conn.cn_cces_mask,
        conn->ifc_active_cids_count, conn->ifc_active_cids_limit);
    for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce)
    {
        idx = cce - lconn->cn_cces;
        fi = 0;
        if (cce->cce_flags & CCE_PORT)  flags[fi++] = 'p';
        if (cce->cce_flags & CCE_REG)   flags[fi++] = 'r';
        if (cce->cce_flags & CCE_SEQNO) flags[fi++] = 's';
        if (cce->cce_flags & CCE_USED)  flags[fi++] = 'u';
        flags[fi] = '\0';
        if (lconn->cn_cces_mask & (1 << idx))
        {
            if (cce->cce_flags & CCE_PORT)
                LSQ_DEBUG( "  %u: flags %-4s; port %hu", idx, flags,
                                                            cce->cce_port);
            else if (cce->cce_flags & CCE_SEQNO)
                LSQ_DEBUGC("  %u: flags %-4s; seqno: %u; %"CID_FMT, idx,
                            flags, cce->cce_seqno, CID_BITS(&cce->cce_cid));
            else
                LSQ_DEBUGC("  %u: flags %-4s; %"CID_FMT, idx, flags,
                                                    CID_BITS(&cce->cce_cid));
        }
        else
            LSQ_DEBUG( "  %u: flags %-4s; <empty>", idx, flags);
    }
}


#define LOG_SCIDS(conn_) do {                                               \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        log_scids(conn_);                                                   \
} while (0)
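/* Added note: the alarm handler below runs RET_CID_TIMEOUT (two seconds)
 * after a server connection is promoted from the mini connection -- see
 * where AL_RET_CIDS is set in lsquic_ietf_full_conn_server_new().  CCE slots
 * that carry neither CCE_SEQNO nor CCE_PORT hold the original connection IDs
 * used during the handshake, and those are the ones retired here.
 */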
static void
ret_cids_alarm_expired (enum alarm_id al_id, void *ctx, lsquic_time_t expiry,
                                                            lsquic_time_t now)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) ctx;
    struct lsquic_conn *const lconn = &conn->ifc_conn;
    struct conn_cid_elem *cce;
    unsigned idx;

    LSQ_DEBUG("The 'retire original CIDs' alarm rang");

    for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce)
    {
        idx = cce - lconn->cn_cces;
        if ((lconn->cn_cces_mask & (1 << idx))
                        && (cce->cce_flags & (CCE_SEQNO|CCE_PORT)) == 0)
        {
            LSQ_DEBUG("retiring original CID at index %u", idx);
            retire_cid(conn, cce, now);
        }
    }
    LOG_SCIDS(conn);
}


static ssize_t
crypto_stream_write (void *stream, const void *buf, size_t len)
{
    return lsquic_stream_write(stream, buf, len);
}


static int
crypto_stream_flush (void *stream)
{
    return lsquic_stream_flush(stream);
}


static ssize_t
crypto_stream_readf (void *stream,
    size_t (*readf)(void *, const unsigned char *, size_t, int), void *ctx)
{
    return lsquic_stream_readf(stream, readf, ctx);
}


static int
crypto_stream_wantwrite (void *stream, int is_want)
{
    return lsquic_stream_wantwrite(stream, is_want);
}


static int
crypto_stream_wantread (void *stream, int is_want)
{
    return lsquic_stream_wantread(stream, is_want);
}


static enum enc_level
crypto_stream_enc_level (void *streamp)
{
    const struct lsquic_stream *stream = streamp;
    return crypto_level(stream);
}


static const struct crypto_stream_if crypto_stream_if =
{
    .csi_write      = crypto_stream_write,
    .csi_flush      = crypto_stream_flush,
    .csi_readf      = crypto_stream_readf,
    .csi_wantwrite  = crypto_stream_wantwrite,
    .csi_wantread   = crypto_stream_wantread,
    .csi_enc_level  = crypto_stream_enc_level,
};


static const struct lsquic_stream_if *unicla_if_ptr;


static lsquic_stream_id_t
generate_stream_id (struct ietf_full_conn *conn, enum stream_dir sd)
{
    lsquic_stream_id_t id;

    id = conn->ifc_n_created_streams[sd]++;
    return id << SIT_SHIFT
         | sd << SD_SHIFT
         | !!(conn->ifc_flags & IFC_SERVER)
         ;
}


static lsquic_stream_id_t
avail_streams_count (const struct ietf_full_conn *conn, int server,
                                                        enum stream_dir sd)
{
    enum stream_id_type sit;
    lsquic_stream_id_t max_count;

    sit = gen_sit(server, sd);
    max_count = conn->ifc_max_allowed_stream_id[sit] >> SIT_SHIFT;
    LSQ_DEBUG("sit-%u streams: max count: %"PRIu64"; created streams: %"PRIu64,
        sit, max_count, conn->ifc_n_created_streams[sd]);
    if (max_count >= conn->ifc_n_created_streams[sd])
        return max_count - conn->ifc_n_created_streams[sd];
    else
    {
        assert(0);
        return 0;
    }
}


/* If `priority' is negative, this means that the stream is critical */
static int
create_uni_stream_out (struct ietf_full_conn *conn, int priority,
        const struct lsquic_stream_if *stream_if, void *stream_if_ctx)
{
    struct lsquic_stream *stream;
    lsquic_stream_id_t stream_id;

    stream_id = generate_stream_id(conn, SD_UNI);
    stream = lsquic_stream_new(stream_id, &conn->ifc_pub, stream_if,
                stream_if_ctx, 0, conn->ifc_max_stream_data_uni,
                SCF_IETF | (priority < 0 ?
SCF_CRITICAL : 0)); 935 if (!stream) 936 return -1; 937 if (!lsquic_hash_insert(conn->ifc_pub.all_streams, &stream->id, 938 sizeof(stream->id), stream, &stream->sm_hash_el)) 939 { 940 lsquic_stream_destroy(stream); 941 return -1; 942 } 943 if (priority >= 0) 944 lsquic_stream_set_priority_internal(stream, priority); 945 lsquic_stream_call_on_new(stream); 946 return 0; 947} 948 949 950static int 951create_ctl_stream_out (struct ietf_full_conn *conn) 952{ 953 return create_uni_stream_out(conn, -1, 954 lsquic_hcso_writer_if, &conn->ifc_hcso); 955} 956 957 958static int 959create_qenc_stream_out (struct ietf_full_conn *conn) 960{ 961 return create_uni_stream_out(conn, -1, 962 lsquic_qeh_enc_sm_out_if, &conn->ifc_qeh); 963} 964 965 966static int 967create_qdec_stream_out (struct ietf_full_conn *conn) 968{ 969 return create_uni_stream_out(conn, -1, 970 lsquic_qdh_dec_sm_out_if, &conn->ifc_qdh); 971} 972 973 974static int 975create_bidi_stream_out (struct ietf_full_conn *conn) 976{ 977 struct lsquic_stream *stream; 978 lsquic_stream_id_t stream_id; 979 enum stream_ctor_flags flags; 980 981 flags = SCF_IETF|SCF_DI_AUTOSWITCH; 982 if (conn->ifc_enpub->enp_settings.es_rw_once) 983 flags |= SCF_DISP_RW_ONCE; 984 if (conn->ifc_flags & IFC_HTTP) 985 flags |= SCF_HTTP; 986 987 stream_id = generate_stream_id(conn, SD_BIDI); 988 stream = lsquic_stream_new(stream_id, &conn->ifc_pub, 989 conn->ifc_enpub->enp_stream_if, 990 conn->ifc_enpub->enp_stream_if_ctx, 991 conn->ifc_settings->es_init_max_stream_data_bidi_local, 992 conn->ifc_cfg.max_stream_send, flags); 993 if (!stream) 994 return -1; 995 if (!lsquic_hash_insert(conn->ifc_pub.all_streams, &stream->id, 996 sizeof(stream->id), stream, &stream->sm_hash_el)) 997 { 998 lsquic_stream_destroy(stream); 999 return -1; 1000 } 1001 lsquic_stream_call_on_new(stream); 1002 return 0; 1003} 1004 1005 1006static struct lsquic_stream * 1007create_push_stream (struct ietf_full_conn *conn) 1008{ 1009 struct lsquic_stream *stream; 1010 lsquic_stream_id_t stream_id; 1011 enum stream_ctor_flags flags; 1012 1013 assert((conn->ifc_flags & (IFC_SERVER|IFC_HTTP)) == (IFC_SERVER|IFC_HTTP)); 1014 1015 flags = SCF_IETF|SCF_HTTP; 1016 if (conn->ifc_enpub->enp_settings.es_rw_once) 1017 flags |= SCF_DISP_RW_ONCE; 1018 1019 stream_id = generate_stream_id(conn, SD_UNI); 1020 stream = lsquic_stream_new(stream_id, &conn->ifc_pub, 1021 conn->ifc_enpub->enp_stream_if, 1022 conn->ifc_enpub->enp_stream_if_ctx, 1023 conn->ifc_settings->es_init_max_stream_data_bidi_local, 1024 conn->ifc_cfg.max_stream_send, flags); 1025 if (!stream) 1026 return NULL; 1027 if (!lsquic_hash_insert(conn->ifc_pub.all_streams, &stream->id, 1028 sizeof(stream->id), stream, &stream->sm_hash_el)) 1029 { 1030 lsquic_stream_destroy(stream); 1031 return NULL; 1032 } 1033 return stream; 1034} 1035 1036 1037/* This function looks through the SCID array searching for an available 1038 * slot. If it finds an available slot it will 1039 * 1. generate an SCID, 1040 * 2. mark with latest seqno, 1041 * 3. increment seqno, 1042 * 4. turn on CCE_SEQNO flag, 1043 * 5. turn on flag given through flag paramter, 1044 * 6. add cce to mask, and 1045 * 7. add timestamp for when slot is new available for CID generation. 
1046 */ 1047static struct conn_cid_elem * 1048ietf_full_conn_add_scid (struct ietf_full_conn *conn, 1049 struct lsquic_engine_public *enpub, 1050 enum conn_cce_flags flags, 1051 lsquic_time_t now) 1052{ 1053 struct conn_cid_elem *cce; 1054 struct lsquic_conn *lconn = &conn->ifc_conn; 1055 lsquic_time_t *min_timestamp; 1056 int i; 1057 1058 if (enpub->enp_settings.es_scid_len) 1059 { 1060 for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce) 1061 if (!(lconn->cn_cces_mask & (1 << (cce - lconn->cn_cces)))) 1062 break; 1063 } 1064 else if (0 == lconn->cn_cces_mask) 1065 cce = lconn->cn_cces; 1066 else 1067 cce = END_OF_CCES(lconn); 1068 1069 if (cce >= END_OF_CCES(lconn)) 1070 { 1071 LSQ_LOG1(LSQ_LOG_DEBUG, "cannot find slot for new SCID"); 1072 return NULL; 1073 } 1074 1075 if (enpub->enp_settings.es_scid_len) 1076 lsquic_generate_cid(&cce->cce_cid, enpub->enp_settings.es_scid_len); 1077 cce->cce_seqno = conn->ifc_scid_seqno++; 1078 cce->cce_flags |= CCE_SEQNO | flags; 1079 lconn->cn_cces_mask |= 1 << (cce - lconn->cn_cces); 1080 ++conn->ifc_active_cids_count; 1081 if (enpub->enp_settings.es_scid_iss_rate) 1082 { 1083 min_timestamp = &conn->ifc_scid_timestamp[0]; 1084 for (i = 1; i < lconn->cn_n_cces; i++) 1085 if (conn->ifc_scid_timestamp[i] < *min_timestamp) 1086 min_timestamp = &conn->ifc_scid_timestamp[i]; 1087 *min_timestamp = now; 1088 } 1089 LSQ_LOG1C(LSQ_LOG_DEBUG, "generated and assigned SCID %"CID_FMT, 1090 CID_BITS(&cce->cce_cid)); 1091 return cce; 1092} 1093 1094 1095/* From [draft-ietf-quic-transport-25] Section 17.3.1: 1096 * " endpoints MUST disable their use of the spin bit for a random selection 1097 * " of at least one in every 16 network paths, or for one in every 16 1098 * " connection IDs. 1099 */ 1100static void 1101maybe_enable_spin (struct ietf_full_conn *conn) 1102{ 1103 uint8_t nyb; 1104 1105 if (!conn->ifc_settings->es_spin) 1106 { 1107 conn->ifc_flags &= ~IFC_SPIN; 1108 LSQ_DEBUG("spin bit disabled via settings"); 1109 } 1110 else if (lsquic_crand_get_nybble(conn->ifc_enpub->enp_crand)) 1111 { 1112 conn->ifc_flags |= IFC_SPIN; 1113 conn->ifc_spin_bit = 0; 1114 LSQ_DEBUG("spin bit enabled"); 1115 } 1116 else 1117 { 1118 /* " It is RECOMMENDED that endpoints set the spin bit to a random 1119 * " value either chosen independently for each packet or chosen 1120 * " independently for each connection ID. 1121 * (ibid.) 
1122 */ 1123 conn->ifc_flags &= ~IFC_SPIN; 1124 nyb = lsquic_crand_get_nybble(conn->ifc_enpub->enp_crand); 1125 conn->ifc_spin_bit = nyb & 1; 1126 LSQ_DEBUG("spin bit randomly disabled; random spin bit value is %d", 1127 conn->ifc_spin_bit); 1128 } 1129} 1130 1131 1132static int 1133ietf_full_conn_init (struct ietf_full_conn *conn, 1134 struct lsquic_engine_public *enpub, unsigned flags, int ecn) 1135{ 1136 if (flags & IFC_SERVER) 1137 conn->ifc_conn.cn_if = ietf_full_conn_iface_ptr; 1138 else 1139 conn->ifc_conn.cn_if = ietf_full_conn_prehsk_iface_ptr; 1140 if (enpub->enp_settings.es_scid_len) 1141 assert(CN_SCID(&conn->ifc_conn)->len); 1142 conn->ifc_enpub = enpub; 1143 conn->ifc_settings = &enpub->enp_settings; 1144 conn->ifc_pub.lconn = &conn->ifc_conn; 1145 conn->ifc_pub.send_ctl = &conn->ifc_send_ctl; 1146 conn->ifc_pub.enpub = enpub; 1147 conn->ifc_pub.mm = &enpub->enp_mm; 1148 conn->ifc_pub.path = CUR_NPATH(conn); 1149 TAILQ_INIT(&conn->ifc_pub.sending_streams); 1150 TAILQ_INIT(&conn->ifc_pub.read_streams); 1151 TAILQ_INIT(&conn->ifc_pub.write_streams); 1152 TAILQ_INIT(&conn->ifc_pub.service_streams); 1153 STAILQ_INIT(&conn->ifc_stream_ids_to_ss); 1154 TAILQ_INIT(&conn->ifc_to_retire); 1155 1156 lsquic_alarmset_init(&conn->ifc_alset, &conn->ifc_conn); 1157 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_IDLE, idle_alarm_expired, conn); 1158 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_ACK_APP, ack_alarm_expired, conn); 1159 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PING, ping_alarm_expired, conn); 1160 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_HANDSHAKE, handshake_alarm_expired, conn); 1161 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_CID_THROT, cid_throt_alarm_expired, conn); 1162 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PATH_CHAL_0, path_chal_alarm_expired, conn); 1163 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PATH_CHAL_1, path_chal_alarm_expired, conn); 1164 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PATH_CHAL_2, path_chal_alarm_expired, conn); 1165 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_PATH_CHAL_3, path_chal_alarm_expired, conn); 1166 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_BLOCKED_KA, blocked_ka_alarm_expired, conn); 1167 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_MTU_PROBE, mtu_probe_alarm_expired, conn); 1168 lsquic_rechist_init(&conn->ifc_rechist[PNS_INIT], &conn->ifc_conn, 1); 1169 lsquic_rechist_init(&conn->ifc_rechist[PNS_HSK], &conn->ifc_conn, 1); 1170 lsquic_rechist_init(&conn->ifc_rechist[PNS_APP], &conn->ifc_conn, 1); 1171 lsquic_send_ctl_init(&conn->ifc_send_ctl, &conn->ifc_alset, enpub, 1172 flags & IFC_SERVER ? &server_ver_neg : &conn->ifc_u.cli.ifcli_ver_neg, 1173 &conn->ifc_pub, SC_IETF|SC_NSTP|(ecn ? 
SC_ECN : 0)); 1174 lsquic_cfcw_init(&conn->ifc_pub.cfcw, &conn->ifc_pub, 1175 conn->ifc_settings->es_init_max_data); 1176 conn->ifc_pub.all_streams = lsquic_hash_create(); 1177 if (!conn->ifc_pub.all_streams) 1178 return -1; 1179 conn->ifc_pub.u.ietf.qeh = &conn->ifc_qeh; 1180 conn->ifc_pub.u.ietf.qdh = &conn->ifc_qdh; 1181 1182 conn->ifc_peer_hq_settings.header_table_size = HQ_DF_QPACK_MAX_TABLE_CAPACITY; 1183 conn->ifc_peer_hq_settings.max_header_list_size = HQ_DF_MAX_HEADER_LIST_SIZE; 1184 conn->ifc_peer_hq_settings.qpack_blocked_streams = HQ_DF_QPACK_BLOCKED_STREAMS; 1185 1186 conn->ifc_flags = flags | IFC_CREATED_OK | IFC_FIRST_TICK; 1187 conn->ifc_max_ack_packno[PNS_INIT] = IQUIC_INVALID_PACKNO; 1188 conn->ifc_max_ack_packno[PNS_HSK] = IQUIC_INVALID_PACKNO; 1189 conn->ifc_max_ack_packno[PNS_APP] = IQUIC_INVALID_PACKNO; 1190 conn->ifc_paths[0].cop_path.np_path_id = 0; 1191 conn->ifc_paths[1].cop_path.np_path_id = 1; 1192 conn->ifc_paths[2].cop_path.np_path_id = 2; 1193 conn->ifc_paths[3].cop_path.np_path_id = 3; 1194#define valid_stream_id(v) ((v) <= VINT_MAX_VALUE) 1195 conn->ifc_max_req_id = VINT_MAX_VALUE + 1; 1196 conn->ifc_ping_unretx_thresh = 20; 1197 conn->ifc_max_retx_since_last_ack = MAX_RETR_PACKETS_SINCE_LAST_ACK; 1198 maybe_enable_spin(conn); 1199 if (conn->ifc_settings->es_noprogress_timeout) 1200 conn->ifc_mflags |= MF_NOPROG_TIMEOUT; 1201 return 0; 1202} 1203 1204 1205struct lsquic_conn * 1206lsquic_ietf_full_conn_client_new (struct lsquic_engine_public *enpub, 1207 unsigned versions, unsigned flags, 1208 const char *hostname, unsigned short base_plpmtu, int is_ipv4, 1209 const unsigned char *sess_resume, size_t sess_resume_sz, 1210 const unsigned char *token, size_t token_sz) 1211{ 1212 const struct enc_session_funcs_iquic *esfi; 1213 struct ietf_full_conn *conn; 1214 enum lsquic_version ver, sess_resume_version; 1215 lsquic_time_t now; 1216 1217 conn = calloc(1, sizeof(*conn)); 1218 if (!conn) 1219 goto err0; 1220 now = lsquic_time_now(); 1221 /* Set the flags early so that correct CID is used for logging */ 1222 conn->ifc_conn.cn_flags |= LSCONN_IETF; 1223 conn->ifc_conn.cn_cces = conn->ifc_cces; 1224 conn->ifc_conn.cn_n_cces = sizeof(conn->ifc_cces) 1225 / sizeof(conn->ifc_cces[0]); 1226 if (!ietf_full_conn_add_scid(conn, enpub, CCE_USED, now)) 1227 goto err1; 1228 1229 assert(versions); 1230 versions &= LSQUIC_IETF_VERSIONS; 1231 ver = highest_bit_set(versions); 1232 if (sess_resume) 1233 { 1234 sess_resume_version = lsquic_sess_resume_version(sess_resume, sess_resume_sz); 1235 if (sess_resume_version < N_LSQVER && ((1 << sess_resume_version) & versions)) 1236 ver = sess_resume_version; 1237 } 1238 esfi = select_esf_iquic_by_ver(ver); 1239 1240 if (0 != ietf_full_conn_init(conn, enpub, flags, 1241 enpub->enp_settings.es_ecn)) 1242 goto err2; 1243 1244 if (base_plpmtu) 1245 conn->ifc_paths[0].cop_path.np_pack_size 1246 = base_plpmtu - TRANSPORT_OVERHEAD(!is_ipv4); 1247 else 1248 conn->ifc_paths[0].cop_path.np_pack_size 1249 = calc_base_packet_size(conn, !is_ipv4); 1250 1251 if (token) 1252 { 1253 if (0 != lsquic_send_ctl_set_token(&conn->ifc_send_ctl, token, 1254 token_sz)) 1255 goto err2; 1256 } 1257 1258 /* Do not infer anything about server limits before processing its 1259 * transport parameters. 
1260 */ 1261 conn->ifc_max_streams_in[SD_BIDI] = enpub->enp_settings.es_max_streams_in; 1262 conn->ifc_max_allowed_stream_id[SIT_BIDI_SERVER] = 1263 enpub->enp_settings.es_max_streams_in << SIT_SHIFT; 1264 1265 if (flags & IFC_HTTP) 1266 { 1267 if (enpub->enp_settings.es_support_push && CLIENT_PUSH_SUPPORT) 1268 conn->ifc_max_streams_in[SD_UNI] 1269 = MAX(3, enpub->enp_settings.es_max_streams_in); 1270 else 1271 conn->ifc_max_streams_in[SD_UNI] = 3; 1272 } 1273 else 1274 conn->ifc_max_streams_in[SD_UNI] = enpub->enp_settings.es_max_streams_in; 1275 conn->ifc_max_allowed_stream_id[SIT_UNI_SERVER] 1276 = conn->ifc_max_streams_in[SD_UNI] << SIT_SHIFT; 1277 1278 init_ver_neg(conn, versions, &ver); 1279 assert(ver == conn->ifc_u.cli.ifcli_ver_neg.vn_ver); 1280 if (conn->ifc_settings->es_handshake_to) 1281 lsquic_alarmset_set(&conn->ifc_alset, AL_HANDSHAKE, 1282 lsquic_time_now() + conn->ifc_settings->es_handshake_to); 1283 conn->ifc_idle_to = conn->ifc_settings->es_idle_timeout * 1000000; 1284 if (conn->ifc_idle_to) 1285 lsquic_alarmset_set(&conn->ifc_alset, AL_IDLE, now + conn->ifc_idle_to); 1286 if (enpub->enp_settings.es_support_push && CLIENT_PUSH_SUPPORT) 1287 { 1288 conn->ifc_u.cli.ifcli_flags |= IFCLI_PUSH_ENABLED; 1289 conn->ifc_u.cli.ifcli_max_push_id = 100; 1290 LSQ_DEBUG("push enabled: set MAX_PUSH_ID to %"PRIu64, 1291 conn->ifc_u.cli.ifcli_max_push_id); 1292 } 1293 conn->ifc_conn.cn_pf = select_pf_by_ver(ver); 1294 conn->ifc_conn.cn_esf_c = select_esf_common_by_ver(ver); 1295 conn->ifc_conn.cn_esf.i = esfi; 1296 lsquic_generate_cid(CUR_DCID(conn), 0); 1297 conn->ifc_conn.cn_enc_session = 1298 conn->ifc_conn.cn_esf.i->esfi_create_client(hostname, 1299 conn->ifc_enpub, &conn->ifc_conn, CUR_DCID(conn), 1300 &conn->ifc_u.cli.ifcli_ver_neg, 1301 (void **) conn->ifc_u.cli.crypto_streams, &crypto_stream_if, 1302 sess_resume, sess_resume_sz, &conn->ifc_alset, 1303 conn->ifc_max_streams_in[SD_UNI]); 1304 if (!conn->ifc_conn.cn_enc_session) 1305 goto err2; 1306 1307 conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR] = lsquic_stream_new_crypto( 1308 ENC_LEV_CLEAR, &conn->ifc_pub, &lsquic_cry_sm_if, 1309 conn->ifc_conn.cn_enc_session, 1310 SCF_IETF|SCF_DI_AUTOSWITCH|SCF_CALL_ON_NEW|SCF_CRITICAL); 1311 if (!conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR]) 1312 goto err3; 1313 if (!lsquic_stream_get_ctx(conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR])) 1314 goto err4; 1315 conn->ifc_pub.packet_out_malo = 1316 lsquic_malo_create(sizeof(struct lsquic_packet_out)); 1317 if (!conn->ifc_pub.packet_out_malo) 1318 goto err4; 1319 conn->ifc_flags |= IFC_PROC_CRYPTO; 1320 1321 LSQ_DEBUG("negotiating version %s", 1322 lsquic_ver2str[conn->ifc_u.cli.ifcli_ver_neg.vn_ver]); 1323 conn->ifc_process_incoming_packet = process_incoming_packet_verneg; 1324 conn->ifc_created = now; 1325 LSQ_DEBUG("logging using %s SCID", 1326 LSQUIC_LOG_CONN_ID == CN_SCID(&conn->ifc_conn) ? "client" : "server"); 1327 return &conn->ifc_conn; 1328 1329 err4: 1330 lsquic_stream_destroy(conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR]); 1331 err3: 1332 conn->ifc_conn.cn_esf.i->esfi_destroy(conn->ifc_conn.cn_enc_session); 1333 err2: 1334 lsquic_send_ctl_cleanup(&conn->ifc_send_ctl); 1335 if (conn->ifc_pub.all_streams) 1336 lsquic_hash_destroy(conn->ifc_pub.all_streams); 1337 err1: 1338 free(conn); 1339 err0: 1340 return NULL; 1341} 1342 1343 1344typedef char mini_conn_does_not_have_more_cces[ 1345 sizeof(((struct ietf_mini_conn *)0)->imc_cces) 1346 <= sizeof(((struct ietf_full_conn *)0)->ifc_cces) ? 
1 : -1]; 1347 1348struct lsquic_conn * 1349lsquic_ietf_full_conn_server_new (struct lsquic_engine_public *enpub, 1350 unsigned flags, struct lsquic_conn *mini_conn) 1351{ 1352 struct ietf_mini_conn *const imc = (void *) mini_conn; 1353 struct ietf_full_conn *conn; 1354 struct lsquic_packet_out *packet_out; 1355 struct lsquic_packet_in *packet_in; 1356 struct conn_cid_elem *cce; 1357 int have_outgoing_ack; 1358 lsquic_packno_t next_packno; 1359 lsquic_time_t now; 1360 packno_set_t set; 1361 enum packnum_space pns; 1362 unsigned i; 1363 1364 conn = calloc(1, sizeof(*conn)); 1365 if (!conn) 1366 goto err0; 1367 now = lsquic_time_now(); 1368 conn->ifc_conn.cn_cces = conn->ifc_cces; 1369 conn->ifc_conn.cn_n_cces = sizeof(conn->ifc_cces) 1370 / sizeof(conn->ifc_cces[0]); 1371 assert(conn->ifc_conn.cn_n_cces >= mini_conn->cn_n_cces); 1372 conn->ifc_conn.cn_cur_cce_idx = mini_conn->cn_cur_cce_idx; 1373 conn->ifc_conn.cn_cces_mask = mini_conn->cn_cces_mask; 1374 for (cce = mini_conn->cn_cces, i = 0; cce < END_OF_CCES(mini_conn); 1375 ++cce, ++i) 1376 if ((1 << (cce - mini_conn->cn_cces)) & mini_conn->cn_cces_mask) 1377 { 1378 conn->ifc_conn.cn_cces[i].cce_cid = cce->cce_cid; 1379 conn->ifc_conn.cn_cces[i].cce_flags = cce->cce_flags; 1380 if (cce->cce_flags & CCE_SEQNO) 1381 { 1382 if (cce->cce_seqno > conn->ifc_scid_seqno) 1383 conn->ifc_scid_seqno = cce->cce_seqno; 1384 conn->ifc_conn.cn_cces[i].cce_seqno = cce->cce_seqno; 1385 ++conn->ifc_active_cids_count; 1386 } 1387 conn->ifc_scid_timestamp[i] = now; 1388 } 1389 ++conn->ifc_scid_seqno; 1390 1391 /* Set the flags early so that correct CID is used for logging */ 1392 conn->ifc_conn.cn_flags |= LSCONN_IETF | LSCONN_SERVER; 1393 1394 if (0 != ietf_full_conn_init(conn, enpub, flags, 1395 lsquic_mini_conn_ietf_ecn_ok(imc))) 1396 goto err1; 1397 conn->ifc_pub.packet_out_malo = 1398 lsquic_malo_create(sizeof(struct lsquic_packet_out)); 1399 if (!conn->ifc_pub.packet_out_malo) 1400 goto err1; 1401 if (imc->imc_flags & IMC_IGNORE_INIT) 1402 conn->ifc_flags |= IFC_IGNORE_INIT; 1403 1404 conn->ifc_paths[0].cop_path = imc->imc_path; 1405 conn->ifc_paths[0].cop_flags = COP_VALIDATED; 1406 conn->ifc_used_paths = 1 << 0; 1407 if (imc->imc_flags & IMC_ADDR_VALIDATED) 1408 lsquic_send_ctl_path_validated(&conn->ifc_send_ctl); 1409 else 1410 conn->ifc_mflags |= MF_VALIDATE_PATH; 1411 conn->ifc_pub.bytes_out = imc->imc_bytes_out; 1412 conn->ifc_pub.bytes_in = imc->imc_bytes_in; 1413 if (imc->imc_flags & IMC_PATH_CHANGED) 1414 { 1415 LSQ_DEBUG("path changed during mini conn: schedule PATH_CHALLENGE"); 1416 conn->ifc_send_flags |= SF_SEND_PATH_CHAL_PATH_0; 1417 } 1418 1419 conn->ifc_max_streams_in[SD_BIDI] 1420 = enpub->enp_settings.es_init_max_streams_bidi; 1421 conn->ifc_max_allowed_stream_id[SIT_BIDI_CLIENT] 1422 = conn->ifc_max_streams_in[SD_BIDI] << SIT_SHIFT; 1423 conn->ifc_max_streams_in[SD_UNI] 1424 = enpub->enp_settings.es_init_max_streams_uni; 1425 conn->ifc_max_allowed_stream_id[SIT_UNI_CLIENT] 1426 = conn->ifc_max_streams_in[SD_UNI] << SIT_SHIFT; 1427 conn->ifc_conn.cn_version = mini_conn->cn_version; 1428 conn->ifc_conn.cn_flags |= LSCONN_VER_SET; 1429 conn->ifc_conn.cn_pf = mini_conn->cn_pf; 1430 conn->ifc_conn.cn_esf_c = mini_conn->cn_esf_c; 1431 conn->ifc_conn.cn_esf = mini_conn->cn_esf; 1432 1433 if (enpub->enp_settings.es_support_push) 1434 conn->ifc_u.ser.ifser_flags |= IFSER_PUSH_ENABLED; 1435 if (flags & IFC_HTTP) 1436 { 1437 fiu_do_on("full_conn_ietf/promise_hash", goto promise_alloc_failed); 1438 conn->ifc_pub.u.ietf.promises = 
lsquic_hash_create(); 1439#if FIU_ENABLE 1440 promise_alloc_failed: 1441#endif 1442 if (!conn->ifc_pub.u.ietf.promises) 1443 goto err2; 1444 } 1445 1446 assert(mini_conn->cn_flags & LSCONN_HANDSHAKE_DONE); 1447 conn->ifc_conn.cn_flags |= LSCONN_HANDSHAKE_DONE; 1448 if (!(imc->imc_flags & IMC_HSK_DONE_SENT)) 1449 { 1450 LSQ_DEBUG("HANDSHAKE_DONE not yet sent, will process CRYPTO frames"); 1451 conn->ifc_flags |= IFC_PROC_CRYPTO; 1452 } 1453 1454 conn->ifc_conn.cn_enc_session = mini_conn->cn_enc_session; 1455 mini_conn->cn_enc_session = NULL; 1456 conn->ifc_conn.cn_esf_c->esf_set_conn(conn->ifc_conn.cn_enc_session, 1457 &conn->ifc_conn); 1458 conn->ifc_process_incoming_packet = process_incoming_packet_fast; 1459 1460 conn->ifc_send_ctl.sc_cur_packno = imc->imc_next_packno - 1; 1461 lsquic_send_ctl_begin_optack_detection(&conn->ifc_send_ctl); 1462 1463 for (pns = 0; pns < N_PNS; ++pns) 1464 { 1465 for (set = imc->imc_recvd_packnos[pns], i = 0; 1466 set && i < MAX_PACKETS; set &= ~(1ULL << i), ++i) 1467 if (set & (1ULL << i)) 1468 (void) lsquic_rechist_received(&conn->ifc_rechist[pns], i, 0); 1469 if (i) 1470 conn->ifc_rechist[pns].rh_largest_acked_received 1471 = imc->imc_largest_recvd[pns]; 1472 } 1473 1474 /* Mini connection sends out packets 0, 1, 2... and so on. It deletes 1475 * packets that have been successfully sent and acked or those that have 1476 * been lost. We take ownership of all packets in mc_packets_out; those 1477 * that are not on the list are recorded in fc_send_ctl.sc_senhist. 1478 */ 1479 have_outgoing_ack = 0; 1480 next_packno = ~0ULL; 1481 /* mini conn may drop Init packets, making gaps; don't warn about them: */ 1482 conn->ifc_send_ctl.sc_senhist.sh_flags |= SH_GAP_OK; 1483 while ((packet_out = TAILQ_FIRST(&imc->imc_packets_out))) 1484 { 1485 TAILQ_REMOVE(&imc->imc_packets_out, packet_out, po_next); 1486 1487 /* Holes in the sequence signify no-longer-relevant Initial packets or 1488 * ACKed or lost packets. 1489 */ 1490 ++next_packno; 1491 for ( ; next_packno < packet_out->po_packno; ++next_packno) 1492 { 1493 lsquic_senhist_add(&conn->ifc_send_ctl.sc_senhist, next_packno); 1494 conn->ifc_send_ctl.sc_senhist.sh_warn_thresh = next_packno; 1495 } 1496 1497 packet_out->po_path = CUR_NPATH(conn); 1498 if (imc->imc_sent_packnos & (1ULL << packet_out->po_packno)) 1499 { 1500 LSQ_DEBUG("got sent packet_out %"PRIu64" from mini", 1501 packet_out->po_packno); 1502 if (0 != lsquic_send_ctl_sent_packet(&conn->ifc_send_ctl, 1503 packet_out)) 1504 { 1505 LSQ_WARN("could not add packet %"PRIu64" to sent set: %s", 1506 packet_out->po_packno, strerror(errno)); 1507 goto err2; 1508 } 1509 } 1510 else 1511 { 1512 LSQ_DEBUG("got unsent packet_out %"PRIu64" from mini (will send)", 1513 packet_out->po_packno); 1514 lsquic_send_ctl_scheduled_one(&conn->ifc_send_ctl, packet_out); 1515 have_outgoing_ack |= packet_out->po_frame_types & 1516 (1 << QUIC_FRAME_ACK); 1517 } 1518 } 1519 conn->ifc_send_ctl.sc_senhist.sh_flags &= ~SH_GAP_OK; 1520 /* ...Yes, that's a bunch of little annoying steps to suppress the gap 1521 * warnings, but it would have been even more annoying (and expensive) 1522 * to add packet renumbering logic to the mini conn. 
1523 */ 1524 1525 for (pns = 0; pns < N_PNS; ++pns) 1526 for (i = 0; i < 4; ++i) 1527 { 1528 conn->ifc_ecn_counts_in[pns][i] = imc->imc_ecn_counts_in[pns][i]; 1529 conn->ifc_ecn_counts_out[pns][i] = imc->imc_ecn_counts_out[pns][i]; 1530 } 1531 conn->ifc_incoming_ecn = imc->imc_incoming_ecn; 1532 conn->ifc_pub.rtt_stats = imc->imc_rtt_stats; 1533 1534 lsquic_alarmset_init_alarm(&conn->ifc_alset, AL_RET_CIDS, 1535 ret_cids_alarm_expired, conn); 1536 lsquic_alarmset_set(&conn->ifc_alset, AL_RET_CIDS, 1537 now + RET_CID_TIMEOUT); 1538 1539 conn->ifc_last_live_update = now; 1540 1541 LSQ_DEBUG("Calling on_new_conn callback"); 1542 conn->ifc_conn_ctx = conn->ifc_enpub->enp_stream_if->on_new_conn( 1543 conn->ifc_enpub->enp_stream_if_ctx, &conn->ifc_conn); 1544 1545 /* TODO: do something if there is outgoing ACK */ 1546 1547 if (0 != handshake_ok(&conn->ifc_conn)) 1548 goto err3; 1549 1550 conn->ifc_created = imc->imc_created; 1551 conn->ifc_idle_to = conn->ifc_settings->es_idle_timeout * 1000000; 1552 if (conn->ifc_idle_to) 1553 lsquic_alarmset_set(&conn->ifc_alset, AL_IDLE, 1554 imc->imc_created + conn->ifc_idle_to); 1555 while ((packet_in = TAILQ_FIRST(&imc->imc_app_packets))) 1556 { 1557 TAILQ_REMOVE(&imc->imc_app_packets, packet_in, pi_next); 1558 LSQ_DEBUG("inherit packet %"PRIu64" from mini conn", 1559 packet_in->pi_packno); 1560 ietf_full_conn_ci_packet_in(&conn->ifc_conn, packet_in); 1561 lsquic_packet_in_put(conn->ifc_pub.mm, packet_in); 1562 } 1563 1564 LSQ_DEBUG("logging using %s SCID", 1565 LSQUIC_LOG_CONN_ID == CN_SCID(&conn->ifc_conn) ? "server" : "client"); 1566 return &conn->ifc_conn; 1567 1568 err3: 1569 ietf_full_conn_ci_destroy(&conn->ifc_conn); 1570 return NULL; 1571 1572 err2: 1573 lsquic_malo_destroy(conn->ifc_pub.packet_out_malo); 1574 err1: 1575 lsquic_send_ctl_cleanup(&conn->ifc_send_ctl); 1576 if (conn->ifc_pub.all_streams) 1577 lsquic_hash_destroy(conn->ifc_pub.all_streams); 1578 free(conn); 1579 err0: 1580 return NULL; 1581} 1582 1583 1584static int 1585should_generate_ack (struct ietf_full_conn *conn, 1586 enum ifull_conn_flags ack_queued) 1587{ 1588 unsigned lost_acks; 1589 1590 /* Need to set which ACKs are queued because generate_ack_frame() does not 1591 * generate ACKs unconditionally. 1592 */ 1593 lost_acks = lsquic_send_ctl_lost_ack(&conn->ifc_send_ctl); 1594 if (lost_acks) 1595 conn->ifc_flags |= lost_acks << IFCBIT_ACK_QUED_SHIFT; 1596 1597 return (conn->ifc_flags & ack_queued) != 0; 1598} 1599 1600 1601static int 1602ietf_full_conn_ci_can_write_ack (struct lsquic_conn *lconn) 1603{ 1604 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 1605 1606 /* Follow opportunistic ACK logic. Because this method is only used by 1607 * buffered packets code path, no need to check whether anything is 1608 * writing: we know it is. 1609 */ 1610 return conn->ifc_n_slack_akbl[PNS_APP] > 0 1611 && lsquic_send_ctl_can_send(&conn->ifc_send_ctl); 1612} 1613 1614 1615static unsigned 1616ietf_full_conn_ci_cancel_pending_streams (struct lsquic_conn *lconn, unsigned n) 1617{ 1618 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 1619 if (n > conn->ifc_n_delayed_streams) 1620 conn->ifc_n_delayed_streams = 0; 1621 else 1622 conn->ifc_n_delayed_streams -= n; 1623 return conn->ifc_n_delayed_streams; 1624} 1625 1626 1627/* Best effort. 
If timestamp frame does not fit, oh well */ 1628static void 1629generate_timestamp_frame (struct ietf_full_conn *conn, 1630 struct lsquic_packet_out *packet_out, lsquic_time_t now) 1631{ 1632 uint64_t timestamp; 1633 int w; 1634 1635 timestamp = (now - conn->ifc_created) >> TP_DEF_ACK_DELAY_EXP; 1636 w = conn->ifc_conn.cn_pf->pf_gen_timestamp_frame( 1637 packet_out->po_data + packet_out->po_data_sz, 1638 lsquic_packet_out_avail(packet_out), timestamp); 1639 if (w < 0) 1640 { 1641 LSQ_DEBUG("could not generate TIMESTAMP frame"); 1642 return; 1643 } 1644 LSQ_DEBUG("generated TIMESTAMP(%"PRIu64" us) frame", 1645 timestamp << TP_DEF_ACK_DELAY_EXP); 1646 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated TIMESTAMP(%" 1647 PRIu64" us) frame", timestamp << TP_DEF_ACK_DELAY_EXP); 1648 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 1649 QUIC_FRAME_TIMESTAMP, packet_out->po_data_sz, w)) 1650 { 1651 LSQ_DEBUG("%s: adding frame to packet failed: %d", __func__, errno); 1652 return; 1653 } 1654 packet_out->po_frame_types |= 1 << QUIC_FRAME_TIMESTAMP; 1655 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 1656 packet_out->po_regen_sz += w; 1657} 1658 1659 1660static int 1661generate_ack_frame_for_pns (struct ietf_full_conn *conn, 1662 struct lsquic_packet_out *packet_out, enum packnum_space pns, 1663 lsquic_time_t now) 1664{ 1665 int has_missing, w; 1666 1667 w = conn->ifc_conn.cn_pf->pf_gen_ack_frame( 1668 packet_out->po_data + packet_out->po_data_sz, 1669 lsquic_packet_out_avail(packet_out), 1670 (gaf_rechist_first_f) lsquic_rechist_first, 1671 (gaf_rechist_next_f) lsquic_rechist_next, 1672 (gaf_rechist_largest_recv_f) lsquic_rechist_largest_recv, 1673 &conn->ifc_rechist[pns], now, &has_missing, &packet_out->po_ack2ed, 1674 conn->ifc_incoming_ecn ? 
conn->ifc_ecn_counts_in[pns] : NULL); 1675 if (w < 0) { 1676 ABORT_ERROR("generating ACK frame failed: %d", errno); 1677 return -1; 1678 } 1679 char buf[0x100]; 1680 lsquic_hexstr(packet_out->po_data + packet_out->po_data_sz, w, buf, sizeof(buf)); 1681 LSQ_DEBUG("ACK bytes: %s", buf); 1682 EV_LOG_GENERATED_ACK_FRAME(LSQUIC_LOG_CONN_ID, conn->ifc_conn.cn_pf, 1683 packet_out->po_data + packet_out->po_data_sz, w); 1684 lsquic_send_ctl_scheduled_ack(&conn->ifc_send_ctl, pns, 1685 packet_out->po_ack2ed); 1686 packet_out->po_frame_types |= 1 << QUIC_FRAME_ACK; 1687 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 1688 QUIC_FRAME_ACK, packet_out->po_data_sz, w)) 1689 { 1690 ABORT_ERROR("adding frame to packet failed: %d", errno); 1691 return -1; 1692 } 1693 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 1694 packet_out->po_regen_sz += w; 1695 if (has_missing) 1696 conn->ifc_flags |= IFC_ACK_HAD_MISS; 1697 else 1698 conn->ifc_flags &= ~IFC_ACK_HAD_MISS; 1699 LSQ_DEBUG("Put %d bytes of ACK frame into packet on outgoing queue", w); 1700 if (conn->ifc_n_cons_unretx >= conn->ifc_ping_unretx_thresh && 1701 !lsquic_send_ctl_have_outgoing_retx_frames(&conn->ifc_send_ctl)) 1702 { 1703 LSQ_DEBUG("schedule PING frame after %u non-retx " 1704 "packets sent", conn->ifc_n_cons_unretx); 1705 conn->ifc_send_flags |= SF_SEND_PING; 1706 /* This gives a range [12, 27]: */ 1707 conn->ifc_ping_unretx_thresh = 12 1708 + lsquic_crand_get_nybble(conn->ifc_enpub->enp_crand); 1709 } 1710 1711 conn->ifc_n_slack_akbl[pns] = 0; 1712 conn->ifc_flags &= ~(IFC_ACK_QUED_INIT << pns); 1713 if (pns == PNS_APP) 1714 { 1715 conn->ifc_n_slack_all = 0; 1716 lsquic_alarmset_unset(&conn->ifc_alset, AL_ACK_APP); 1717 } 1718 lsquic_send_ctl_sanity_check(&conn->ifc_send_ctl); 1719 LSQ_DEBUG("%s ACK state reset", lsquic_pns2str[pns]); 1720 1721 if (pns == PNS_APP && (conn->ifc_flags & IFC_TIMESTAMPS)) 1722 generate_timestamp_frame(conn, packet_out, now); 1723 1724 return 0; 1725} 1726 1727 1728/* Return number of packets scheduled or 0 on error */ 1729static unsigned 1730generate_ack_frame (struct ietf_full_conn *conn, lsquic_time_t now) 1731{ 1732 struct lsquic_packet_out *packet_out; 1733 enum packnum_space pns; 1734 unsigned count; 1735 int s; 1736 1737 count = 0; 1738 for (pns = 0; pns < N_PNS; ++pns) 1739 if (conn->ifc_flags & (IFC_ACK_QUED_INIT << pns)) 1740 { 1741 packet_out = lsquic_send_ctl_new_packet_out(&conn->ifc_send_ctl, 1742 0, pns, CUR_NPATH(conn)); 1743 if (!packet_out) 1744 { 1745 ABORT_ERROR("cannot allocate packet: %s", strerror(errno)); 1746 return 0; 1747 } 1748 s = generate_ack_frame_for_pns(conn, packet_out, pns, now); 1749 lsquic_send_ctl_scheduled_one(&conn->ifc_send_ctl, packet_out); 1750 if (s != 0) 1751 return 0; 1752 ++count; 1753 } 1754 1755 return count; 1756} 1757 1758 1759static struct lsquic_packet_out * 1760get_writeable_packet_on_path (struct ietf_full_conn *conn, 1761 unsigned need_at_least, const struct network_path *path, 1762 int regen_match) 1763{ 1764 struct lsquic_packet_out *packet_out; 1765 int is_err; 1766 1767 packet_out = lsquic_send_ctl_get_writeable_packet(&conn->ifc_send_ctl, 1768 PNS_APP, need_at_least, path, regen_match, &is_err); 1769 if (!packet_out && is_err) 1770 ABORT_ERROR("cannot allocate packet: %s", strerror(errno)); 1771 return packet_out; 1772} 1773 1774 1775static struct lsquic_packet_out * 1776get_writeable_packet (struct ietf_full_conn *conn, unsigned need_at_least) 1777{ 1778 return get_writeable_packet_on_path(conn, need_at_least, 
1779 CUR_NPATH(conn), 0); 1780} 1781 1782 1783static void 1784generate_max_data_frame (struct ietf_full_conn *conn) 1785{ 1786 const uint64_t offset = lsquic_cfcw_get_fc_recv_off(&conn->ifc_pub.cfcw); 1787 struct lsquic_packet_out *packet_out; 1788 unsigned need; 1789 int w; 1790 1791 need = conn->ifc_conn.cn_pf->pf_max_data_frame_size(offset); 1792 packet_out = get_writeable_packet(conn, need); 1793 if (!packet_out) 1794 return; 1795 w = conn->ifc_conn.cn_pf->pf_gen_max_data_frame( 1796 packet_out->po_data + packet_out->po_data_sz, 1797 lsquic_packet_out_avail(packet_out), offset); 1798 if (w < 0) 1799 { 1800 ABORT_ERROR("Generating MAX_DATA frame failed"); 1801 return; 1802 } 1803 LSQ_DEBUG("generated %d-byte MAX_DATA frame (offset: %"PRIu64")", w, offset); 1804 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated MAX_DATA frame, offset=%" 1805 PRIu64, offset); 1806 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 1807 QUIC_FRAME_MAX_DATA, packet_out->po_data_sz, w)) 1808 { 1809 ABORT_ERROR("adding frame to packet failed: %d", errno); 1810 return; 1811 } 1812 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 1813 packet_out->po_frame_types |= QUIC_FTBIT_MAX_DATA; 1814 conn->ifc_send_flags &= ~SF_SEND_MAX_DATA; 1815 conn->ifc_last_max_data_off_sent = offset; 1816} 1817 1818 1819static int 1820can_issue_cids (const struct ietf_full_conn *conn) 1821{ 1822 int can; 1823 1824 can = ((1 << conn->ifc_conn.cn_n_cces) - 1 1825 != conn->ifc_conn.cn_cces_mask) 1826 && conn->ifc_active_cids_count < conn->ifc_active_cids_limit; 1827 LSQ_DEBUG("can issue CIDs: %d (n_cces %hhu; mask: 0x%hhX; " 1828 "active: %hhu; limit: %hhu)", 1829 can, conn->ifc_conn.cn_n_cces, conn->ifc_conn.cn_cces_mask, 1830 conn->ifc_active_cids_count, conn->ifc_active_cids_limit); 1831 return can; 1832} 1833 1834 1835static int 1836generate_new_cid_frame (struct ietf_full_conn *conn, lsquic_time_t now) 1837{ 1838 struct lsquic_packet_out *packet_out; 1839 struct conn_cid_elem *cce; 1840 size_t need; 1841 int w; 1842 unsigned char token_buf[IQUIC_SRESET_TOKEN_SZ]; 1843 1844 assert(conn->ifc_enpub->enp_settings.es_scid_len); 1845 1846 need = conn->ifc_conn.cn_pf->pf_new_connection_id_frame_size( 1847 conn->ifc_scid_seqno, conn->ifc_enpub->enp_settings.es_scid_len); 1848 packet_out = get_writeable_packet(conn, need); 1849 if (!packet_out) 1850 return -1; 1851 1852 if (!(cce = ietf_full_conn_add_scid(conn, conn->ifc_enpub, 0, now))) 1853 { 1854 ABORT_WARN("cannot add a new SCID"); 1855 return -1; 1856 } 1857 1858 lsquic_tg_generate_sreset(conn->ifc_enpub->enp_tokgen, &cce->cce_cid, 1859 token_buf); 1860 1861 if (0 != lsquic_engine_add_cid(conn->ifc_enpub, &conn->ifc_conn, 1862 cce - conn->ifc_cces)) 1863 { 1864 ABORT_WARN("cannot track new SCID"); 1865 return -1; 1866 } 1867 1868 w = conn->ifc_conn.cn_pf->pf_gen_new_connection_id_frame( 1869 packet_out->po_data + packet_out->po_data_sz, 1870 lsquic_packet_out_avail(packet_out), cce->cce_seqno, 1871 &cce->cce_cid, token_buf, sizeof(token_buf)); 1872 if (w < 0) 1873 { 1874 ABORT_ERROR("generating NEW_CONNECTION_ID frame failed: %d", errno); 1875 return -1; 1876 } 1877 LSQ_DEBUGC("generated %d-byte NEW_CONNECTION_ID frame (CID: %"CID_FMT")", 1878 w, CID_BITS(&cce->cce_cid)); 1879 EV_LOG_GENERATED_NEW_CONNECTION_ID_FRAME(LSQUIC_LOG_CONN_ID, 1880 conn->ifc_conn.cn_pf, packet_out->po_data + packet_out->po_data_sz, w); 1881 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 1882 QUIC_FRAME_NEW_CONNECTION_ID, packet_out->po_data_sz, w)) 
1883 { 1884 ABORT_ERROR("adding frame to packet failed: %d", errno); 1885 return -1; 1886 } 1887 packet_out->po_frame_types |= QUIC_FTBIT_NEW_CONNECTION_ID; 1888 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 1889 1890 if (!can_issue_cids(conn)) 1891 { 1892 conn->ifc_send_flags &= ~SF_SEND_NEW_CID; 1893 LSQ_DEBUG("All %u SCID slots have been assigned", 1894 conn->ifc_conn.cn_n_cces); 1895 } 1896 1897 return 0; 1898} 1899 1900 1901static void 1902maybe_get_rate_available_scid_slot (struct ietf_full_conn *conn, 1903 lsquic_time_t now) 1904{ 1905 const struct lsquic_conn *const lconn = &conn->ifc_conn; 1906 const struct conn_cid_elem *cce; 1907 unsigned active_cid; 1908 lsquic_time_t total_elapsed, elapsed_thresh, period, wait_time; 1909 1910 if (!conn->ifc_enpub->enp_settings.es_scid_iss_rate) 1911 { 1912 conn->ifc_send_flags |= SF_SEND_NEW_CID; 1913 return; 1914 } 1915 1916 /* period: ns per cid */ 1917 period = (60 * 1000000) / conn->ifc_enpub->enp_settings.es_scid_iss_rate; 1918 active_cid = 0; 1919 total_elapsed = 0; 1920 for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce) 1921 { 1922 if ((cce->cce_flags & (CCE_SEQNO|CCE_PORT)) == CCE_SEQNO) 1923 { 1924 active_cid += 1; 1925 /* When server is promoted, the timestamp may be larger than the 1926 * first tick time. 1927 */ 1928 if (now > conn->ifc_scid_timestamp[cce - lconn->cn_cces]) 1929 total_elapsed += 1930 now - conn->ifc_scid_timestamp[cce - lconn->cn_cces]; 1931 } 1932 } 1933 elapsed_thresh = ((active_cid * (active_cid + 1)) / 2) * period; 1934 /* compare total elapsed ns to elapsed ns threshold */ 1935 if (total_elapsed < elapsed_thresh) 1936 { 1937 wait_time = (elapsed_thresh - total_elapsed) / active_cid; 1938 LSQ_DEBUG("cid_throt no SCID slots available (rate-limited), " 1939 "must wait %"PRIu64" ns", wait_time); 1940 lsquic_alarmset_set(&conn->ifc_alset, AL_CID_THROT, now + wait_time); 1941 conn->ifc_send_flags &= ~SF_SEND_NEW_CID; 1942 } 1943 else 1944 conn->ifc_send_flags |= SF_SEND_NEW_CID; 1945} 1946 1947 1948static void 1949generate_new_cid_frames (struct ietf_full_conn *conn, lsquic_time_t now) 1950{ 1951 int s; 1952 1953 do 1954 { 1955 s = generate_new_cid_frame(conn, now); 1956 if (s < 0) 1957 break; 1958 if (conn->ifc_send_flags & SF_SEND_NEW_CID) 1959 maybe_get_rate_available_scid_slot(conn, now); 1960 } 1961 while (conn->ifc_send_flags & SF_SEND_NEW_CID); 1962 LOG_SCIDS(conn); 1963} 1964 1965 1966static int 1967generate_retire_cid_frame (struct ietf_full_conn *conn) 1968{ 1969 struct lsquic_packet_out *packet_out; 1970 struct dcid_elem *dce; 1971 size_t need; 1972 int w; 1973 1974 dce = TAILQ_FIRST(&conn->ifc_to_retire); 1975 assert(dce); 1976 1977 need = conn->ifc_conn.cn_pf->pf_retire_cid_frame_size(dce->de_seqno); 1978 packet_out = get_writeable_packet(conn, need); 1979 if (!packet_out) 1980 return -1; 1981 1982 w = conn->ifc_conn.cn_pf->pf_gen_retire_cid_frame( 1983 packet_out->po_data + packet_out->po_data_sz, 1984 lsquic_packet_out_avail(packet_out), dce->de_seqno); 1985 if (w < 0) 1986 { 1987 ABORT_ERROR("generating RETIRE_CONNECTION_ID frame failed: %d", errno); 1988 return -1; 1989 } 1990 LSQ_DEBUG("generated %d-byte RETIRE_CONNECTION_ID frame (seqno: %u)", 1991 w, dce->de_seqno); 1992 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated RETIRE_CONNECTION_ID " 1993 "frame, seqno=%u", dce->de_seqno); 1994 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 1995 QUIC_FRAME_RETIRE_CONNECTION_ID, packet_out->po_data_sz, w)) 1996 { 1997 ABORT_ERROR("adding frame to packet 
failed: %d", errno); 1998 return -1; 1999 } 2000 packet_out->po_frame_types |= QUIC_FTBIT_RETIRE_CONNECTION_ID; 2001 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 2002 2003 TAILQ_REMOVE(&conn->ifc_to_retire, dce, de_next_to_ret); 2004 lsquic_malo_put(dce); 2005 2006 if (TAILQ_EMPTY(&conn->ifc_to_retire)) 2007 conn->ifc_send_flags &= ~SF_SEND_RETIRE_CID; 2008 2009 return 0; 2010} 2011 2012 2013static void 2014generate_retire_cid_frames (struct ietf_full_conn *conn, lsquic_time_t now) 2015{ 2016 int s; 2017 2018 do 2019 s = generate_retire_cid_frame(conn); 2020 while (0 == s && (conn->ifc_send_flags & SF_SEND_RETIRE_CID)); 2021} 2022 2023 2024static void 2025generate_streams_blocked_frame (struct ietf_full_conn *conn, enum stream_dir sd) 2026{ 2027 struct lsquic_packet_out *packet_out; 2028 uint64_t limit; 2029 size_t need; 2030 int w; 2031 2032 limit = conn->ifc_send.streams_blocked[sd]; 2033 need = conn->ifc_conn.cn_pf->pf_streams_blocked_frame_size(limit); 2034 packet_out = get_writeable_packet(conn, need); 2035 if (!packet_out) 2036 return; 2037 2038 w = conn->ifc_conn.cn_pf->pf_gen_streams_blocked_frame( 2039 packet_out->po_data + packet_out->po_data_sz, 2040 lsquic_packet_out_avail(packet_out), sd == SD_UNI, limit); 2041 if (w < 0) 2042 { 2043 ABORT_ERROR("generating STREAMS_BLOCKED frame failed: %d", errno); 2044 return; 2045 } 2046 LSQ_DEBUG("generated %d-byte STREAMS_BLOCKED frame (uni: %d, " 2047 "limit: %"PRIu64")", w, sd == SD_UNI, limit); 2048 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated %d-byte STREAMS_BLOCKED " 2049 "frame (uni: %d, limit: %"PRIu64")", w, sd == SD_UNI, limit); 2050 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2051 QUIC_FRAME_STREAMS_BLOCKED, packet_out->po_data_sz, w)) 2052 { 2053 ABORT_ERROR("adding frame to packet failed: %d", errno); 2054 return; 2055 } 2056 packet_out->po_frame_types |= QUIC_FTBIT_STREAM_BLOCKED; 2057 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 2058 conn->ifc_send_flags &= ~(SF_SEND_STREAMS_BLOCKED << sd); 2059} 2060 2061 2062static void 2063generate_streams_blocked_uni_frame (struct ietf_full_conn *conn, 2064 lsquic_time_t now) 2065{ 2066 generate_streams_blocked_frame(conn, SD_UNI); 2067} 2068 2069 2070static void 2071generate_streams_blocked_bidi_frame (struct ietf_full_conn *conn, 2072 lsquic_time_t now) 2073{ 2074 generate_streams_blocked_frame(conn, SD_BIDI); 2075} 2076 2077 2078static void 2079generate_max_streams_frame (struct ietf_full_conn *conn, enum stream_dir sd) 2080{ 2081 struct lsquic_packet_out *packet_out; 2082 enum stream_id_type sit; 2083 uint64_t limit; 2084 size_t need; 2085 int w; 2086 2087 limit = conn->ifc_closed_peer_streams[sd] + conn->ifc_max_streams_in[sd]; 2088 need = conn->ifc_conn.cn_pf->pf_max_streams_frame_size(limit); 2089 packet_out = get_writeable_packet(conn, need); 2090 if (!packet_out) 2091 return; 2092 2093 w = conn->ifc_conn.cn_pf->pf_gen_max_streams_frame( 2094 packet_out->po_data + packet_out->po_data_sz, 2095 lsquic_packet_out_avail(packet_out), sd, limit); 2096 if (w < 0) 2097 { 2098 ABORT_ERROR("generating MAX_STREAMS frame failed: %d", errno); 2099 return; 2100 } 2101 LSQ_DEBUG("generated %d-byte MAX_STREAMS frame (uni: %d, " 2102 "limit: %"PRIu64")", w, sd == SD_UNI, limit); 2103 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated %d-byte MAX_STREAMS " 2104 "frame (uni: %d, limit: %"PRIu64")", w, sd == SD_UNI, limit); 2105 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2106 QUIC_FRAME_MAX_STREAMS, 
packet_out->po_data_sz, w)) 2107 { 2108 ABORT_ERROR("adding frame to packet failed: %d", errno); 2109 return; 2110 } 2111 packet_out->po_frame_types |= QUIC_FTBIT_MAX_STREAMS; 2112 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 2113 conn->ifc_send_flags &= ~(SF_SEND_MAX_STREAMS << sd); 2114 2115 sit = gen_sit(!(conn->ifc_flags & IFC_SERVER), sd); 2116 LSQ_DEBUG("max_allowed_stream_id[ %u ] goes from %"PRIu64" to %"PRIu64, 2117 sit, conn->ifc_max_allowed_stream_id[ sit ], limit << SIT_SHIFT); 2118 conn->ifc_max_allowed_stream_id[ sit ] = limit << SIT_SHIFT; 2119} 2120 2121 2122static void 2123generate_max_streams_uni_frame (struct ietf_full_conn *conn, lsquic_time_t now) 2124{ 2125 generate_max_streams_frame(conn, SD_UNI); 2126} 2127 2128 2129static void 2130generate_max_streams_bidi_frame (struct ietf_full_conn *conn, lsquic_time_t now) 2131{ 2132 generate_max_streams_frame(conn, SD_BIDI); 2133} 2134 2135 2136/* Return true if generated, false otherwise */ 2137static int 2138generate_blocked_frame (struct ietf_full_conn *conn) 2139{ 2140 const uint64_t offset = conn->ifc_pub.conn_cap.cc_blocked; 2141 struct lsquic_packet_out *packet_out; 2142 size_t need; 2143 int w; 2144 2145 need = conn->ifc_conn.cn_pf->pf_blocked_frame_size(offset); 2146 packet_out = get_writeable_packet(conn, need); 2147 if (!packet_out) 2148 return 0; 2149 2150 w = conn->ifc_conn.cn_pf->pf_gen_blocked_frame( 2151 packet_out->po_data + packet_out->po_data_sz, 2152 lsquic_packet_out_avail(packet_out), offset); 2153 if (w < 0) 2154 { 2155 ABORT_ERROR("generating BLOCKED frame failed: %d", errno); 2156 return 0; 2157 } 2158 LSQ_DEBUG("generated %d-byte BLOCKED frame (offset: %"PRIu64")", w, offset); 2159 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated BLOCKED frame, offset=%" 2160 PRIu64, offset); 2161 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2162 QUIC_FRAME_BLOCKED, packet_out->po_data_sz, w)) 2163 { 2164 ABORT_ERROR("adding frame to packet failed: %d", errno); 2165 return 0; 2166 } 2167 packet_out->po_frame_types |= QUIC_FTBIT_BLOCKED; 2168 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 2169 2170 return 1; 2171} 2172 2173 2174/* Return true if generated, false otherwise */ 2175static int 2176generate_max_stream_data_frame (struct ietf_full_conn *conn, 2177 struct lsquic_stream *stream) 2178{ 2179 struct lsquic_packet_out *packet_out; 2180 unsigned need; 2181 uint64_t off; 2182 int sz; 2183 2184 off = lsquic_stream_fc_recv_off_const(stream); 2185 need = conn->ifc_conn.cn_pf->pf_max_stream_data_frame_size(stream->id, off); 2186 packet_out = get_writeable_packet(conn, need); 2187 if (!packet_out) 2188 return 0; 2189 sz = conn->ifc_conn.cn_pf->pf_gen_max_stream_data_frame( 2190 packet_out->po_data + packet_out->po_data_sz, 2191 lsquic_packet_out_avail(packet_out), stream->id, off); 2192 if (sz < 0) 2193 { 2194 ABORT_ERROR("Generating MAX_STREAM_DATA frame failed"); 2195 return 0; 2196 } 2197 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated %d-byte MAX_STREAM_DATA " 2198 "frame; stream_id: %"PRIu64"; offset: %"PRIu64, sz, stream->id, off); 2199 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2200 QUIC_FRAME_MAX_STREAM_DATA, packet_out->po_data_sz, sz)) 2201 { 2202 ABORT_ERROR("adding frame to packet failed: %d", errno); 2203 return 0; 2204 } 2205 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 2206 packet_out->po_frame_types |= 1 << QUIC_FRAME_MAX_STREAM_DATA; 2207 lsquic_stream_max_stream_data_sent(stream); 2208 
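    /* The stream has now recorded that its window update went out --
     * presumably clearing SMQF_SEND_MAX_STREAM_DATA -- so another
     * MAX_STREAM_DATA frame will not be queued until more credit needs to
     * be advertised.
     */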
return 1; 2209} 2210 2211 2212/* Return true if generated, false otherwise */ 2213static int 2214generate_stream_blocked_frame (struct ietf_full_conn *conn, 2215 struct lsquic_stream *stream) 2216{ 2217 struct lsquic_packet_out *packet_out; 2218 unsigned need; 2219 uint64_t off; 2220 int sz; 2221 2222 off = lsquic_stream_combined_send_off(stream); 2223 need = conn->ifc_conn.cn_pf->pf_stream_blocked_frame_size(stream->id, off); 2224 packet_out = get_writeable_packet(conn, need); 2225 if (!packet_out) 2226 return 0; 2227 sz = conn->ifc_conn.cn_pf->pf_gen_stream_blocked_frame( 2228 packet_out->po_data + packet_out->po_data_sz, 2229 lsquic_packet_out_avail(packet_out), stream->id, off); 2230 if (sz < 0) 2231 { 2232 ABORT_ERROR("Generating STREAM_BLOCKED frame failed"); 2233 return 0; 2234 } 2235 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "generated %d-byte STREAM_BLOCKED " 2236 "frame; stream_id: %"PRIu64"; offset: %"PRIu64, sz, stream->id, off); 2237 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2238 QUIC_FRAME_STREAM_BLOCKED, packet_out->po_data_sz, sz)) 2239 { 2240 ABORT_ERROR("adding frame to packet failed: %d", errno); 2241 return 0; 2242 } 2243 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 2244 packet_out->po_frame_types |= 1 << QUIC_FRAME_STREAM_BLOCKED; 2245 lsquic_stream_blocked_frame_sent(stream); 2246 return 1; 2247} 2248 2249 2250static int 2251generate_stop_sending_frame (struct ietf_full_conn *conn, 2252 lsquic_stream_id_t stream_id, enum http_error_code error_code) 2253{ 2254 struct lsquic_packet_out *packet_out; 2255 size_t need; 2256 int w; 2257 2258 need = conn->ifc_conn.cn_pf->pf_stop_sending_frame_size(stream_id, 2259 error_code); 2260 packet_out = get_writeable_packet(conn, need); 2261 if (!packet_out) 2262 return -1; 2263 2264 w = conn->ifc_conn.cn_pf->pf_gen_stop_sending_frame( 2265 packet_out->po_data + packet_out->po_data_sz, 2266 lsquic_packet_out_avail(packet_out), 2267 stream_id, error_code); 2268 if (w < 0) 2269 { 2270 ABORT_ERROR("generating STOP_SENDING frame failed: %d", errno); 2271 return -1; 2272 } 2273 LSQ_DEBUG("generated %d-byte STOP_SENDING frame (stream id: %"PRIu64", " 2274 "error code: %u)", w, stream_id, error_code); 2275 EV_LOG_GENERATED_STOP_SENDING_FRAME(LSQUIC_LOG_CONN_ID, stream_id, 2276 error_code); 2277 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 2278 QUIC_FRAME_STOP_SENDING, packet_out->po_data_sz, w)) 2279 { 2280 ABORT_ERROR("adding frame to packet failed: %d", errno); 2281 return -1; 2282 } 2283 packet_out->po_frame_types |= QUIC_FTBIT_STOP_SENDING; 2284 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 2285 2286 return 0; 2287} 2288 2289 2290static void 2291generate_stop_sending_frames (struct ietf_full_conn *conn, lsquic_time_t now) 2292{ 2293 struct stream_id_to_ss *sits; 2294 2295 assert(conn->ifc_send_flags & SF_SEND_STOP_SENDING); 2296 2297 while (!STAILQ_EMPTY(&conn->ifc_stream_ids_to_ss)) 2298 { 2299 sits = STAILQ_FIRST(&conn->ifc_stream_ids_to_ss); 2300 if (0 == generate_stop_sending_frame(conn, sits->sits_stream_id, 2301 sits->sits_error_code)) 2302 { 2303 STAILQ_REMOVE_HEAD(&conn->ifc_stream_ids_to_ss, sits_next); 2304 free(sits); 2305 } 2306 else 2307 break; 2308 } 2309 2310 if (STAILQ_EMPTY(&conn->ifc_stream_ids_to_ss)) 2311 conn->ifc_send_flags &= ~SF_SEND_STOP_SENDING; 2312} 2313 2314 2315/* Return true if generated, false otherwise */ 2316static int 2317generate_rst_stream_frame (struct ietf_full_conn *conn, 2318 struct lsquic_stream *stream) 
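/* Unlike the generators above, this one records the frame with
 * lsquic_packet_out_add_stream(), which ties the frame record in the
 * outgoing packet to the stream whose RESET_STREAM it carries.
 */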
2319{ 2320 lsquic_packet_out_t *packet_out; 2321 unsigned need; 2322 int sz; 2323 2324 need = conn->ifc_conn.cn_pf->pf_rst_frame_size(stream->id, 2325 stream->tosend_off, stream->error_code); 2326 packet_out = get_writeable_packet(conn, need); 2327 if (!packet_out) 2328 { 2329 LSQ_DEBUG("cannot get writeable packet for RESET_STREAM frame"); 2330 return 0; 2331 } 2332 sz = conn->ifc_conn.cn_pf->pf_gen_rst_frame( 2333 packet_out->po_data + packet_out->po_data_sz, 2334 lsquic_packet_out_avail(packet_out), stream->id, 2335 stream->tosend_off, stream->error_code); 2336 if (sz < 0) 2337 { 2338 ABORT_ERROR("gen_rst_frame failed"); 2339 return 0; 2340 } 2341 if (0 != lsquic_packet_out_add_stream(packet_out, conn->ifc_pub.mm, stream, 2342 QUIC_FRAME_RST_STREAM, packet_out->po_data_sz, sz)) 2343 { 2344 ABORT_ERROR("adding frame to packet failed: %d", errno); 2345 return 0; 2346 } 2347 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 2348 packet_out->po_frame_types |= 1 << QUIC_FRAME_RST_STREAM; 2349 lsquic_stream_rst_frame_sent(stream); 2350 LSQ_DEBUG("wrote RST: stream %"PRIu64"; offset 0x%"PRIX64"; error code " 2351 "%"PRIu64, stream->id, stream->tosend_off, stream->error_code); 2352 2353 return 1; 2354} 2355 2356 2357static int 2358is_our_stream (const struct ietf_full_conn *conn, 2359 const struct lsquic_stream *stream) 2360{ 2361 const unsigned is_server = !!(conn->ifc_flags & IFC_SERVER); 2362 return (1 & stream->id) == is_server; 2363} 2364 2365 2366static int 2367is_peer_initiated (const struct ietf_full_conn *conn, 2368 lsquic_stream_id_t stream_id) 2369{ 2370 const unsigned is_server = !!(conn->ifc_flags & IFC_SERVER); 2371 return (1 & stream_id) != is_server; 2372} 2373 2374 2375static void 2376sched_max_bidi_streams (void *conn_p) 2377{ 2378 struct ietf_full_conn *conn = conn_p; 2379 2380 conn->ifc_send_flags |= SF_SEND_MAX_STREAMS_BIDI; 2381 conn->ifc_delayed_send &= ~SF_SEND_MAX_STREAMS_BIDI; 2382 LSQ_DEBUG("schedule MAX_STREAMS frame for bidirectional streams (was " 2383 "delayed)"); 2384} 2385 2386 2387/* Do not allow peer to open more streams while QPACK decoder stream has 2388 * unsent data. 2389 */ 2390static int 2391can_give_peer_streams_credit (struct ietf_full_conn *conn, enum stream_dir sd) 2392{ 2393 /* This logic only applies to HTTP servers. */ 2394 if ((conn->ifc_flags & (IFC_SERVER|IFC_HTTP)) != (IFC_SERVER|IFC_HTTP)) 2395 return 1; 2396 /* HTTP client does not open unidirectional streams (other than the 2397 * standard three), not applicable. 2398 */ 2399 if (SD_UNI == sd) 2400 return 1; 2401 if (conn->ifc_delayed_send & (SF_SEND_MAX_STREAMS << sd)) 2402 return 0; 2403 if (lsquic_qdh_arm_if_unsent(&conn->ifc_qdh, sched_max_bidi_streams, conn)) 2404 { 2405 LSQ_DEBUG("delay sending more streams credit to peer until QPACK " 2406 "decoder sends unsent data"); 2407 conn->ifc_delayed_send |= SF_SEND_MAX_STREAMS << sd; 2408 return 0; 2409 } 2410 else 2411 return 1; 2412} 2413 2414 2415/* Because stream IDs are distributed unevenly, it is more efficient to 2416 * maintain four sets of closed stream IDs. 
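 * The two low bits of an IETF QUIC stream ID encode initiator and direction,
 * so (stream_id & SIT_MASK) picks one of the four sets.  For example, stream
 * 4 (0b100) is client-initiated bidirectional, while stream 3 (0b011) is
 * server-initiated unidirectional.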
2417 */ 2418static void 2419conn_mark_stream_closed (struct ietf_full_conn *conn, 2420 lsquic_stream_id_t stream_id) 2421{ 2422 lsquic_stream_id_t shifted_id; 2423 uint64_t max_allowed, thresh; 2424 enum stream_id_type idx; 2425 enum stream_dir sd; 2426 2427 idx = stream_id & SIT_MASK; 2428 shifted_id = stream_id >> SIT_SHIFT; 2429 2430 if (is_peer_initiated(conn, stream_id) 2431 && !lsquic_set64_has(&conn->ifc_closed_stream_ids[idx], shifted_id)) 2432 { 2433 sd = (stream_id >> SD_SHIFT) & 1; 2434 ++conn->ifc_closed_peer_streams[sd]; 2435 if (0 == (conn->ifc_send_flags & (SF_SEND_MAX_STREAMS << sd))) 2436 { 2437 max_allowed = conn->ifc_max_allowed_stream_id[idx] >> SIT_SHIFT; 2438 thresh = conn->ifc_closed_peer_streams[sd] 2439 + conn->ifc_max_streams_in[sd] / 2; 2440 if (thresh >= max_allowed && can_give_peer_streams_credit(conn, sd)) 2441 { 2442 LSQ_DEBUG("closed incoming %sdirectional streams reached " 2443 "%"PRIu64", scheduled MAX_STREAMS frame", 2444 sd == SD_UNI ? "uni" : "bi", 2445 conn->ifc_closed_peer_streams[sd]); 2446 conn->ifc_send_flags |= SF_SEND_MAX_STREAMS << sd; 2447 } 2448 } 2449 } 2450 2451 if (0 == lsquic_set64_add(&conn->ifc_closed_stream_ids[idx], shifted_id)) 2452 LSQ_DEBUG("marked stream %"PRIu64" as closed", stream_id); 2453 else 2454 ABORT_ERROR("could not add element to set: %s", strerror(errno)); 2455} 2456 2457 2458static int 2459conn_is_stream_closed (struct ietf_full_conn *conn, 2460 lsquic_stream_id_t stream_id) 2461{ 2462 enum stream_id_type idx = stream_id & SIT_MASK; 2463 stream_id >>= SIT_SHIFT; 2464 return lsquic_set64_has(&conn->ifc_closed_stream_ids[idx], stream_id); 2465} 2466 2467 2468static int 2469either_side_going_away (const struct ietf_full_conn *conn) 2470{ 2471 return (conn->ifc_flags & IFC_GOING_AWAY) 2472 || (conn->ifc_conn.cn_flags & LSCONN_PEER_GOING_AWAY); 2473} 2474 2475 2476static void 2477maybe_create_delayed_streams (struct ietf_full_conn *conn) 2478{ 2479 unsigned avail, delayed; 2480 2481 delayed = conn->ifc_n_delayed_streams; 2482 if (0 == delayed) 2483 return; 2484 2485 avail = ietf_full_conn_ci_n_avail_streams(&conn->ifc_conn); 2486 while (avail > 0) 2487 { 2488 if (0 == create_bidi_stream_out(conn)) 2489 { 2490 --avail; 2491 --conn->ifc_n_delayed_streams; 2492 if (0 == conn->ifc_n_delayed_streams) 2493 break; 2494 } 2495 else 2496 { 2497 LSQ_INFO("cannot create BIDI stream"); 2498 break; 2499 } 2500 } 2501 2502 LSQ_DEBUG("created %u delayed stream%.*s", 2503 delayed - conn->ifc_n_delayed_streams, 2504 delayed - conn->ifc_n_delayed_streams != 1, "s"); 2505} 2506 2507 2508static int 2509have_bidi_streams (const struct ietf_full_conn *conn) 2510{ 2511 const struct lsquic_stream *stream; 2512 struct lsquic_hash_elem *el; 2513 2514 for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el; 2515 el = lsquic_hash_next(conn->ifc_pub.all_streams)) 2516 { 2517 stream = lsquic_hashelem_getdata(el); 2518 if (SIT_BIDI_CLIENT == (stream->id & SIT_MASK)) 2519 return 1; 2520 } 2521 2522 return 0; 2523} 2524 2525 2526static void 2527maybe_close_conn (struct ietf_full_conn *conn) 2528{ 2529 if ((conn->ifc_flags & (IFC_CLOSING|IFC_GOING_AWAY|IFC_SERVER)) 2530 == (IFC_GOING_AWAY|IFC_SERVER) 2531 && !have_bidi_streams(conn)) 2532 { 2533 conn->ifc_flags |= IFC_CLOSING|IFC_GOAWAY_CLOSE; 2534 conn->ifc_send_flags |= SF_SEND_CONN_CLOSE; 2535 LSQ_DEBUG("closing connection: GOAWAY sent and no responses remain"); 2536 } 2537} 2538 2539 2540static void 2541service_streams (struct ietf_full_conn *conn) 2542{ 2543 struct lsquic_hash_elem *el; 2544 
lsquic_stream_t *stream, *next; 2545 2546 for (stream = TAILQ_FIRST(&conn->ifc_pub.service_streams); stream; 2547 stream = next) 2548 { 2549 next = TAILQ_NEXT(stream, next_service_stream); 2550 if (stream->sm_qflags & SMQF_ABORT_CONN) 2551 /* No need to unset this flag or remove this stream: the connection 2552 * is about to be aborted. 2553 */ 2554 ABORT_ERROR("aborted due to error in stream %"PRIu64, stream->id); 2555 if (stream->sm_qflags & SMQF_CALL_ONCLOSE) 2556 lsquic_stream_call_on_close(stream); 2557 if (stream->sm_qflags & SMQF_FREE_STREAM) 2558 { 2559 TAILQ_REMOVE(&conn->ifc_pub.service_streams, stream, 2560 next_service_stream); 2561 if (!(stream->sm_bflags & SMBF_CRYPTO)) 2562 { 2563 el = lsquic_hash_find(conn->ifc_pub.all_streams, 2564 &stream->id, sizeof(stream->id)); 2565 if (el) 2566 lsquic_hash_erase(conn->ifc_pub.all_streams, el); 2567 conn_mark_stream_closed(conn, stream->id); 2568 } 2569 else 2570 assert(!(stream->sm_hash_el.qhe_flags & QHE_HASHED)); 2571 lsquic_stream_destroy(stream); 2572 } 2573 } 2574 2575 /* TODO: this chunk of code, too, should probably live elsewhere */ 2576 if (either_side_going_away(conn)) 2577 { 2578 while (conn->ifc_n_delayed_streams) 2579 { 2580 --conn->ifc_n_delayed_streams; 2581 LSQ_DEBUG("goaway mode: delayed stream results in null ctor"); 2582 (void) conn->ifc_enpub->enp_stream_if->on_new_stream( 2583 conn->ifc_enpub->enp_stream_if_ctx, NULL); 2584 } 2585 maybe_close_conn(conn); 2586 } 2587 else 2588 maybe_create_delayed_streams(conn); 2589} 2590 2591 2592static int 2593process_stream_ready_to_send (struct ietf_full_conn *conn, 2594 struct lsquic_stream *stream) 2595{ 2596 int r = 1; 2597 if (stream->sm_qflags & SMQF_SEND_MAX_STREAM_DATA) 2598 r &= generate_max_stream_data_frame(conn, stream); 2599 if (stream->sm_qflags & SMQF_SEND_BLOCKED) 2600 r &= generate_stream_blocked_frame(conn, stream); 2601 if (stream->sm_qflags & SMQF_SEND_RST) 2602 r &= generate_rst_stream_frame(conn, stream); 2603 return r; 2604} 2605 2606 2607static void 2608process_streams_ready_to_send (struct ietf_full_conn *conn) 2609{ 2610 struct lsquic_stream *stream; 2611 struct stream_prio_iter spi; 2612 2613 assert(!TAILQ_EMPTY(&conn->ifc_pub.sending_streams)); 2614 2615 lsquic_spi_init(&spi, TAILQ_FIRST(&conn->ifc_pub.sending_streams), 2616 TAILQ_LAST(&conn->ifc_pub.sending_streams, lsquic_streams_tailq), 2617 (uintptr_t) &TAILQ_NEXT((lsquic_stream_t *) NULL, next_send_stream), 2618 SMQF_SENDING_FLAGS, &conn->ifc_conn, "send", NULL, NULL); 2619 2620 for (stream = lsquic_spi_first(&spi); stream; 2621 stream = lsquic_spi_next(&spi)) 2622 if (!process_stream_ready_to_send(conn, stream)) 2623 break; 2624} 2625 2626 2627static void 2628ietf_full_conn_ci_write_ack (struct lsquic_conn *lconn, 2629 struct lsquic_packet_out *packet_out) 2630{ 2631 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2632 generate_ack_frame_for_pns(conn, packet_out, PNS_APP, lsquic_time_now()); 2633} 2634 2635 2636static void 2637ietf_full_conn_ci_client_call_on_new (struct lsquic_conn *lconn) 2638{ 2639 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2640 assert(conn->ifc_flags & IFC_CREATED_OK); 2641 conn->ifc_conn_ctx = conn->ifc_enpub->enp_stream_if->on_new_conn( 2642 conn->ifc_enpub->enp_stream_if_ctx, lconn); 2643} 2644 2645 2646static void 2647ietf_full_conn_ci_close (struct lsquic_conn *lconn) 2648{ 2649 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2650 struct lsquic_stream *stream; 2651 struct lsquic_hash_elem *el; 2652 enum stream_dir sd; 
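    /* On the first close call, shut down every bidirectional stream, mark
     * the connection as closing, schedule a CONNECTION_CLOSE frame, and make
     * sure the connection is ticked so the frame actually goes out.
     */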
2653 2654 if (!(conn->ifc_flags & IFC_CLOSING)) 2655 { 2656 for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el; 2657 el = lsquic_hash_next(conn->ifc_pub.all_streams)) 2658 { 2659 stream = lsquic_hashelem_getdata(el); 2660 sd = (stream->id >> SD_SHIFT) & 1; 2661 if (SD_BIDI == sd) 2662 lsquic_stream_shutdown_internal(stream); 2663 } 2664 conn->ifc_flags |= IFC_CLOSING; 2665 conn->ifc_send_flags |= SF_SEND_CONN_CLOSE; 2666 lsquic_engine_add_conn_to_tickable(conn->ifc_enpub, lconn); 2667 } 2668} 2669 2670 2671static void 2672ietf_full_conn_ci_abort (struct lsquic_conn *lconn) 2673{ 2674 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2675 LSQ_INFO("User aborted connection"); 2676 conn->ifc_flags |= IFC_ABORTED; 2677} 2678 2679 2680static void 2681retire_dcid (struct ietf_full_conn *conn, struct dcid_elem **dce) 2682{ 2683 if ((*dce)->de_hash_el.qhe_flags & QHE_HASHED) 2684 lsquic_hash_erase(conn->ifc_enpub->enp_srst_hash, &(*dce)->de_hash_el); 2685 TAILQ_INSERT_TAIL(&conn->ifc_to_retire, *dce, de_next_to_ret); 2686 LSQ_DEBUG("prepare to retire DCID seqno %"PRIu32"", (*dce)->de_seqno); 2687 *dce = NULL; 2688 conn->ifc_send_flags |= SF_SEND_RETIRE_CID; 2689} 2690 2691 2692static void 2693retire_seqno (struct ietf_full_conn *conn, unsigned seqno) 2694{ 2695 struct dcid_elem *dce; 2696 2697 dce = lsquic_malo_get(conn->ifc_pub.mm->malo.dcid_elem); 2698 if (dce) 2699 { 2700 memset(dce, 0, sizeof(*dce)); 2701 dce->de_seqno = seqno; 2702 TAILQ_INSERT_TAIL(&conn->ifc_to_retire, dce, de_next_to_ret); 2703 LSQ_DEBUG("prepare to retire DCID seqno %"PRIu32, seqno); 2704 conn->ifc_send_flags |= SF_SEND_RETIRE_CID; 2705 } 2706 else 2707 LSQ_INFO("%s: cannot allocate dce", __func__); 2708} 2709 2710 2711/* This function exists for testing purposes. 2712 * 2713 * The user can switch DCIDs and request that the old DCID is retired. 2714 * 2715 * If the user calls this function frequently in a short amount of time, 2716 * this should trigger the CID issuance throttling. 2717 */ 2718static void 2719ietf_full_conn_ci_retire_cid (struct lsquic_conn *lconn) 2720{ 2721 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2722 struct dcid_elem **el, **dces[2]; 2723 int eq; 2724 /* 2725 * Find two DCIDs: 2726 * 1. the current DCID that will be retire_cid 2727 * 2. an available DCID that will be switched 2728 * Continue searching until there are no more DCIDs 2729 * or when both DCIDs are found. 2730 */ 2731 dces[0] = NULL; // future DCID (does not match current DCID) 2732 dces[1] = NULL; // current DCID (does match current DCID) 2733 for (el = conn->ifc_dces; el < DCES_END(conn) && !(dces[0] && dces[1]); ++el) 2734 if (*el) 2735 { 2736 eq = LSQUIC_CIDS_EQ(&(*el)->de_cid, CUR_DCID(conn)); 2737 if (!dces[eq]) 2738 dces[eq] = el; 2739 } 2740 if (!dces[1]) 2741 { 2742 ABORT_WARN("%s: cannot find own DCID", __func__); 2743 return; 2744 } 2745 if (!dces[0]) 2746 { 2747 LSQ_INFO("No DCID available: cannot switch"); 2748 /* TODO: implemened delayed switch */ 2749 // conn->ifc_flags |= IFC_SWITCH_DCID; 2750 return; 2751 } 2752 /* 2753 * Switch DCID. 2754 */ 2755 *CUR_DCID(conn) = (*dces[0])->de_cid; 2756 LSQ_INFOC("switched DCID to %"CID_FMT, CID_BITS(CUR_DCID(conn))); 2757 /* 2758 * Mark old DCID for retirement. 
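     * retire_dcid() moves it onto the ifc_to_retire list and sets
     * SF_SEND_RETIRE_CID so that a RETIRE_CONNECTION_ID frame is generated.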
2759 */ 2760 retire_dcid(conn, dces[1]); 2761} 2762 2763 2764static void 2765drop_crypto_streams (struct ietf_full_conn *conn) 2766{ 2767 struct lsquic_stream **streamp; 2768 unsigned count; 2769 2770 if ((conn->ifc_flags & (IFC_SERVER|IFC_PROC_CRYPTO)) != IFC_PROC_CRYPTO) 2771 return; 2772 2773 conn->ifc_flags &= ~IFC_PROC_CRYPTO; 2774 2775 count = 0; 2776 for (streamp = conn->ifc_u.cli.crypto_streams; streamp < 2777 conn->ifc_u.cli.crypto_streams + sizeof(conn->ifc_u.cli.crypto_streams) 2778 / sizeof(conn->ifc_u.cli.crypto_streams[0]); ++streamp) 2779 if (*streamp) 2780 { 2781 lsquic_stream_force_finish(*streamp); 2782 *streamp = NULL; 2783 ++count; 2784 } 2785 2786 LSQ_DEBUG("dropped %u crypto stream%.*s", count, count != 1, "s"); 2787} 2788 2789 2790static void 2791ietf_full_conn_ci_destroy (struct lsquic_conn *lconn) 2792{ 2793 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2794 struct lsquic_stream **streamp, *stream; 2795 struct stream_id_to_ss *sits; 2796 struct dcid_elem **dcep, *dce; 2797 struct lsquic_hash_elem *el; 2798 unsigned i; 2799 2800 if (!(conn->ifc_flags & IFC_SERVER)) 2801 { 2802 for (streamp = conn->ifc_u.cli.crypto_streams; streamp < 2803 conn->ifc_u.cli.crypto_streams 2804 + sizeof(conn->ifc_u.cli.crypto_streams) 2805 / sizeof(conn->ifc_u.cli.crypto_streams[0]); ++streamp) 2806 if (*streamp) 2807 lsquic_stream_destroy(*streamp); 2808 } 2809 while ((el = lsquic_hash_first(conn->ifc_pub.all_streams))) 2810 { 2811 stream = lsquic_hashelem_getdata(el); 2812 lsquic_hash_erase(conn->ifc_pub.all_streams, el); 2813 lsquic_stream_destroy(stream); 2814 } 2815 if (conn->ifc_flags & IFC_HTTP) 2816 { 2817 lsquic_qdh_cleanup(&conn->ifc_qdh); 2818 lsquic_qeh_cleanup(&conn->ifc_qeh); 2819 } 2820 for (dcep = conn->ifc_dces; dcep < conn->ifc_dces + sizeof(conn->ifc_dces) 2821 / sizeof(conn->ifc_dces[0]); ++dcep) 2822 if (*dcep) 2823 { 2824 if ((*dcep)->de_hash_el.qhe_flags & QHE_HASHED) 2825 lsquic_hash_erase(conn->ifc_enpub->enp_srst_hash, 2826 &(*dcep)->de_hash_el); 2827 lsquic_malo_put(*dcep); 2828 } 2829 while ((dce = TAILQ_FIRST(&conn->ifc_to_retire))) 2830 { 2831 TAILQ_REMOVE(&conn->ifc_to_retire, dce, de_next_to_ret); 2832 lsquic_malo_put(dce); 2833 } 2834 lsquic_send_ctl_cleanup(&conn->ifc_send_ctl); 2835 for (i = 0; i < N_PNS; ++i) 2836 lsquic_rechist_cleanup(&conn->ifc_rechist[i]); 2837 lsquic_malo_destroy(conn->ifc_pub.packet_out_malo); 2838 if (conn->ifc_flags & IFC_CREATED_OK) 2839 conn->ifc_enpub->enp_stream_if->on_conn_closed(&conn->ifc_conn); 2840 if (conn->ifc_conn.cn_enc_session) 2841 conn->ifc_conn.cn_esf.i->esfi_destroy(conn->ifc_conn.cn_enc_session); 2842 while (!STAILQ_EMPTY(&conn->ifc_stream_ids_to_ss)) 2843 { 2844 sits = STAILQ_FIRST(&conn->ifc_stream_ids_to_ss); 2845 STAILQ_REMOVE_HEAD(&conn->ifc_stream_ids_to_ss, sits_next); 2846 free(sits); 2847 } 2848 if (conn->ifc_flags & IFC_SERVER) 2849 { 2850 if (conn->ifc_pub.u.ietf.promises) 2851 lsquic_hash_destroy(conn->ifc_pub.u.ietf.promises); 2852 } 2853 lsquic_hash_destroy(conn->ifc_pub.all_streams); 2854 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "full connection destroyed"); 2855 free(conn->ifc_errmsg); 2856 free(conn); 2857} 2858 2859 2860static lsquic_time_t 2861ietf_full_conn_ci_drain_time (const struct lsquic_conn *lconn) 2862{ 2863 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2864 lsquic_time_t drain_time, pto, srtt, var; 2865 2866 /* Only applicable to a server whose connection was not timed out */ 2867 if ((conn->ifc_flags & (IFC_SERVER|IFC_TIMED_OUT)) != IFC_SERVER) 2868 
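    /* Either this is a client, or the server connection timed out: report a
     * zero drain time (do not linger in the draining state).
     */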
{ 2869 LSQ_DEBUG("drain time is zero (don't drain)"); 2870 return 0; 2871 } 2872 2873 /* PTO Calculation: [draft-ietf-quic-recovery-18], Section 6.2.2.1; 2874 * Drain time: [draft-ietf-quic-transport-19], Section 10.1. 2875 */ 2876 srtt = lsquic_rtt_stats_get_srtt(&conn->ifc_pub.rtt_stats); 2877 var = lsquic_rtt_stats_get_rttvar(&conn->ifc_pub.rtt_stats); 2878 pto = srtt + 4 * var + TP_DEF_MAX_ACK_DELAY * 1000; 2879 drain_time = 3 * pto; 2880 2881 LSQ_DEBUG("drain time is %"PRIu64" usec", drain_time); 2882 return drain_time; 2883} 2884 2885 2886static void 2887ietf_full_conn_ci_going_away (struct lsquic_conn *lconn) 2888{ 2889 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2890 2891 if (conn->ifc_flags & IFC_HTTP) 2892 { 2893 if (!(conn->ifc_flags & (IFC_CLOSING|IFC_GOING_AWAY))) 2894 { 2895 LSQ_INFO("connection marked as going away"); 2896 conn->ifc_flags |= IFC_GOING_AWAY; 2897 const lsquic_stream_id_t stream_id = conn->ifc_max_req_id + N_SITS; 2898 if (valid_stream_id(stream_id)) 2899 { 2900 if (0 == lsquic_hcso_write_goaway(&conn->ifc_hcso, 2901 conn->ifc_max_req_id)) 2902 lsquic_engine_add_conn_to_tickable(conn->ifc_enpub, lconn); 2903 else 2904 /* We're already going away, don't abort because of this */ 2905 LSQ_WARN("could not write GOAWAY frame"); 2906 } 2907 maybe_close_conn(conn); 2908 } 2909 } 2910 else 2911 LSQ_NOTICE("going away has no effect in non-HTTP mode"); 2912} 2913 2914 2915static void 2916handshake_failed (struct lsquic_conn *lconn) 2917{ 2918 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 2919 LSQ_DEBUG("handshake failed"); 2920 lsquic_alarmset_unset(&conn->ifc_alset, AL_HANDSHAKE); 2921 conn->ifc_flags |= IFC_HSK_FAILED; 2922} 2923 2924 2925static struct dcid_elem * 2926get_new_dce (struct ietf_full_conn *conn) 2927{ 2928 struct dcid_elem **el; 2929 2930 for (el = conn->ifc_dces; el < conn->ifc_dces + sizeof(conn->ifc_dces) 2931 / sizeof(conn->ifc_dces[0]); ++el) 2932 if (!*el) 2933 return *el = lsquic_malo_get(conn->ifc_pub.mm->malo.dcid_elem); 2934 2935 return NULL; 2936} 2937 2938 2939static void 2940queue_streams_blocked_frame (struct ietf_full_conn *conn, enum stream_dir sd) 2941{ 2942 enum stream_id_type sit; 2943 uint64_t limit; 2944 2945 if (0 == (conn->ifc_send_flags & (SF_SEND_STREAMS_BLOCKED << sd))) 2946 { 2947 conn->ifc_send_flags |= SF_SEND_STREAMS_BLOCKED << sd; 2948 sit = gen_sit(conn->ifc_flags & IFC_SERVER, sd); 2949 limit = conn->ifc_max_allowed_stream_id[sit] >> SIT_SHIFT; 2950 conn->ifc_send.streams_blocked[sd] = limit; 2951 LSQ_DEBUG("scheduled %sdirectional STREAMS_BLOCKED (limit=%"PRIu64 2952 ") frame", sd == SD_BIDI ? "bi" : "uni", limit); 2953 } 2954 else 2955 LSQ_DEBUG("%sdirectional STREAMS_BLOCKED frame already queued", 2956 sd == SD_BIDI ? 
"bi" : "uni"); 2957} 2958 2959 2960static void 2961retire_cid_from_tp (struct ietf_full_conn *conn, 2962 const struct transport_params *params) 2963{ 2964 struct dcid_elem *dce; 2965 2966 dce = get_new_dce(conn); 2967 if (!dce) 2968 { 2969 ABORT_ERROR("cannot allocate DCE"); 2970 return; 2971 } 2972 2973 memset(dce, 0, sizeof(*dce)); 2974 dce->de_cid = params->tp_preferred_address.cid; 2975 dce->de_seqno = 1; 2976 memcpy(dce->de_srst, params->tp_preferred_address.srst, 2977 sizeof(dce->de_srst)); 2978 dce->de_flags = DE_SRST; 2979 TAILQ_INSERT_TAIL(&conn->ifc_to_retire, dce, de_next_to_ret); 2980 LSQ_DEBUG("prepare to retire DCID seqno %"PRIu32, dce->de_seqno); 2981 conn->ifc_send_flags |= SF_SEND_RETIRE_CID; 2982} 2983 2984 2985static enum { BM_MIGRATING, BM_NOT_MIGRATING, BM_ERROR, } 2986try_to_begin_migration (struct ietf_full_conn *conn, 2987 const struct transport_params *params) 2988{ 2989 struct conn_path *copath; 2990 struct dcid_elem *dce; 2991 int is_ipv6; 2992 union { 2993 struct sockaddr_in v4; 2994 struct sockaddr_in6 v6; 2995 } sockaddr; 2996 2997 if (!conn->ifc_settings->es_allow_migration) 2998 { 2999 LSQ_DEBUG("Migration not allowed: retire PreferredAddress CID"); 3000 return BM_NOT_MIGRATING; 3001 } 3002 3003 if (conn->ifc_conn.cn_version <= LSQVER_ID28 /* Starting with ID-29, 3004 disable_active_migration TP applies only to the time period during 3005 the handshake. Our client does not migrate during the handshake: 3006 this code runs only after handshake has succeeded. */ 3007 && (params->tp_set & (1 << TPI_DISABLE_ACTIVE_MIGRATION))) 3008 { 3009 LSQ_DEBUG("TP disables migration: retire PreferredAddress CID"); 3010 return BM_NOT_MIGRATING; 3011 } 3012 3013 is_ipv6 = NP_IS_IPv6(CUR_NPATH(conn)); 3014 if ((is_ipv6 && !lsquic_tp_has_pref_ipv6(params)) 3015 || (!is_ipv6 && !lsquic_tp_has_pref_ipv4(params))) 3016 { 3017 /* XXX This is a limitation in the client code outside of the library. 3018 * To support cross-IP-version migration, we need to add some callbacks 3019 * to open a different socket. 3020 */ 3021 LSQ_DEBUG("Cannot migrate from IPv%u to IPv%u", is_ipv6 ? 6 : 4, 3022 is_ipv6 ? 4 : 6); 3023 return BM_NOT_MIGRATING; 3024 } 3025 3026 if (0 == params->tp_preferred_address.cid.len) 3027 { 3028 /* TODO: mark with a new flag and begin migration when a non-zero length 3029 * DCID becomes available. 
3030 */ 3031 LSQ_DEBUG("Cannot migrate using zero-length DCID"); 3032 return BM_NOT_MIGRATING; 3033 } 3034 3035 dce = get_new_dce(conn); 3036 if (!dce) 3037 { 3038 ABORT_WARN("cannot allocate DCE"); 3039 return BM_ERROR; 3040 } 3041 3042 memset(dce, 0, sizeof(*dce)); 3043 dce->de_cid = params->tp_preferred_address.cid; 3044 dce->de_seqno = 1; 3045 dce->de_flags = DE_SRST; 3046 memcpy(dce->de_srst, params->tp_preferred_address.srst, 3047 sizeof(dce->de_srst)); 3048 if (conn->ifc_enpub->enp_srst_hash) 3049 { 3050 if (!lsquic_hash_insert(conn->ifc_enpub->enp_srst_hash, 3051 dce->de_srst, sizeof(dce->de_srst), &conn->ifc_conn, 3052 &dce->de_hash_el)) 3053 { 3054 lsquic_malo_put(dce); 3055 ABORT_WARN("cannot insert DCE"); 3056 return BM_ERROR; 3057 } 3058 } 3059 3060 if (is_ipv6) 3061 { 3062 sockaddr.v6.sin6_family = AF_INET6; 3063 sockaddr.v6.sin6_port = htons(params->tp_preferred_address.ipv6_port); 3064 memcpy(&sockaddr.v6.sin6_addr, params->tp_preferred_address.ipv6_addr, 3065 sizeof(sockaddr.v6.sin6_addr)); 3066 } 3067 else 3068 { 3069 sockaddr.v4.sin_family = AF_INET; 3070 sockaddr.v4.sin_port = htons(params->tp_preferred_address.ipv4_port); 3071 memcpy(&sockaddr.v4.sin_addr, params->tp_preferred_address.ipv4_addr, 3072 sizeof(sockaddr.v4.sin_addr)); 3073 } 3074 3075 copath = &conn->ifc_paths[1]; 3076 assert(!(conn->ifc_used_paths & (1 << (copath - conn->ifc_paths)))); 3077 3078 migra_begin(conn, copath, dce, (struct sockaddr *) &sockaddr, params); 3079 return BM_MIGRATING; 3080} 3081 3082 3083static void 3084maybe_start_migration (struct ietf_full_conn *conn) 3085{ 3086 struct lsquic_conn *const lconn = &conn->ifc_conn; 3087 const struct transport_params *params; 3088 3089 params = lconn->cn_esf.i->esfi_get_peer_transport_params( 3090 lconn->cn_enc_session); 3091 if (params->tp_set & (1 << TPI_PREFERRED_ADDRESS)) 3092 switch (try_to_begin_migration(conn, params)) 3093 { 3094 case BM_MIGRATING: 3095 break; 3096 case BM_NOT_MIGRATING: 3097 if (lconn->cn_version == LSQVER_ID27) 3098 retire_cid_from_tp(conn, params); 3099 else 3100 { 3101/* 3102 * [draft-ietf-quic-transport-28] Section 5.1.1: 3103 " Connection IDs that are issued and not 3104 " retired are considered active; any active connection ID is valid for 3105 " use with the current connection at any time, in any packet type. 3106 " This includes the connection ID issued by the server via the 3107 " preferred_address transport parameter. 3108 */ 3109 LSQ_DEBUG("not migrating: save DCID from transport params"); 3110 (void) insert_new_dcid(conn, 1, 3111 ¶ms->tp_preferred_address.cid, 3112 params->tp_preferred_address.srst, 0); 3113 } 3114 break; 3115 case BM_ERROR: 3116 ABORT_QUIETLY(0, TEC_INTERNAL_ERROR, "error initiating migration"); 3117 break; 3118 } 3119} 3120 3121 3122static int 3123handshake_ok (struct lsquic_conn *lconn) 3124{ 3125 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3126 struct lsquic_stream *stream; 3127 struct lsquic_hash_elem *el; 3128 struct dcid_elem *dce; 3129 const struct transport_params *params; 3130 enum stream_id_type sit; 3131 uint64_t limit; 3132 char buf[MAX_TP_STR_SZ]; 3133 3134 fiu_return_on("full_conn_ietf/handshake_ok", -1); 3135 3136 /* Need to set this flag even we hit an error in the rest of this funciton. 
3137 * This is because this flag is used to calculate packet out header size 3138 */ 3139 lconn->cn_flags |= LSCONN_HANDSHAKE_DONE; 3140 3141 params = lconn->cn_esf.i->esfi_get_peer_transport_params( 3142 lconn->cn_enc_session); 3143 if (!params) 3144 { 3145 ABORT_WARN("could not get transport parameters"); 3146 return -1; 3147 } 3148 3149 LSQ_DEBUG("peer transport parameters: %s", 3150 ((lconn->cn_version == LSQVER_ID27 ? lsquic_tp_to_str_27 3151 : lsquic_tp_to_str)(params, buf, sizeof(buf)), buf)); 3152 3153 if ((params->tp_set & (1 << TPI_LOSS_BITS)) 3154 && conn->ifc_settings->es_ql_bits == 2) 3155 { 3156 LSQ_DEBUG("turn on QL loss bits"); 3157 lsquic_send_ctl_do_ql_bits(&conn->ifc_send_ctl); 3158 } 3159 3160 if (params->tp_init_max_streams_bidi > (1ull << 60) 3161 || params->tp_init_max_streams_uni > (1ull << 60)) 3162 { 3163 if (params->tp_init_max_streams_bidi > (1ull << 60)) 3164 ABORT_QUIETLY(0, TEC_STREAM_LIMIT_ERROR, "init_max_streams_bidi is " 3165 "too large: %"PRIu64, params->tp_init_max_streams_bidi); 3166 else 3167 ABORT_QUIETLY(0, TEC_STREAM_LIMIT_ERROR, "init_max_streams_uni is " 3168 "too large: %"PRIu64, params->tp_init_max_streams_uni); 3169 return -1; 3170 } 3171 3172 sit = gen_sit(conn->ifc_flags & IFC_SERVER, SD_BIDI); 3173 conn->ifc_max_allowed_stream_id[sit] = 3174 params->tp_init_max_streams_bidi << SIT_SHIFT; 3175 sit = gen_sit(conn->ifc_flags & IFC_SERVER, SD_UNI); 3176 conn->ifc_max_allowed_stream_id[sit] = 3177 params->tp_init_max_streams_uni << SIT_SHIFT; 3178 3179 conn->ifc_max_stream_data_uni = params->tp_init_max_stream_data_uni; 3180 3181 if (params->tp_init_max_data < conn->ifc_pub.conn_cap.cc_sent) 3182 { 3183 ABORT_WARN("peer specified init_max_data=%"PRIu64" bytes, which is " 3184 "smaller than the amount of data already sent on this connection " 3185 "(%"PRIu64" bytes)", params->tp_init_max_data, 3186 conn->ifc_pub.conn_cap.cc_sent); 3187 return -1; 3188 } 3189 3190 conn->ifc_pub.conn_cap.cc_max = params->tp_init_max_data; 3191 3192 for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el; 3193 el = lsquic_hash_next(conn->ifc_pub.all_streams)) 3194 { 3195 stream = lsquic_hashelem_getdata(el); 3196 if (is_our_stream(conn, stream)) 3197 limit = params->tp_init_max_stream_data_bidi_remote; 3198 else 3199 limit = params->tp_init_max_stream_data_bidi_local; 3200 if (0 != lsquic_stream_set_max_send_off(stream, limit)) 3201 { 3202 ABORT_WARN("cannot set peer-supplied max_stream_data=%"PRIu64 3203 "on stream %"PRIu64, limit, stream->id); 3204 return -1; 3205 } 3206 } 3207 3208 if (conn->ifc_flags & IFC_SERVER) 3209 conn->ifc_cfg.max_stream_send 3210 = params->tp_init_max_stream_data_bidi_local; 3211 else 3212 conn->ifc_cfg.max_stream_send 3213 = params->tp_init_max_stream_data_bidi_remote; 3214 conn->ifc_cfg.ack_exp = params->tp_ack_delay_exponent; 3215 3216 switch ((!!conn->ifc_settings->es_idle_timeout << 1) 3217 | !!params->tp_max_idle_timeout) 3218 { 3219 case (0 << 1) | 0: 3220 LSQ_DEBUG("neither side specified max idle time out, turn it off"); 3221 break; 3222 case (0 << 1) | 1: 3223 LSQ_DEBUG("peer specified max idle timeout of %"PRIu64" ms (vs ours " 3224 "of zero): use it", params->tp_max_idle_timeout); 3225 conn->ifc_idle_to = params->tp_max_idle_timeout * 1000; 3226 break; 3227 case (1 << 1) | 0: 3228 LSQ_DEBUG("peer did not specify max idle timeout, while ours is " 3229 "%u ms: use it", conn->ifc_settings->es_idle_timeout * 1000); 3230 conn->ifc_idle_to = conn->ifc_settings->es_idle_timeout * 1000000; 3231 break; 3232 default:/* (1 << 1) | 1 
*/ 3233 LSQ_DEBUG("our max idle timeout is %u ms, peer's is %"PRIu64" ms; " 3234 "use minimum value of %"PRIu64" ms", 3235 conn->ifc_settings->es_idle_timeout * 1000, 3236 params->tp_max_idle_timeout, 3237 MIN(conn->ifc_settings->es_idle_timeout * 1000, 3238 params->tp_max_idle_timeout)); 3239 conn->ifc_idle_to = 1000 * MIN(conn->ifc_settings->es_idle_timeout 3240 * 1000, params->tp_max_idle_timeout); 3241 break; 3242 } 3243 3244 if (conn->ifc_idle_to >= 2000000 3245 && conn->ifc_enpub->enp_settings.es_ping_period) 3246 conn->ifc_ping_period = conn->ifc_idle_to / 2; 3247 else 3248 conn->ifc_ping_period = 0; 3249 LSQ_DEBUG("PING period is set to %"PRIu64" usec", conn->ifc_ping_period); 3250 3251 if (conn->ifc_settings->es_delayed_acks 3252 && (params->tp_set & (1 << TPI_MIN_ACK_DELAY))) 3253 { 3254 LSQ_DEBUG("delayed ACKs enabled"); 3255 conn->ifc_flags |= IFC_DELAYED_ACKS; 3256 } 3257 if (conn->ifc_settings->es_timestamps 3258 && (params->tp_set & (1 << TPI_TIMESTAMPS))) 3259 { 3260 LSQ_DEBUG("timestamps enabled"); 3261 conn->ifc_flags |= IFC_TIMESTAMPS; 3262 } 3263 3264 conn->ifc_max_peer_ack_usec = params->tp_max_ack_delay * 1000; 3265 3266 if ((params->tp_set & (1 << TPI_MAX_UDP_PAYLOAD_SIZE)) 3267 /* Second check is so that we don't truncate a large value when 3268 * storing it in unsigned short. 3269 */ 3270 && params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE] 3271 < TP_DEF_MAX_UDP_PAYLOAD_SIZE) 3272 conn->ifc_max_udp_payload = params->tp_numerics[TPI_MAX_UDP_PAYLOAD_SIZE]; 3273 else 3274 conn->ifc_max_udp_payload = TP_DEF_MAX_UDP_PAYLOAD_SIZE; 3275 3276 if (conn->ifc_max_udp_payload < CUR_NPATH(conn)->np_pack_size) 3277 { 3278 CUR_NPATH(conn)->np_pack_size = conn->ifc_max_udp_payload; 3279 LSQ_DEBUG("decrease packet size to %hu bytes", 3280 CUR_NPATH(conn)->np_pack_size); 3281 } 3282 3283 dce = get_new_dce(conn); 3284 if (!dce) 3285 { 3286 ABORT_WARN("cannot allocate DCE"); 3287 return -1; 3288 } 3289 3290 memset(dce, 0, sizeof(*dce)); 3291 dce->de_cid = *CUR_DCID(conn); 3292 dce->de_seqno = 0; 3293 if (params->tp_set & (1 << TPI_STATELESS_RESET_TOKEN)) 3294 { 3295 memcpy(dce->de_srst, params->tp_stateless_reset_token, 3296 sizeof(dce->de_srst)); 3297 dce->de_flags = DE_SRST | DE_ASSIGNED; 3298 if (conn->ifc_enpub->enp_srst_hash) 3299 { 3300 if (!lsquic_hash_insert(conn->ifc_enpub->enp_srst_hash, 3301 dce->de_srst, sizeof(dce->de_srst), &conn->ifc_conn, 3302 &dce->de_hash_el)) 3303 { 3304 ABORT_WARN("cannot insert DCE"); 3305 return -1; 3306 } 3307 } 3308 } 3309 else 3310 dce->de_flags = DE_ASSIGNED; 3311 3312 LSQ_INFO("applied peer transport parameters"); 3313 3314 if (conn->ifc_flags & IFC_HTTP) 3315 { 3316 lsquic_qeh_init(&conn->ifc_qeh, &conn->ifc_conn); 3317 if (0 == avail_streams_count(conn, conn->ifc_flags & IFC_SERVER, 3318 SD_UNI)) 3319 { 3320 ABORT_QUIETLY(1, HEC_GENERAL_PROTOCOL_ERROR, "cannot create " 3321 "control stream due to peer-imposed limit"); 3322 conn->ifc_error = CONN_ERR(1, HEC_GENERAL_PROTOCOL_ERROR); 3323 return -1; 3324 } 3325 if (0 != create_ctl_stream_out(conn)) 3326 { 3327 ABORT_WARN("cannot create outgoing control stream"); 3328 return -1; 3329 } 3330 if (0 != lsquic_hcso_write_settings(&conn->ifc_hcso, 3331 &conn->ifc_enpub->enp_settings, conn->ifc_flags & IFC_SERVER)) 3332 { 3333 ABORT_WARN("cannot write SETTINGS"); 3334 return -1; 3335 } 3336 if (!(conn->ifc_flags & IFC_SERVER) 3337 && (conn->ifc_u.cli.ifcli_flags & IFCLI_PUSH_ENABLED) 3338 && 0 != lsquic_hcso_write_max_push_id(&conn->ifc_hcso, 3339 conn->ifc_u.cli.ifcli_max_push_id)) 3340 { 3341 
ABORT_WARN("cannot write MAX_PUSH_ID"); 3342 return -1; 3343 } 3344 if (0 != lsquic_qdh_init(&conn->ifc_qdh, &conn->ifc_conn, 3345 conn->ifc_flags & IFC_SERVER, conn->ifc_enpub, 3346 conn->ifc_settings->es_qpack_dec_max_size, 3347 conn->ifc_settings->es_qpack_dec_max_blocked)) 3348 { 3349 ABORT_WARN("cannot initialize QPACK decoder"); 3350 return -1; 3351 } 3352 if (avail_streams_count(conn, conn->ifc_flags & IFC_SERVER, SD_UNI) > 0) 3353 { 3354 if (0 != create_qdec_stream_out(conn)) 3355 { 3356 ABORT_WARN("cannot create outgoing QPACK decoder stream"); 3357 return -1; 3358 } 3359 } 3360 else 3361 { 3362 queue_streams_blocked_frame(conn, SD_UNI); 3363 LSQ_DEBUG("cannot create outgoing QPACK decoder stream due to " 3364 "unidir limits"); 3365 } 3366 } 3367 3368 if (params->tp_active_connection_id_limit > conn->ifc_conn.cn_n_cces) 3369 conn->ifc_active_cids_limit = conn->ifc_conn.cn_n_cces; 3370 else 3371 conn->ifc_active_cids_limit = params->tp_active_connection_id_limit; 3372 conn->ifc_first_active_cid_seqno = conn->ifc_scid_seqno; 3373 3374 if (conn->ifc_settings->es_dplpmtud) 3375 conn->ifc_mflags |= MF_CHECK_MTU_PROBE; 3376 3377 if (can_issue_cids(conn) && CN_SCID(&conn->ifc_conn)->len != 0) 3378 conn->ifc_send_flags |= SF_SEND_NEW_CID; 3379 maybe_create_delayed_streams(conn); 3380 3381 return 0; 3382} 3383 3384 3385static void 3386ietf_full_conn_ci_hsk_done (struct lsquic_conn *lconn, 3387 enum lsquic_hsk_status status) 3388{ 3389 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3390 3391 lsquic_alarmset_unset(&conn->ifc_alset, AL_HANDSHAKE); 3392 3393 switch (status) 3394 { 3395 case LSQ_HSK_OK: 3396 case LSQ_HSK_RESUMED_OK: 3397 if (0 == handshake_ok(lconn)) 3398 { 3399 if (!(conn->ifc_flags & IFC_SERVER)) 3400 lsquic_send_ctl_begin_optack_detection(&conn->ifc_send_ctl); 3401 } 3402 else 3403 { 3404 LSQ_INFO("handshake was reported successful, but later processing " 3405 "produced an error"); 3406 status = LSQ_HSK_FAIL; 3407 handshake_failed(lconn); 3408 } 3409 break; 3410 default: 3411 assert(0); 3412 /* fall-through */ 3413 case LSQ_HSK_FAIL: 3414 case LSQ_HSK_RESUMED_FAIL: 3415 handshake_failed(lconn); 3416 break; 3417 } 3418 if (conn->ifc_enpub->enp_stream_if->on_hsk_done) 3419 conn->ifc_enpub->enp_stream_if->on_hsk_done(lconn, status); 3420} 3421 3422 3423static void 3424ietf_full_conn_ci_tls_alert (struct lsquic_conn *lconn, uint8_t alert) 3425{ 3426 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3427 ABORT_QUIETLY(0, 0x100 + alert, "TLS alert %"PRIu8, alert); 3428} 3429 3430 3431static int 3432ietf_full_conn_ci_report_live (struct lsquic_conn *lconn, lsquic_time_t now) 3433{ 3434 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 3435 3436 if (conn->ifc_last_live_update + 30000000 < now) 3437 { 3438 conn->ifc_last_live_update = now; 3439 return 1; 3440 } 3441 else 3442 return 0; 3443} 3444 3445 3446static int 3447ietf_full_conn_ci_is_push_enabled (struct lsquic_conn *lconn) 3448{ 3449 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3450 3451 return (conn->ifc_flags & IFC_SERVER) 3452 && (conn->ifc_u.ser.ifser_flags 3453 & (IFSER_PUSH_ENABLED|IFSER_MAX_PUSH_ID)) 3454 == (IFSER_PUSH_ENABLED|IFSER_MAX_PUSH_ID) 3455 && conn->ifc_u.ser.ifser_next_push_id 3456 <= conn->ifc_u.ser.ifser_max_push_id 3457 && !either_side_going_away(conn) 3458 && avail_streams_count(conn, 1, SD_UNI) > 0 3459 ; 3460} 3461 3462 3463static void 3464undo_stream_creation (struct ietf_full_conn *conn, 3465 struct lsquic_stream *stream) 
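/* Roll back a freshly created stream: remove it from the all-streams hash,
 * decrement the per-direction creation counter, and destroy it.  The push
 * code below uses this when a later step of push-stream setup fails.
 */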
3466{ 3467 enum stream_dir sd; 3468 3469 assert(stream->sm_hash_el.qhe_flags & QHE_HASHED); 3470 assert(!(stream->stream_flags & STREAM_ONCLOSE_DONE)); 3471 3472 LSQ_DEBUG("undo creation of stream %"PRIu64, stream->id); 3473 lsquic_hash_erase(conn->ifc_pub.all_streams, &stream->sm_hash_el); 3474 sd = (stream->id >> SD_SHIFT) & 1; 3475 --conn->ifc_n_created_streams[sd]; 3476 lsquic_stream_destroy(stream); 3477} 3478 3479 3480/* This function is long because there are a lot of steps to perform, several 3481 * things can go wrong, which we want to roll back, yet at the same time we 3482 * want to do everything efficiently. 3483 */ 3484static int 3485ietf_full_conn_ci_push_stream (struct lsquic_conn *lconn, void *hset, 3486 struct lsquic_stream *dep_stream, const struct lsquic_http_headers *headers) 3487{ 3488 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3489 unsigned char *header_block_buf, *end, *p; 3490 size_t hea_sz, enc_sz; 3491 ssize_t prefix_sz; 3492 struct lsquic_hash_elem *el; 3493 struct push_promise *promise; 3494 struct lsquic_stream *pushed_stream; 3495 struct uncompressed_headers *uh; 3496 enum lsqpack_enc_status enc_st; 3497 int i; 3498 unsigned char discard[2]; 3499 struct lsxpack_header *xhdr; 3500 3501 if (!ietf_full_conn_ci_is_push_enabled(lconn) 3502 || !lsquic_stream_can_push(dep_stream)) 3503 { 3504 LSQ_DEBUG("cannot push using stream %"PRIu64, dep_stream->id); 3505 return -1; 3506 } 3507 3508 if (!hset) 3509 { 3510 LSQ_ERROR("header set must be specified when pushing"); 3511 return -1; 3512 } 3513 3514 if (0 != lsqpack_enc_start_header(&conn->ifc_qeh.qeh_encoder, 0, 0)) 3515 { 3516 LSQ_WARN("cannot start header for push stream"); 3517 return -1; 3518 } 3519 3520 header_block_buf = lsquic_mm_get_4k(conn->ifc_pub.mm); 3521 if (!header_block_buf) 3522 { 3523 LSQ_WARN("cannot allocate 4k"); 3524 (void) lsqpack_enc_cancel_header(&conn->ifc_qeh.qeh_encoder); 3525 return -1; 3526 } 3527 3528 /* Generate header block in cheap 4K memory. It it will be copied to 3529 * a new push_promise object. 
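 * The encoder is invoked with LQEF_NO_HIST_UPD|LQEF_NO_DYN, so the block
 * should not reference the QPACK dynamic table -- which is presumably why
 * the two-byte prefix checked below is expected to be all zeroes.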
3530 */ 3531 p = header_block_buf; 3532 end = header_block_buf + 0x1000; 3533 enc_sz = 0; /* Should not change */ 3534 for (i = 0; i < headers->count; ++i) 3535 { 3536 xhdr = &headers->headers[i]; 3537 if (!xhdr->buf) 3538 continue; 3539 hea_sz = end - p; 3540 enc_st = lsqpack_enc_encode(&conn->ifc_qeh.qeh_encoder, NULL, 3541 &enc_sz, p, &hea_sz, xhdr, LQEF_NO_HIST_UPD|LQEF_NO_DYN); 3542 if (enc_st == LQES_OK) 3543 p += hea_sz; 3544 else 3545 { 3546 (void) lsqpack_enc_cancel_header(&conn->ifc_qeh.qeh_encoder); 3547 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3548 LSQ_DEBUG("cannot encode header field for push %u", enc_st); 3549 return -1; 3550 } 3551 } 3552 prefix_sz = lsqpack_enc_end_header(&conn->ifc_qeh.qeh_encoder, 3553 discard, sizeof(discard), NULL); 3554 if (!(prefix_sz == 2 && discard[0] == 0 && discard[1] == 0)) 3555 { 3556 LSQ_WARN("stream push: unexpected prefix values %zd, %hhu, %hhu", 3557 prefix_sz, discard[0], discard[1]); 3558 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3559 return -1; 3560 } 3561 LSQ_DEBUG("generated push promise header block of %ld bytes", 3562 (long) (p - header_block_buf)); 3563 3564 pushed_stream = create_push_stream(conn); 3565 if (!pushed_stream) 3566 { 3567 LSQ_WARN("could not create push stream"); 3568 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3569 return -1; 3570 } 3571 3572 promise = malloc(sizeof(*promise) + (p - header_block_buf)); 3573 if (!promise) 3574 { 3575 LSQ_WARN("stream push: cannot allocate promise"); 3576 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3577 undo_stream_creation(conn, pushed_stream); 3578 return -1; 3579 } 3580 3581 uh = malloc(sizeof(*uh)); 3582 if (!uh) 3583 { 3584 LSQ_WARN("stream push: cannot allocate uh"); 3585 free(promise); 3586 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3587 undo_stream_creation(conn, pushed_stream); 3588 return -1; 3589 } 3590 uh->uh_stream_id = pushed_stream->id; 3591 uh->uh_oth_stream_id = 0; 3592 uh->uh_weight = lsquic_stream_priority(dep_stream) / 2 + 1; 3593 uh->uh_exclusive = 0; 3594 uh->uh_flags = UH_FIN; 3595 uh->uh_hset = hset; 3596 3597 memset(promise, 0, sizeof(*promise)); 3598 promise->pp_refcnt = 1; /* This function itself keeps a reference */ 3599 memcpy(promise->pp_content_buf, header_block_buf, p - header_block_buf); 3600 promise->pp_content_len = p - header_block_buf; 3601 promise->pp_id = conn->ifc_u.ser.ifser_next_push_id++; 3602 lsquic_mm_put_4k(conn->ifc_pub.mm, header_block_buf); 3603 3604 el = lsquic_hash_insert(conn->ifc_pub.u.ietf.promises, 3605 &promise->pp_id, sizeof(promise->pp_id), promise, 3606 &promise->pp_hash_id); 3607 if (!el) 3608 { 3609 LSQ_WARN("cannot insert push promise (ID)"); 3610 undo_stream_creation(conn, pushed_stream); 3611 lsquic_pp_put(promise, conn->ifc_pub.u.ietf.promises); 3612 free(uh); 3613 return -1; 3614 } 3615 3616 if (0 != lsquic_stream_push_promise(dep_stream, promise)) 3617 { 3618 LSQ_DEBUG("push promise failed"); 3619 undo_stream_creation(conn, pushed_stream); 3620 lsquic_pp_put(promise, conn->ifc_pub.u.ietf.promises); 3621 free(uh); 3622 return -1; 3623 } 3624 3625 if (0 != lsquic_stream_uh_in(pushed_stream, uh)) 3626 { 3627 LSQ_WARN("stream barfed when fed synthetic request"); 3628 undo_stream_creation(conn, pushed_stream); 3629 free(uh); 3630 if (0 != lsquic_hcso_write_cancel_push(&conn->ifc_hcso, 3631 promise->pp_id)) 3632 ABORT_WARN("cannot write CANCEL_PUSH"); 3633 lsquic_pp_put(promise, conn->ifc_pub.u.ietf.promises); 3634 return -1; 3635 } 3636 3637 /* Linking push promise with 
pushed stream is necessary for cancellation */ 3638 ++promise->pp_refcnt; 3639 promise->pp_pushed_stream = pushed_stream; 3640 pushed_stream->sm_promise = promise; 3641 3642 lsquic_stream_call_on_new(pushed_stream); 3643 3644 lsquic_pp_put(promise, conn->ifc_pub.u.ietf.promises); 3645 return 0; 3646} 3647 3648 3649static int 3650ietf_full_conn_ci_is_tickable (struct lsquic_conn *lconn) 3651{ 3652 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 3653 struct lsquic_stream *stream; 3654 3655 if (!TAILQ_EMPTY(&conn->ifc_pub.service_streams)) 3656 { 3657 LSQ_DEBUG("tickable: there are streams to be serviced"); 3658 return 1; 3659 } 3660 3661 if ((conn->ifc_enpub->enp_flags & ENPUB_CAN_SEND) 3662 && (should_generate_ack(conn, IFC_ACK_QUEUED) || 3663 !lsquic_send_ctl_sched_is_blocked(&conn->ifc_send_ctl))) 3664 { 3665 /* XXX What about queued ACKs: why check but not make tickable? */ 3666 if (conn->ifc_send_flags) 3667 { 3668 LSQ_DEBUG("tickable: send flags: 0x%X", conn->ifc_send_flags); 3669 goto check_can_send; 3670 } 3671 if (lsquic_send_ctl_has_sendable(&conn->ifc_send_ctl)) 3672 { 3673 LSQ_DEBUG("tickable: has sendable packets"); 3674 return 1; /* Don't check can_send: already on scheduled queue */ 3675 } 3676 if (conn->ifc_conn.cn_flags & LSCONN_SEND_BLOCKED) 3677 { 3678 LSQ_DEBUG("tickable: send DATA_BLOCKED frame"); 3679 goto check_can_send; 3680 } 3681 if (conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE ? 3682 lsquic_send_ctl_has_buffered(&conn->ifc_send_ctl) : 3683 lsquic_send_ctl_has_buffered_high(&conn->ifc_send_ctl)) 3684 { 3685 LSQ_DEBUG("tickable: has buffered packets"); 3686 goto check_can_send; 3687 } 3688 if (!TAILQ_EMPTY(&conn->ifc_pub.sending_streams)) 3689 { 3690 LSQ_DEBUG("tickable: there are sending streams"); 3691 goto check_can_send; 3692 } 3693 TAILQ_FOREACH(stream, &conn->ifc_pub.write_streams, next_write_stream) 3694 if (lsquic_stream_write_avail(stream)) 3695 { 3696 LSQ_DEBUG("tickable: stream %"PRIu64" can be written to", 3697 stream->id); 3698 goto check_can_send; 3699 } 3700 goto check_readable_streams; 3701 check_can_send: 3702 if (lsquic_send_ctl_can_send(&conn->ifc_send_ctl)) 3703 return 1; 3704 } 3705 3706 check_readable_streams: 3707 TAILQ_FOREACH(stream, &conn->ifc_pub.read_streams, next_read_stream) 3708 if (lsquic_stream_readable(stream)) 3709 { 3710 LSQ_DEBUG("tickable: stream %"PRIu64" can be read from", 3711 stream->id); 3712 return 1; 3713 } 3714 3715 LSQ_DEBUG("not tickable"); 3716 return 0; 3717} 3718 3719 3720static enum tick_st 3721immediate_close (struct ietf_full_conn *conn) 3722{ 3723 struct lsquic_packet_out *packet_out; 3724 const char *error_reason; 3725 struct conn_err conn_err; 3726 int sz; 3727 3728 if (conn->ifc_flags & (IFC_TICK_CLOSE|IFC_GOT_PRST)) 3729 return TICK_CLOSE; 3730 3731 if (!(conn->ifc_flags & IFC_SERVER) 3732 && conn->ifc_u.cli.ifcli_ver_neg.vn_state != VN_END) 3733 return TICK_CLOSE; 3734 3735 conn->ifc_flags |= IFC_TICK_CLOSE; 3736 3737 /* No reason to send anything that's been scheduled if connection is 3738 * being closed immedately. This also ensures that packet numbers 3739 * sequence is always increasing. 
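     *
     * Once scheduled packets are dropped, the only packet this function
     * may go on to schedule is the one carrying the CONNECTION_CLOSE
     * frame generated below.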
3740 */ 3741 lsquic_send_ctl_drop_scheduled(&conn->ifc_send_ctl); 3742 3743 if (conn->ifc_flags & (IFC_TIMED_OUT|IFC_HSK_FAILED)) 3744 return TICK_CLOSE; 3745 3746 packet_out = lsquic_send_ctl_new_packet_out(&conn->ifc_send_ctl, 0, 3747 PNS_APP, CUR_NPATH(conn)); 3748 if (!packet_out) 3749 { 3750 LSQ_WARN("cannot allocate packet: %s", strerror(errno)); 3751 return TICK_CLOSE; 3752 } 3753 3754 assert(conn->ifc_flags & (IFC_ERROR|IFC_ABORTED|IFC_HSK_FAILED)); 3755 if (conn->ifc_error.u.err != 0) 3756 { 3757 conn_err = conn->ifc_error; 3758 error_reason = conn->ifc_errmsg; 3759 } 3760 else if (conn->ifc_flags & IFC_ERROR) 3761 { 3762 conn_err = CONN_ERR(0, TEC_INTERNAL_ERROR); 3763 error_reason = "connection error"; 3764 } 3765 else if (conn->ifc_flags & IFC_ABORTED) 3766 { 3767 conn_err = CONN_ERR(0, TEC_NO_ERROR); 3768 error_reason = "user aborted connection"; 3769 } 3770 else if (conn->ifc_flags & IFC_HSK_FAILED) 3771 { 3772 conn_err = CONN_ERR(0, TEC_NO_ERROR); 3773 error_reason = "handshake failed"; 3774 } 3775 else 3776 { 3777 conn_err = CONN_ERR(0, TEC_NO_ERROR); 3778 error_reason = NULL; 3779 } 3780 3781 lsquic_send_ctl_scheduled_one(&conn->ifc_send_ctl, packet_out); 3782 sz = conn->ifc_conn.cn_pf->pf_gen_connect_close_frame( 3783 packet_out->po_data + packet_out->po_data_sz, 3784 lsquic_packet_out_avail(packet_out), conn_err.app_error, 3785 conn_err.u.err, error_reason, 3786 error_reason ? strlen(error_reason) : 0); 3787 if (sz < 0) { 3788 LSQ_WARN("%s failed", __func__); 3789 return TICK_CLOSE; 3790 } 3791 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 3792 QUIC_FRAME_CONNECTION_CLOSE, packet_out->po_data_sz, sz)) 3793 { 3794 LSQ_WARN("%s: adding frame to packet failed: %d", __func__, errno); 3795 return TICK_CLOSE; 3796 } 3797 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 3798 packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE; 3799 LSQ_DEBUG("generated CONNECTION_CLOSE frame in its own packet"); 3800 return TICK_SEND|TICK_CLOSE; 3801} 3802 3803 3804static void 3805process_streams_read_events (struct ietf_full_conn *conn) 3806{ 3807 struct lsquic_stream *stream; 3808 int iters; 3809 enum stream_q_flags q_flags, needs_service; 3810 struct stream_prio_iter spi; 3811 static const char *const labels[2] = { "read-0", "read-1", }; 3812 3813 if (TAILQ_EMPTY(&conn->ifc_pub.read_streams)) 3814 return; 3815 3816 conn->ifc_pub.cp_flags &= ~CP_STREAM_UNBLOCKED; 3817 iters = 0; 3818 do 3819 { 3820 lsquic_spi_init(&spi, TAILQ_FIRST(&conn->ifc_pub.read_streams), 3821 TAILQ_LAST(&conn->ifc_pub.read_streams, lsquic_streams_tailq), 3822 (uintptr_t) &TAILQ_NEXT((lsquic_stream_t *) NULL, next_read_stream), 3823 SMQF_WANT_READ, &conn->ifc_conn, labels[iters], NULL, NULL); 3824 3825 needs_service = 0; 3826 for (stream = lsquic_spi_first(&spi); stream; 3827 stream = lsquic_spi_next(&spi)) 3828 { 3829 q_flags = stream->sm_qflags & SMQF_SERVICE_FLAGS; 3830 lsquic_stream_dispatch_read_events(stream); 3831 needs_service |= q_flags ^ (stream->sm_qflags & SMQF_SERVICE_FLAGS); 3832 } 3833 3834 if (needs_service) 3835 service_streams(conn); 3836 } 3837 while (iters++ == 0 && (conn->ifc_pub.cp_flags & CP_STREAM_UNBLOCKED)); 3838} 3839 3840 3841static void 3842process_crypto_stream_read_events (struct ietf_full_conn *conn) 3843{ 3844 struct lsquic_stream **stream; 3845 3846 assert(!(conn->ifc_flags & IFC_SERVER)); 3847 for (stream = conn->ifc_u.cli.crypto_streams; stream < 3848 conn->ifc_u.cli.crypto_streams + sizeof(conn->ifc_u.cli.crypto_streams) 3849 / 
sizeof(conn->ifc_u.cli.crypto_streams[0]); ++stream) 3850 if (*stream && (*stream)->sm_qflags & SMQF_WANT_READ) 3851 lsquic_stream_dispatch_read_events(*stream); 3852} 3853 3854 3855static void 3856process_crypto_stream_write_events (struct ietf_full_conn *conn) 3857{ 3858 struct lsquic_stream **stream; 3859 3860 assert(!(conn->ifc_flags & IFC_SERVER)); 3861 for (stream = conn->ifc_u.cli.crypto_streams; stream < 3862 conn->ifc_u.cli.crypto_streams + sizeof(conn->ifc_u.cli.crypto_streams) 3863 / sizeof(conn->ifc_u.cli.crypto_streams[0]); ++stream) 3864 if (*stream && (*stream)->sm_qflags & SMQF_WRITE_Q_FLAGS) 3865 lsquic_stream_dispatch_write_events(*stream); 3866} 3867 3868 3869static void 3870maybe_conn_flush_special_streams (struct ietf_full_conn *conn) 3871{ 3872 if (!(conn->ifc_flags & IFC_HTTP)) 3873 return; 3874 3875 struct lsquic_stream *const streams[] = { 3876 conn->ifc_hcso.how_stream, 3877 conn->ifc_qeh.qeh_enc_sm_out, 3878 conn->ifc_qdh.qdh_dec_sm_out, 3879 }; 3880 struct lsquic_stream *const *stream; 3881 3882 for (stream = streams; stream < streams + sizeof(streams) 3883 / sizeof(streams[0]); ++stream) 3884 if (*stream && lsquic_stream_has_data_to_flush(*stream)) 3885 (void) lsquic_stream_flush(*stream); 3886} 3887 3888 3889static int 3890write_is_possible (struct ietf_full_conn *conn) 3891{ 3892 const lsquic_packet_out_t *packet_out; 3893 3894 packet_out = lsquic_send_ctl_last_scheduled(&conn->ifc_send_ctl, PNS_APP, 3895 CUR_NPATH(conn), 0); 3896 return (packet_out && lsquic_packet_out_avail(packet_out) > 10) 3897 || lsquic_send_ctl_can_send(&conn->ifc_send_ctl); 3898} 3899 3900 3901static void 3902process_streams_write_events (struct ietf_full_conn *conn, int high_prio) 3903{ 3904 struct lsquic_stream *stream; 3905 struct stream_prio_iter spi; 3906 3907 lsquic_spi_init(&spi, TAILQ_FIRST(&conn->ifc_pub.write_streams), 3908 TAILQ_LAST(&conn->ifc_pub.write_streams, lsquic_streams_tailq), 3909 (uintptr_t) &TAILQ_NEXT((lsquic_stream_t *) NULL, next_write_stream), 3910 SMQF_WANT_WRITE|SMQF_WANT_FLUSH, &conn->ifc_conn, 3911 high_prio ? "write-high" : "write-low", NULL, NULL); 3912 3913 if (high_prio) 3914 lsquic_spi_drop_non_high(&spi); 3915 else 3916 lsquic_spi_drop_high(&spi); 3917 3918 for (stream = lsquic_spi_first(&spi); stream && write_is_possible(conn); 3919 stream = lsquic_spi_next(&spi)) 3920 if (stream->sm_qflags & SMQF_WRITE_Q_FLAGS) 3921 lsquic_stream_dispatch_write_events(stream); 3922 3923 maybe_conn_flush_special_streams(conn); 3924} 3925 3926 3927static int 3928conn_ok_to_close (const struct ietf_full_conn *conn) 3929{ 3930 assert(conn->ifc_flags & IFC_CLOSING); 3931 return !(conn->ifc_flags & IFC_SERVER) 3932 || (conn->ifc_flags & IFC_RECV_CLOSE) 3933 || ( 3934 !lsquic_send_ctl_have_outgoing_stream_frames(&conn->ifc_send_ctl) 3935 && !have_bidi_streams(conn) 3936 && lsquic_send_ctl_have_unacked_stream_frames( 3937 &conn->ifc_send_ctl) == 0); 3938} 3939 3940 3941static void 3942generate_connection_close_packet (struct ietf_full_conn *conn) 3943{ 3944 struct lsquic_packet_out *packet_out; 3945 int sz; 3946 3947 /* FIXME Select PNS based on handshake status (possible on the client): if 3948 * appropriate keys are not available, encryption will fail. 
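     * (On the client, 1-RTT keys may not yet exist while the handshake is
     * still in progress, in which case a PNS_APP packet generated here
     * cannot be encrypted.)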
3949 */ 3950 packet_out = lsquic_send_ctl_new_packet_out(&conn->ifc_send_ctl, 0, PNS_APP, 3951 CUR_NPATH(conn)); 3952 if (!packet_out) 3953 { 3954 ABORT_ERROR("cannot allocate packet: %s", strerror(errno)); 3955 return; 3956 } 3957 3958 lsquic_send_ctl_scheduled_one(&conn->ifc_send_ctl, packet_out); 3959 sz = conn->ifc_conn.cn_pf->pf_gen_connect_close_frame( 3960 packet_out->po_data + packet_out->po_data_sz, 3961 lsquic_packet_out_avail(packet_out), 0, TEC_NO_ERROR, NULL, 0); 3962 if (sz < 0) { 3963 ABORT_ERROR("generate_connection_close_packet failed"); 3964 return; 3965 } 3966 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 3967 QUIC_FRAME_CONNECTION_CLOSE, packet_out->po_data_sz, sz)) 3968 { 3969 ABORT_ERROR("adding frame to packet failed: %d", errno); 3970 return; 3971 } 3972 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 3973 packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE; 3974 LSQ_DEBUG("generated CONNECTION_CLOSE frame in its own packet"); 3975 conn->ifc_send_flags &= ~SF_SEND_CONN_CLOSE; 3976} 3977 3978 3979static void 3980generate_ping_frame (struct ietf_full_conn *conn, lsquic_time_t unused) 3981{ 3982 struct lsquic_packet_out *packet_out; 3983 int sz; 3984 3985 packet_out = get_writeable_packet(conn, 1); 3986 if (!packet_out) 3987 { 3988 LSQ_DEBUG("cannot get writeable packet for PING frame"); 3989 return; 3990 } 3991 sz = conn->ifc_conn.cn_pf->pf_gen_ping_frame( 3992 packet_out->po_data + packet_out->po_data_sz, 3993 lsquic_packet_out_avail(packet_out)); 3994 if (sz < 0) { 3995 ABORT_ERROR("gen_ping_frame failed"); 3996 return; 3997 } 3998 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 3999 QUIC_FRAME_PING, packet_out->po_data_sz, sz)) 4000 { 4001 ABORT_ERROR("adding frame to packet failed: %d", errno); 4002 return; 4003 } 4004 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 4005 packet_out->po_frame_types |= 1 << QUIC_FRAME_PING; 4006 LSQ_DEBUG("wrote PING frame"); 4007 conn->ifc_send_flags &= ~SF_SEND_PING; 4008} 4009 4010 4011static void 4012generate_handshake_done_frame (struct ietf_full_conn *conn, 4013 lsquic_time_t unused) 4014{ 4015 struct lsquic_packet_out *packet_out; 4016 unsigned need; 4017 int sz; 4018 4019 need = conn->ifc_conn.cn_pf->pf_handshake_done_frame_size(); 4020 packet_out = get_writeable_packet(conn, need); 4021 if (!packet_out) 4022 return; 4023 sz = conn->ifc_conn.cn_pf->pf_gen_handshake_done_frame( 4024 packet_out->po_data + packet_out->po_data_sz, 4025 lsquic_packet_out_avail(packet_out)); 4026 if (sz < 0) 4027 { 4028 ABORT_ERROR("generate_handshake_done_frame failed"); 4029 return; 4030 } 4031 4032 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 4033 QUIC_FRAME_HANDSHAKE_DONE, packet_out->po_data_sz, sz)) 4034 { 4035 ABORT_ERROR("adding frame to packet failed: %d", errno); 4036 return; 4037 } 4038 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 4039 packet_out->po_frame_types |= QUIC_FTBIT_HANDSHAKE_DONE; 4040 LSQ_DEBUG("generated HANDSHAKE_DONE frame"); 4041 conn->ifc_send_flags &= ~SF_SEND_HANDSHAKE_DONE; 4042} 4043 4044 4045static void 4046generate_ack_frequency_frame (struct ietf_full_conn *conn, lsquic_time_t unused) 4047{ 4048 struct lsquic_packet_out *packet_out; 4049 unsigned need; 4050 int sz; 4051 4052 need = conn->ifc_conn.cn_pf->pf_ack_frequency_frame_size( 4053 conn->ifc_ack_freq_seqno, 2, ACK_TIMEOUT); 4054 packet_out = get_writeable_packet(conn, need); 4055 if (!packet_out) 4056 { 4057 
LSQ_DEBUG("cannot get writeable packet for ACK_FREQUENCY frame"); 4058 return; 4059 } 4060 4061 conn->ifc_last_pack_tol = conn->ifc_ias.avg_acked; 4062 sz = conn->ifc_conn.cn_pf->pf_gen_ack_frequency_frame( 4063 packet_out->po_data + packet_out->po_data_sz, 4064 lsquic_packet_out_avail(packet_out), 4065 conn->ifc_ack_freq_seqno, conn->ifc_last_pack_tol, 4066 conn->ifc_max_peer_ack_usec); 4067 if (sz < 0) 4068 { 4069 ABORT_ERROR("gen_ack_frequency_frame failed"); 4070 return; 4071 } 4072 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 4073 QUIC_FRAME_ACK_FREQUENCY, packet_out->po_data_sz, sz)) 4074 { 4075 ABORT_ERROR("adding frame to packet failed: %d", errno); 4076 return; 4077 } 4078 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 4079 packet_out->po_frame_types |= QUIC_FTBIT_ACK_FREQUENCY; 4080 ++conn->ifc_ack_freq_seqno; 4081 conn->ifc_send_flags &= ~SF_SEND_ACK_FREQUENCY; 4082} 4083 4084 4085static void 4086generate_path_chal_frame (struct ietf_full_conn *conn, lsquic_time_t now, 4087 unsigned path_id) 4088{ 4089 struct lsquic_packet_out *packet_out; 4090 struct conn_path *copath; 4091 unsigned need; 4092 int w; 4093 char hexbuf[ sizeof(copath->cop_path_chals[0]) * 2 + 1 ]; 4094 4095 /* For now, we only support sending path challenges on a single path. 4096 * This restriction may need to be lifted if the client is probing 4097 * several paths at the same time. 4098 */ 4099 if (!(conn->ifc_flags & IFC_SERVER)) 4100 assert(path_id == conn->ifc_mig_path_id); 4101 4102 copath = &conn->ifc_paths[path_id]; 4103 if (copath->cop_n_chals >= sizeof(copath->cop_path_chals) 4104 / sizeof(copath->cop_path_chals[0])) 4105 { 4106 /* TODO: path failure? */ 4107 assert(0); 4108 return; 4109 } 4110 4111 need = conn->ifc_conn.cn_pf->pf_path_chal_frame_size(); 4112 packet_out = get_writeable_packet_on_path(conn, need, &copath->cop_path, 1); 4113 if (!packet_out) 4114 return; 4115 4116 RAND_bytes((void *) &copath->cop_path_chals[copath->cop_n_chals], 4117 sizeof(copath->cop_path_chals[0])); 4118 w = conn->ifc_conn.cn_pf->pf_gen_path_chal_frame( 4119 packet_out->po_data + packet_out->po_data_sz, 4120 lsquic_packet_out_avail(packet_out), 4121 copath->cop_path_chals[copath->cop_n_chals]); 4122 if (w < 0) 4123 { 4124 ABORT_ERROR("generating PATH_CHALLENGE frame failed: %d", errno); 4125 return; 4126 } 4127 LSQ_DEBUG("generated %d-byte PATH_CHALLENGE frame; challenge: %s" 4128 ", seq: %u", w, 4129 HEXSTR((unsigned char *) &copath->cop_path_chals[copath->cop_n_chals], 4130 sizeof(copath->cop_path_chals[copath->cop_n_chals]), hexbuf), 4131 copath->cop_n_chals); 4132 ++copath->cop_n_chals; 4133 EV_LOG_GENERATED_PATH_CHAL_FRAME(LSQUIC_LOG_CONN_ID, conn->ifc_conn.cn_pf, 4134 packet_out->po_data + packet_out->po_data_sz, w); 4135 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 4136 QUIC_FRAME_PATH_CHALLENGE, packet_out->po_data_sz, w)) 4137 { 4138 ABORT_ERROR("adding frame to packet failed: %d", errno); 4139 return; 4140 } 4141 packet_out->po_frame_types |= QUIC_FTBIT_PATH_CHALLENGE; 4142 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 4143 packet_out->po_regen_sz += w; 4144 conn->ifc_send_flags &= ~(SF_SEND_PATH_CHAL << path_id); 4145 lsquic_alarmset_set(&conn->ifc_alset, AL_PATH_CHAL + path_id, 4146 now + (INITIAL_CHAL_TIMEOUT << (copath->cop_n_chals - 1))); 4147} 4148 4149 4150static void 4151generate_path_chal_0 (struct ietf_full_conn *conn, lsquic_time_t now) 4152{ 4153 generate_path_chal_frame(conn, now, 0); 4154} 4155 4156 
4157static void 4158generate_path_chal_1 (struct ietf_full_conn *conn, lsquic_time_t now) 4159{ 4160 generate_path_chal_frame(conn, now, 1); 4161} 4162 4163 4164static void 4165generate_path_chal_2 (struct ietf_full_conn *conn, lsquic_time_t now) 4166{ 4167 generate_path_chal_frame(conn, now, 2); 4168} 4169 4170 4171static void 4172generate_path_chal_3 (struct ietf_full_conn *conn, lsquic_time_t now) 4173{ 4174 generate_path_chal_frame(conn, now, 3); 4175} 4176 4177 4178static void 4179generate_path_resp_frame (struct ietf_full_conn *conn, lsquic_time_t now, 4180 unsigned path_id) 4181{ 4182 struct lsquic_packet_out *packet_out; 4183 struct conn_path *copath; 4184 unsigned need; 4185 int w; 4186 4187 copath = &conn->ifc_paths[path_id]; 4188 need = conn->ifc_conn.cn_pf->pf_path_resp_frame_size(); 4189 packet_out = get_writeable_packet_on_path(conn, need, &copath->cop_path, 1); 4190 if (!packet_out) 4191 return; 4192 4193 w = conn->ifc_conn.cn_pf->pf_gen_path_resp_frame( 4194 packet_out->po_data + packet_out->po_data_sz, 4195 lsquic_packet_out_avail(packet_out), 4196 copath->cop_inc_chal); 4197 if (w < 0) 4198 { 4199 ABORT_ERROR("generating PATH_RESPONSE frame failed: %d", errno); 4200 return; 4201 } 4202 LSQ_DEBUG("generated %d-byte PATH_RESPONSE frame; response: %016"PRIX64, 4203 w, copath->cop_inc_chal); 4204 EV_LOG_GENERATED_PATH_RESP_FRAME(LSQUIC_LOG_CONN_ID, conn->ifc_conn.cn_pf, 4205 packet_out->po_data + packet_out->po_data_sz, w); 4206 if (0 != lsquic_packet_out_add_frame(packet_out, conn->ifc_pub.mm, 0, 4207 QUIC_FRAME_PATH_RESPONSE, packet_out->po_data_sz, w)) 4208 { 4209 ABORT_ERROR("adding frame to packet failed: %d", errno); 4210 return; 4211 } 4212 packet_out->po_frame_types |= QUIC_FTBIT_PATH_RESPONSE; 4213 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, w); 4214 packet_out->po_regen_sz += w; 4215 conn->ifc_send_flags &= ~(SF_SEND_PATH_RESP << path_id); 4216} 4217 4218 4219static void 4220generate_path_resp_0 (struct ietf_full_conn *conn, lsquic_time_t now) 4221{ 4222 generate_path_resp_frame(conn, now, 0); 4223} 4224 4225 4226static void 4227generate_path_resp_1 (struct ietf_full_conn *conn, lsquic_time_t now) 4228{ 4229 generate_path_resp_frame(conn, now, 1); 4230} 4231 4232 4233static void 4234generate_path_resp_2 (struct ietf_full_conn *conn, lsquic_time_t now) 4235{ 4236 generate_path_resp_frame(conn, now, 2); 4237} 4238 4239 4240static void 4241generate_path_resp_3 (struct ietf_full_conn *conn, lsquic_time_t now) 4242{ 4243 generate_path_resp_frame(conn, now, 3); 4244} 4245 4246 4247static struct lsquic_packet_out * 4248ietf_full_conn_ci_next_packet_to_send (struct lsquic_conn *lconn, size_t size) 4249{ 4250 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 4251 struct lsquic_packet_out *packet_out; 4252 4253 packet_out = lsquic_send_ctl_next_packet_to_send(&conn->ifc_send_ctl, size); 4254 if (packet_out) 4255 lsquic_packet_out_set_spin_bit(packet_out, conn->ifc_spin_bit); 4256 return packet_out; 4257} 4258 4259 4260static struct lsquic_packet_out * 4261ietf_full_conn_ci_next_packet_to_send_pre_hsk (struct lsquic_conn *lconn, 4262 size_t size) 4263{ 4264 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 4265 struct lsquic_packet_out *packet_out; 4266 4267 packet_out = ietf_full_conn_ci_next_packet_to_send(lconn, size); 4268 if (packet_out) 4269 ++conn->ifc_u.cli.ifcli_packets_out; 4270 return packet_out; 4271} 4272 4273 4274static lsquic_time_t 4275ietf_full_conn_ci_next_tick_time (struct lsquic_conn *lconn, unsigned *why) 4276{ 
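    /* Return the earlier of the next alarm time and the next pacer time,
     * or zero if neither is set.  *why records which one was chosen:
     * AEW_PACER for the pacer, N_AEWS plus the alarm ID for an alarm.
     */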
4277 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 4278 lsquic_time_t alarm_time, pacer_time, now; 4279 enum alarm_id al_id; 4280 4281 alarm_time = lsquic_alarmset_mintime(&conn->ifc_alset, &al_id); 4282 pacer_time = lsquic_send_ctl_next_pacer_time(&conn->ifc_send_ctl); 4283 4284 if (pacer_time && LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)) 4285 { 4286 now = lsquic_time_now(); 4287 if (pacer_time < now) 4288 LSQ_DEBUG("%s: pacer is %"PRIu64" usec in the past", __func__, 4289 now - pacer_time); 4290 } 4291 4292 if (alarm_time && pacer_time) 4293 { 4294 if (alarm_time < pacer_time) 4295 { 4296 *why = N_AEWS + al_id; 4297 return alarm_time; 4298 } 4299 else 4300 { 4301 *why = AEW_PACER; 4302 return pacer_time; 4303 } 4304 } 4305 else if (alarm_time) 4306 { 4307 *why = N_AEWS + al_id; 4308 return alarm_time; 4309 } 4310 else if (pacer_time) 4311 { 4312 *why = AEW_PACER; 4313 return pacer_time; 4314 } 4315 else 4316 return 0; 4317} 4318 4319 4320static ptrdiff_t 4321count_zero_bytes (const unsigned char *p, size_t len) 4322{ 4323 const unsigned char *const end = p + len; 4324 while (p < end && 0 == *p) 4325 ++p; 4326 return len - (end - p); 4327} 4328 4329 4330static unsigned 4331process_padding_frame (struct ietf_full_conn *conn, 4332 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4333{ 4334 return (unsigned) count_zero_bytes(p, len); 4335} 4336 4337 4338static void 4339handshake_confirmed (struct ietf_full_conn *conn) 4340{ 4341 ignore_hsk(conn); 4342 /* Even in ID-25, we wait for 1-RTT ACK on the server before dropping keys. 4343 */ 4344 conn->ifc_conn.cn_esf.i->esfi_handshake_confirmed( 4345 conn->ifc_conn.cn_enc_session); 4346 if (!(conn->ifc_flags & (IFC_SERVER|IFC_MIGRA))) 4347 { 4348 conn->ifc_flags |= IFC_MIGRA; /* Perform migration just once */ 4349 maybe_start_migration(conn); 4350 } 4351} 4352 4353 4354static void 4355update_ema (float *val, unsigned new) 4356{ 4357 if (*val) 4358 *val = (new - *val) * 0.4 + *val; 4359 else 4360 *val = new; 4361} 4362 4363 4364static void 4365update_target_packet_tolerance (struct ietf_full_conn *conn, 4366 const unsigned n_newly_acked) 4367{ 4368 update_ema(&conn->ifc_ias.avg_n_acks, conn->ifc_ias.n_acks); 4369 update_ema(&conn->ifc_ias.avg_acked, n_newly_acked); 4370 LSQ_DEBUG("packtol logic: %u ACK frames (avg: %.2f), %u newly acked " 4371 "(avg: %.1f), last sent %u", conn->ifc_ias.n_acks, 4372 conn->ifc_ias.avg_n_acks, n_newly_acked, conn->ifc_ias.avg_acked, 4373 conn->ifc_last_pack_tol); 4374 if (conn->ifc_ias.avg_n_acks > 1.5 && conn->ifc_ias.avg_acked > 2.0 4375 && conn->ifc_ias.avg_acked > (float) conn->ifc_last_pack_tol) 4376 { 4377 LSQ_DEBUG("old packet tolerance target: %u, schedule ACK_FREQUENCY " 4378 "increase", conn->ifc_last_pack_tol); 4379 conn->ifc_send_flags |= SF_SEND_ACK_FREQUENCY; 4380 } 4381 else if (conn->ifc_ias.avg_n_acks < 1.5 4382 && conn->ifc_ias.avg_acked < (float) conn->ifc_last_pack_tol * 3 / 4) 4383 { 4384 LSQ_DEBUG("old packet tolerance target: %u, schedule ACK_FREQUENCY " 4385 "decrease", conn->ifc_last_pack_tol); 4386 conn->ifc_send_flags |= SF_SEND_ACK_FREQUENCY; 4387 } 4388} 4389 4390 4391static int 4392process_ack (struct ietf_full_conn *conn, struct ack_info *acki, 4393 lsquic_time_t received, lsquic_time_t now) 4394{ 4395 enum packnum_space pns; 4396 lsquic_packno_t packno; 4397 unsigned n_unacked; 4398 int one_rtt_acked; 4399 4400 LSQ_DEBUG("Processing ACK"); 4401 one_rtt_acked = lsquic_send_ctl_1rtt_acked(&conn->ifc_send_ctl); 4402 n_unacked = 
lsquic_send_ctl_n_unacked(&conn->ifc_send_ctl); 4403 if (0 == lsquic_send_ctl_got_ack(&conn->ifc_send_ctl, acki, received, now)) 4404 { 4405 pns = acki->pns; 4406 packno = lsquic_send_ctl_largest_ack2ed(&conn->ifc_send_ctl, pns); 4407 /* It's OK to skip valid packno 0: the alternative is too expensive */ 4408 if (packno) 4409 lsquic_rechist_stop_wait(&conn->ifc_rechist[ pns ], packno + 1); 4410 /* ACK of 1-RTT packet indicates that handshake has been confirmed: */ 4411 if (!one_rtt_acked && lsquic_send_ctl_1rtt_acked(&conn->ifc_send_ctl)) 4412 { 4413 if (!(conn->ifc_flags & IFC_IGNORE_INIT)) 4414 ignore_init(conn); 4415 handshake_confirmed(conn); 4416 } 4417 if (PNS_APP == pns && (conn->ifc_flags & IFC_DELAYED_ACKS)) 4418 update_target_packet_tolerance(conn, 4419 n_unacked - lsquic_send_ctl_n_unacked(&conn->ifc_send_ctl)); 4420 return 0; 4421 } 4422 else 4423 { 4424 ABORT_ERROR("Received invalid ACK"); 4425 return -1; 4426 } 4427} 4428 4429 4430static unsigned 4431process_path_challenge_frame (struct ietf_full_conn *conn, 4432 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4433{ 4434 struct conn_path *const path = &conn->ifc_paths[packet_in->pi_path_id]; 4435 int parsed_len; 4436 char hexbuf[sizeof(path->cop_inc_chal) * 2 + 1]; 4437 4438 parsed_len = conn->ifc_conn.cn_pf->pf_parse_path_chal_frame(p, len, 4439 /* It's OK to overwrite incoming challenge, only reply to latest */ 4440 &path->cop_inc_chal); 4441 if (parsed_len > 0) 4442 { 4443 LSQ_DEBUG("received path challenge %s for path #%hhu", 4444 HEXSTR((unsigned char *) &path->cop_inc_chal, 4445 sizeof(path->cop_inc_chal), hexbuf), packet_in->pi_path_id); 4446 conn->ifc_send_flags |= SF_SEND_PATH_RESP << packet_in->pi_path_id; 4447 return parsed_len; 4448 } 4449 else 4450 return 0; 4451} 4452 4453 4454/* Why "maybe?" Because it is possible that the peer did not provide us 4455 * enough CIDs and we had to reuse one. See init_new_path(). 
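 * The loop below counts how many paths reference this DCID and retires it
 * only when no more than one of them does.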
4456 */ 4457static void 4458maybe_retire_dcid (struct ietf_full_conn *conn, const lsquic_cid_t *dcid) 4459{ 4460 struct conn_path *copath; 4461 struct dcid_elem **dce; 4462 unsigned eqs; 4463 4464 eqs = 0; 4465 for (copath = conn->ifc_paths; copath < conn->ifc_paths 4466 + sizeof(conn->ifc_paths) / sizeof(conn->ifc_paths[0]); ++copath) 4467 eqs += LSQUIC_CIDS_EQ(&copath->cop_path.np_dcid, dcid); 4468 4469 if (eqs > 1) 4470 { 4471 LSQ_INFOC("cannot retire %"CID_FMT", as it is used on more than one" 4472 "path ", CID_BITS(dcid)); 4473 return; 4474 } 4475 4476 for (dce = conn->ifc_dces; dce < DCES_END(conn); ++dce) 4477 if (*dce && ((*dce)->de_flags & DE_ASSIGNED) 4478 && LSQUIC_CIDS_EQ(&(*dce)->de_cid, dcid)) 4479 break; 4480 4481 assert(dce < DCES_END(conn)); 4482 if (dce < DCES_END(conn)) 4483 retire_dcid(conn, dce); 4484} 4485 4486 4487static void 4488switch_path_to (struct ietf_full_conn *conn, unsigned char path_id) 4489{ 4490 const unsigned char old_path_id = conn->ifc_cur_path_id; 4491 4492 assert(conn->ifc_cur_path_id != path_id); 4493 4494 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "switched paths"); 4495 lsquic_send_ctl_repath(&conn->ifc_send_ctl, 4496 CUR_NPATH(conn), &conn->ifc_paths[path_id].cop_path); 4497 maybe_retire_dcid(conn, &CUR_NPATH(conn)->np_dcid); 4498 conn->ifc_cur_path_id = path_id; 4499 conn->ifc_pub.path = CUR_NPATH(conn); 4500 conn->ifc_conn.cn_cur_cce_idx = CUR_CPATH(conn)->cop_cce_idx; 4501 conn->ifc_send_flags &= ~(SF_SEND_PATH_CHAL << old_path_id); 4502 conn->ifc_send_flags &= ~(SF_SEND_PATH_RESP << old_path_id); 4503 lsquic_alarmset_unset(&conn->ifc_alset, AL_PATH_CHAL + old_path_id); 4504 if (conn->ifc_flags & IFC_SERVER) 4505 wipe_path(conn, old_path_id); 4506} 4507 4508 4509static unsigned 4510process_path_response_frame (struct ietf_full_conn *conn, 4511 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4512{ 4513 struct conn_path *path; 4514 int parsed_len; 4515 unsigned i; 4516 unsigned char path_id; 4517 uint64_t path_resp; 4518 char hexbuf[ sizeof(path_resp) * 2 + 1 ]; 4519 4520 parsed_len = conn->ifc_conn.cn_pf->pf_parse_path_resp_frame(p, len, 4521 &path_resp); 4522 if (parsed_len <= 0) 4523 return 0; 4524 4525 LSQ_DEBUG("received path response: %s", 4526 HEXSTR((unsigned char *) &path_resp, sizeof(path_resp), hexbuf)); 4527 4528 for (path = conn->ifc_paths; path < conn->ifc_paths 4529 + sizeof(conn->ifc_paths) / sizeof(conn->ifc_paths[0]); ++path) 4530 { 4531 path_id = path - conn->ifc_paths; 4532 if ((1 << path_id) & conn->ifc_used_paths) 4533 for (i = 0; i < path->cop_n_chals; ++i) 4534 if (path_resp == path->cop_path_chals[i]) 4535 goto found; 4536 } 4537 4538 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 4539 "received path response %s that does not correspond to any " 4540 "challenge sent on this path", 4541 HEXSTR((unsigned char *) &path_resp, sizeof(path_resp), hexbuf)); 4542 return 0; 4543 4544 found: 4545 path->cop_flags |= COP_VALIDATED; 4546 conn->ifc_send_flags &= ~(SF_SEND_PATH_CHAL << path_id); 4547 lsquic_alarmset_unset(&conn->ifc_alset, AL_PATH_CHAL + path_id); 4548 switch ((path_id != conn->ifc_cur_path_id) | 4549 (!!(path->cop_flags & COP_GOT_NONPROB) << 1)) 4550 { 4551 case 3: 4552 LSQ_INFO("path validated: switching from path #%hhu to path #%hhu", 4553 conn->ifc_cur_path_id, path_id); 4554 switch_path_to(conn, path_id); 4555 break; 4556 case 1: 4557 if (conn->ifc_flags & IFC_SERVER) 4558 /* If you see this message in the log file, remember that 4559 * COP_GOT_NONPROB is set after all frames in a packet have 4560 * 
been processed. 4561 */ 4562 LSQ_DEBUG("path #%hhu validated, but since no non-probing frames " 4563 "have been received, delay switching to it", 4564 path_id); 4565 else 4566 { 4567 LSQ_INFO("path validated: switching from path #%hhu to path #%hhu", 4568 conn->ifc_cur_path_id, path_id); 4569 switch_path_to(conn, path_id); 4570 } 4571 break; 4572 default: 4573 LSQ_DEBUG("current path validated"); 4574 break; 4575 } 4576 4577 return parsed_len; 4578} 4579 4580 4581static lsquic_stream_t * 4582find_stream_by_id (struct ietf_full_conn *conn, lsquic_stream_id_t stream_id) 4583{ 4584 struct lsquic_hash_elem *el; 4585 el = lsquic_hash_find(conn->ifc_pub.all_streams, &stream_id, 4586 sizeof(stream_id)); 4587 if (el) 4588 return lsquic_hashelem_getdata(el); 4589 else 4590 return NULL; 4591} 4592 4593 4594static void 4595maybe_schedule_ss_for_stream (struct ietf_full_conn *conn, 4596 lsquic_stream_id_t stream_id, enum http_error_code error_code) 4597{ 4598 struct stream_id_to_ss *sits; 4599 4600 if (conn_is_stream_closed(conn, stream_id)) 4601 return; 4602 4603 sits = malloc(sizeof(*sits)); 4604 if (!sits) 4605 return; 4606 4607 sits->sits_stream_id = stream_id; 4608 sits->sits_error_code = error_code; 4609 STAILQ_INSERT_TAIL(&conn->ifc_stream_ids_to_ss, sits, sits_next); 4610 conn->ifc_send_flags |= SF_SEND_STOP_SENDING; 4611 conn_mark_stream_closed(conn, stream_id); 4612} 4613 4614 4615/* This function is called to create incoming streams */ 4616static struct lsquic_stream * 4617new_stream (struct ietf_full_conn *conn, lsquic_stream_id_t stream_id, 4618 enum stream_ctor_flags flags) 4619{ 4620 const struct lsquic_stream_if *iface; 4621 void *stream_ctx; 4622 struct lsquic_stream *stream; 4623 unsigned initial_window; 4624 const int call_on_new = flags & SCF_CALL_ON_NEW; 4625 4626 flags &= ~SCF_CALL_ON_NEW; 4627 flags |= SCF_DI_AUTOSWITCH|SCF_IETF; 4628 4629 if ((conn->ifc_flags & IFC_HTTP) && ((stream_id >> SD_SHIFT) & 1) == SD_UNI) 4630 { 4631 iface = unicla_if_ptr; 4632 stream_ctx = conn; 4633 /* FIXME: This logic does not work for push streams. Perhaps one way 4634 * to address this is to reclassify them later? 
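         * (A push stream is also a peer-initiated unidirectional stream,
         * so it would take this branch and be marked critical before its
         * actual type is known.)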
4635 */ 4636 flags |= SCF_CRITICAL; 4637 } 4638 else 4639 { 4640 iface = conn->ifc_enpub->enp_stream_if; 4641 stream_ctx = conn->ifc_enpub->enp_stream_if_ctx; 4642 if (conn->ifc_enpub->enp_settings.es_rw_once) 4643 flags |= SCF_DISP_RW_ONCE; 4644 if (conn->ifc_flags & IFC_HTTP) 4645 flags |= SCF_HTTP; 4646 } 4647 4648 if (((stream_id >> SD_SHIFT) & 1) == SD_UNI) 4649 initial_window = conn->ifc_enpub->enp_settings 4650 .es_init_max_stream_data_uni; 4651 else 4652 initial_window = conn->ifc_enpub->enp_settings 4653 .es_init_max_stream_data_bidi_remote; 4654 4655 stream = lsquic_stream_new(stream_id, &conn->ifc_pub, 4656 iface, stream_ctx, initial_window, 4657 conn->ifc_cfg.max_stream_send, flags); 4658 if (stream) 4659 { 4660 if (lsquic_hash_insert(conn->ifc_pub.all_streams, &stream->id, 4661 sizeof(stream->id), stream, &stream->sm_hash_el)) 4662 { 4663 if (call_on_new) 4664 lsquic_stream_call_on_new(stream); 4665 } 4666 else 4667 { 4668 lsquic_stream_destroy(stream); 4669 stream = NULL; 4670 } 4671 } 4672 return stream; 4673} 4674 4675 4676static int 4677conn_is_send_only_stream (const struct ietf_full_conn *conn, 4678 lsquic_stream_id_t stream_id) 4679{ 4680 enum stream_id_type sit; 4681 4682 sit = stream_id & SIT_MASK; 4683 if (conn->ifc_flags & IFC_SERVER) 4684 return sit == SIT_UNI_SERVER; 4685 else 4686 return sit == SIT_UNI_CLIENT; 4687} 4688 4689 4690static int 4691conn_is_receive_only_stream (const struct ietf_full_conn *conn, 4692 lsquic_stream_id_t stream_id) 4693{ 4694 enum stream_id_type sit; 4695 4696 sit = stream_id & SIT_MASK; 4697 if (conn->ifc_flags & IFC_SERVER) 4698 return sit == SIT_UNI_CLIENT; 4699 else 4700 return sit == SIT_UNI_SERVER; 4701} 4702 4703 4704static unsigned 4705process_rst_stream_frame (struct ietf_full_conn *conn, 4706 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4707{ 4708 lsquic_stream_id_t stream_id; 4709 uint64_t offset, error_code; 4710 lsquic_stream_t *stream; 4711 int call_on_new; 4712 const int parsed_len = conn->ifc_conn.cn_pf->pf_parse_rst_frame(p, len, 4713 &stream_id, &offset, &error_code); 4714 if (parsed_len < 0) 4715 return 0; 4716 4717 EV_LOG_RST_STREAM_FRAME_IN(LSQUIC_LOG_CONN_ID, stream_id, offset, 4718 error_code); 4719 LSQ_DEBUG("Got RST_STREAM; stream: %"PRIu64"; offset: 0x%"PRIX64, stream_id, 4720 offset); 4721 4722 if (conn_is_send_only_stream(conn, stream_id)) 4723 { 4724 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, 4725 "received RESET_STREAM on send-only stream %"PRIu64, stream_id); 4726 return 0; 4727 } 4728 4729 call_on_new = 0; 4730 stream = find_stream_by_id(conn, stream_id); 4731 if (!stream) 4732 { 4733 if (conn_is_stream_closed(conn, stream_id)) 4734 { 4735 LSQ_DEBUG("got reset frame for closed stream %"PRIu64, stream_id); 4736 return parsed_len; 4737 } 4738 if (!is_peer_initiated(conn, stream_id)) 4739 { 4740 ABORT_ERROR("received reset for never-initiated stream %"PRIu64, 4741 stream_id); 4742 return 0; 4743 } 4744 4745 stream = new_stream(conn, stream_id, 0); 4746 if (!stream) 4747 { 4748 ABORT_ERROR("cannot create new stream: %s", strerror(errno)); 4749 return 0; 4750 } 4751 ++call_on_new; 4752 } 4753 4754 if (0 != lsquic_stream_rst_in(stream, offset, error_code)) 4755 { 4756 ABORT_ERROR("received invalid RST_STREAM"); 4757 return 0; 4758 } 4759 if (call_on_new) 4760 lsquic_stream_call_on_new(stream); 4761 return parsed_len; 4762} 4763 4764 4765static unsigned 4766process_stop_sending_frame (struct ietf_full_conn *conn, 4767 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t 
len) 4768{ 4769 struct lsquic_stream *stream; 4770 lsquic_stream_id_t stream_id, max_allowed; 4771 uint64_t error_code; 4772 int parsed_len, our_stream; 4773 enum stream_state_sending sss; 4774 4775 parsed_len = conn->ifc_conn.cn_pf->pf_parse_stop_sending_frame(p, len, 4776 &stream_id, &error_code); 4777 if (parsed_len < 0) 4778 return 0; 4779 4780 EV_LOG_STOP_SENDING_FRAME_IN(LSQUIC_LOG_CONN_ID, stream_id, error_code); 4781 LSQ_DEBUG("Got STOP_SENDING; stream: %"PRIu64"; error code: %"PRIu64, 4782 stream_id, error_code); 4783 4784 if (conn_is_receive_only_stream(conn, stream_id)) 4785 { 4786 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, 4787 "received STOP_SENDING on receive-only stream %"PRIu64, stream_id); 4788 return 0; 4789 } 4790 4791 our_stream = !is_peer_initiated(conn, stream_id); 4792 stream = find_stream_by_id(conn, stream_id); 4793 if (stream) 4794 { 4795 if (our_stream && 4796 SSS_READY == (sss = lsquic_stream_sending_state(stream))) 4797 { 4798 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, "stream %"PRIu64" is in " 4799 "%s state: receipt of STOP_SENDING frame is a violation", 4800 stream_id, lsquic_sss2str[sss]); 4801 return 0; 4802 } 4803 lsquic_stream_stop_sending_in(stream, error_code); 4804 } 4805 else if (conn_is_stream_closed(conn, stream_id)) 4806 LSQ_DEBUG("stream %"PRIu64" is closed: ignore STOP_SENDING frame", 4807 stream_id); 4808 else if (our_stream) 4809 { 4810 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, "received STOP_SENDING frame " 4811 "on locally initiated stream that has not yet been opened"); 4812 return 0; 4813 } 4814 else 4815 { 4816 max_allowed = conn->ifc_max_allowed_stream_id[stream_id & SIT_MASK]; 4817 if (stream_id >= max_allowed) 4818 { 4819 ABORT_QUIETLY(0, TEC_STREAM_LIMIT_ERROR, "incoming STOP_SENDING " 4820 "for stream %"PRIu64" would exceed allowed max of %"PRIu64, 4821 stream_id, max_allowed); 4822 return 0; 4823 } 4824 if (conn->ifc_flags & IFC_GOING_AWAY) 4825 { 4826 LSQ_DEBUG("going away: reject new incoming stream %"PRIu64, 4827 stream_id); 4828 maybe_schedule_ss_for_stream(conn, stream_id, HEC_REQUEST_REJECTED); 4829 return parsed_len; 4830 } 4831 stream = new_stream(conn, stream_id, 0); 4832 if (!stream) 4833 { 4834 ABORT_ERROR("cannot create new stream: %s", strerror(errno)); 4835 return 0; 4836 } 4837 lsquic_stream_stop_sending_in(stream, error_code); 4838 lsquic_stream_call_on_new(stream); 4839 } 4840 4841 return parsed_len; 4842} 4843 4844 4845static unsigned 4846discard_crypto_frame (struct ietf_full_conn *conn, 4847 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4848{ 4849 struct stream_frame stream_frame; 4850 int parsed_len; 4851 4852 parsed_len = conn->ifc_conn.cn_pf->pf_parse_crypto_frame(p, len, 4853 &stream_frame); 4854 if (parsed_len > 0) 4855 { 4856 LSQ_DEBUG("discard %d-byte CRYPTO frame", parsed_len); 4857 return (unsigned) parsed_len; 4858 } 4859 else 4860 return 0; 4861} 4862 4863 4864/* In the server, we only wait for Finished frame */ 4865static unsigned 4866process_crypto_frame_server (struct ietf_full_conn *conn, 4867 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4868{ 4869 struct stream_frame stream_frame; 4870 int parsed_len; 4871 4872 parsed_len = conn->ifc_conn.cn_pf->pf_parse_crypto_frame(p, len, 4873 &stream_frame); 4874 if (parsed_len < 0) 4875 return 0; 4876 4877 if (!(conn->ifc_flags & IFC_PROC_CRYPTO)) 4878 { 4879 LSQ_DEBUG("discard %d-byte CRYPTO frame", parsed_len); 4880 return (unsigned) parsed_len; 4881 } 4882 4883 if (0 != conn->ifc_conn.cn_esf.i->esfi_data_in( 
4884 conn->ifc_conn.cn_enc_session, 4885 lsquic_packet_in_enc_level(packet_in), 4886 stream_frame.data_frame.df_data, 4887 stream_frame.data_frame.df_size)) 4888 { 4889 LSQ_DEBUG("feeding CRYPTO frame to enc session failed"); 4890 return 0; 4891 } 4892 4893 if (!conn->ifc_conn.cn_esf.i->esfi_in_init(conn->ifc_conn.cn_enc_session)) 4894 { 4895 LSQ_DEBUG("handshake confirmed: send HANDSHAKE_DONE"); 4896 conn->ifc_flags &= ~IFC_PROC_CRYPTO; 4897 conn->ifc_send_flags |= SF_SEND_HANDSHAKE_DONE; 4898 } 4899 4900 return (unsigned) parsed_len; 4901} 4902 4903 4904static unsigned 4905process_crypto_frame_client (struct ietf_full_conn *conn, 4906 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4907{ 4908 struct stream_frame *stream_frame; 4909 struct lsquic_stream *stream; 4910 enum enc_level enc_level; 4911 int parsed_len; 4912 4913 /* Ignore CRYPTO frames in server mode and in client mode after SSL object 4914 * is gone. 4915 */ 4916 if (!(conn->ifc_flags & IFC_PROC_CRYPTO)) 4917 return discard_crypto_frame(conn, packet_in, p, len); 4918 4919 stream_frame = lsquic_malo_get(conn->ifc_pub.mm->malo.stream_frame); 4920 if (!stream_frame) 4921 { 4922 LSQ_WARN("could not allocate stream frame: %s", strerror(errno)); 4923 return 0; 4924 } 4925 4926 parsed_len = conn->ifc_conn.cn_pf->pf_parse_crypto_frame(p, len, 4927 stream_frame); 4928 if (parsed_len < 0) { 4929 lsquic_malo_put(stream_frame); 4930 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 4931 "cannot decode CRYPTO frame"); 4932 return 0; 4933 } 4934 enc_level = lsquic_packet_in_enc_level(packet_in); 4935 EV_LOG_CRYPTO_FRAME_IN(LSQUIC_LOG_CONN_ID, stream_frame, enc_level); 4936 LSQ_DEBUG("Got CRYPTO frame for enc level #%u", enc_level); 4937 if ((conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE) 4938 && enc_level != ENC_LEV_FORW) 4939 { 4940 LSQ_DEBUG("handshake complete: ignore CRYPTO frames in " 4941 "non-forward-secure packets"); 4942 return parsed_len; 4943 } 4944 4945 if (conn->ifc_flags & IFC_CLOSING) 4946 { 4947 LSQ_DEBUG("Connection closing: ignore frame"); 4948 lsquic_malo_put(stream_frame); 4949 return parsed_len; 4950 } 4951 4952 assert(!(conn->ifc_flags & IFC_SERVER)); 4953 if (conn->ifc_u.cli.crypto_streams[enc_level]) 4954 stream = conn->ifc_u.cli.crypto_streams[enc_level]; 4955 else 4956 { 4957 stream = lsquic_stream_new_crypto(enc_level, &conn->ifc_pub, 4958 &lsquic_cry_sm_if, conn->ifc_conn.cn_enc_session, 4959 SCF_IETF|SCF_DI_AUTOSWITCH|SCF_CALL_ON_NEW|SCF_CRITICAL); 4960 if (!stream) 4961 { 4962 lsquic_malo_put(stream_frame); 4963 ABORT_WARN("cannot create crypto stream for level %u", enc_level); 4964 return 0; 4965 } 4966 conn->ifc_u.cli.crypto_streams[enc_level] = stream; 4967 (void) lsquic_stream_wantread(stream, 1); 4968 } 4969 4970 stream_frame->packet_in = lsquic_packet_in_get(packet_in); 4971 if (0 != lsquic_stream_frame_in(stream, stream_frame)) 4972 { 4973 ABORT_ERROR("cannot insert stream frame"); 4974 return 0; 4975 } 4976 4977 if (!(conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE)) 4978 { /* To enable decryption, process handshake stream as soon as its 4979 * data frames are received. 4980 * 4981 * TODO: this does not work when packets are reordered. A more 4982 * flexible solution would defer packet decryption if handshake 4983 * has not been completed yet. Nevertheless, this is good enough 4984 * for now. 
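       * (Concretely: if the CRYPTO data that yields new keys arrives after
       * packets that need those keys, those packets cannot be decrypted at
       * this point; the deferral described in the TODO would cover that.)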
4985 */ 4986 lsquic_stream_dispatch_read_events(stream); 4987 } 4988 4989 return parsed_len; 4990} 4991 4992 4993static unsigned 4994process_crypto_frame (struct ietf_full_conn *conn, 4995 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 4996{ 4997 if (conn->ifc_flags & IFC_SERVER) 4998 return process_crypto_frame_server(conn, packet_in, p, len); 4999 else 5000 return process_crypto_frame_client(conn, packet_in, p, len); 5001} 5002 5003 5004static unsigned 5005process_stream_frame (struct ietf_full_conn *conn, 5006 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5007{ 5008 struct stream_frame *stream_frame; 5009 struct lsquic_stream *stream; 5010 int parsed_len; 5011 5012 stream_frame = lsquic_malo_get(conn->ifc_pub.mm->malo.stream_frame); 5013 if (!stream_frame) 5014 { 5015 LSQ_WARN("could not allocate stream frame: %s", strerror(errno)); 5016 return 0; 5017 } 5018 5019 parsed_len = conn->ifc_conn.cn_pf->pf_parse_stream_frame(p, len, 5020 stream_frame); 5021 if (parsed_len < 0) { 5022 lsquic_malo_put(stream_frame); 5023 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 5024 "cannot decode STREAM frame"); 5025 return 0; 5026 } 5027 EV_LOG_STREAM_FRAME_IN(LSQUIC_LOG_CONN_ID, stream_frame); 5028 LSQ_DEBUG("Got stream frame for stream #%"PRIu64, stream_frame->stream_id); 5029 5030 if (conn_is_send_only_stream(conn, stream_frame->stream_id)) 5031 { 5032 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, "received STREAM frame " 5033 "on send-only stream %"PRIu64, stream_frame->stream_id); 5034 return 0; 5035 } 5036 5037 if ((conn->ifc_flags & (IFC_SERVER|IFC_HTTP)) == IFC_HTTP 5038 && SIT_BIDI_SERVER == (stream_frame->stream_id & SIT_MASK)) 5039 { 5040 ABORT_QUIETLY(1, HEC_STREAM_CREATION_ERROR, "HTTP/3 server " 5041 "is not allowed to initiate bidirectional streams (got " 5042 "STREAM frame for stream %"PRIu64, stream_frame->stream_id); 5043 return 0; 5044 } 5045 5046 if (conn->ifc_flags & IFC_CLOSING) 5047 { 5048 LSQ_DEBUG("Connection closing: ignore frame"); 5049 lsquic_malo_put(stream_frame); 5050 return parsed_len; 5051 } 5052 5053 stream = find_stream_by_id(conn, stream_frame->stream_id); 5054 if (!stream) 5055 { 5056 if (conn_is_stream_closed(conn, stream_frame->stream_id)) 5057 { 5058 LSQ_DEBUG("drop frame for closed stream %"PRIu64, 5059 stream_frame->stream_id); 5060 lsquic_malo_put(stream_frame); 5061 return parsed_len; 5062 } 5063 if (is_peer_initiated(conn, stream_frame->stream_id)) 5064 { 5065 const lsquic_stream_id_t max_allowed = 5066 conn->ifc_max_allowed_stream_id[stream_frame->stream_id & SIT_MASK]; 5067 if (stream_frame->stream_id >= max_allowed) 5068 { 5069 ABORT_QUIETLY(0, TEC_STREAM_LIMIT_ERROR, "incoming stream " 5070 "%"PRIu64" exceeds allowed max of %"PRIu64, 5071 stream_frame->stream_id, max_allowed); 5072 lsquic_malo_put(stream_frame); 5073 return 0; 5074 } 5075 if (conn->ifc_flags & IFC_GOING_AWAY) 5076 { 5077 LSQ_DEBUG("going away: reject new incoming stream %"PRIu64, 5078 stream_frame->stream_id); 5079 maybe_schedule_ss_for_stream(conn, stream_frame->stream_id, 5080 HEC_REQUEST_REJECTED); 5081 lsquic_malo_put(stream_frame); 5082 return parsed_len; 5083 } 5084 } 5085 else 5086 { 5087 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, "received STREAM frame " 5088 "for never-initiated stream"); 5089 lsquic_malo_put(stream_frame); 5090 return 0; 5091 } 5092 stream = new_stream(conn, stream_frame->stream_id, SCF_CALL_ON_NEW); 5093 if (!stream) 5094 { 5095 ABORT_ERROR("cannot create new stream: %s", strerror(errno)); 5096 
lsquic_malo_put(stream_frame); 5097 return 0; 5098 } 5099 if (SD_BIDI == ((stream_frame->stream_id >> SD_SHIFT) & 1) 5100 && (!valid_stream_id(conn->ifc_max_req_id) 5101 || conn->ifc_max_req_id < stream_frame->stream_id)) 5102 conn->ifc_max_req_id = stream_frame->stream_id; 5103 } 5104 5105 stream_frame->packet_in = lsquic_packet_in_get(packet_in); 5106 if (0 != lsquic_stream_frame_in(stream, stream_frame)) 5107 { 5108 ABORT_ERROR("cannot insert stream frame"); 5109 return 0; 5110 } 5111 5112 /* Don't wait for the regular on_read dispatch in order to save an 5113 * unnecessary blocked/unblocked sequence. 5114 */ 5115 if ((conn->ifc_flags & IFC_HTTP) && conn->ifc_qdh.qdh_enc_sm_in == stream) 5116 lsquic_stream_dispatch_read_events(conn->ifc_qdh.qdh_enc_sm_in); 5117 5118 return parsed_len; 5119} 5120 5121 5122static unsigned 5123process_ack_frame (struct ietf_full_conn *conn, 5124 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5125{ 5126 struct ack_info *new_acki; 5127 enum packnum_space pns; 5128 int parsed_len; 5129 lsquic_time_t warn_time; 5130 5131 if (conn->ifc_flags & IFC_HAVE_SAVED_ACK) 5132 new_acki = conn->ifc_pub.mm->acki; 5133 else 5134 new_acki = &conn->ifc_ack; 5135 5136 parsed_len = conn->ifc_conn.cn_pf->pf_parse_ack_frame(p, len, new_acki, 5137 conn->ifc_cfg.ack_exp); 5138 if (parsed_len < 0) 5139 goto err; 5140 5141 /* This code to throw out old ACKs is what keeps us compliant with this 5142 * requirement: 5143 * 5144 * [draft-ietf-quic-transport-18] Section 13.3.2. 5145 * 5146 > Processing counts out of order can result in verification failure. 5147 > An endpoint SHOULD NOT perform this verification if the ACK frame is 5148 > received in a packet with packet number lower than a previously 5149 > received ACK frame. Verifying based on ACK frames that arrive out of 5150 > order can result in disabling ECN unnecessarily. 
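     *
     * Hence the check below: an ACK frame carried in a packet whose number
     * is not larger than ifc_max_ack_packno for its packet number space is
     * ignored.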
5151 */ 5152 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 5153 if (is_valid_packno(conn->ifc_max_ack_packno[pns]) && 5154 packet_in->pi_packno <= conn->ifc_max_ack_packno[pns]) 5155 { 5156 LSQ_DEBUG("Ignore old ack (max %"PRIu64")", 5157 conn->ifc_max_ack_packno[pns]); 5158 return parsed_len; 5159 } 5160 5161 EV_LOG_ACK_FRAME_IN(LSQUIC_LOG_CONN_ID, new_acki); 5162 conn->ifc_max_ack_packno[pns] = packet_in->pi_packno; 5163 new_acki->pns = pns; 5164 5165 /* Only cache ACKs for PNS_APP */ 5166 if (pns == PNS_APP && new_acki == &conn->ifc_ack) 5167 { 5168 LSQ_DEBUG("Saved ACK"); 5169 conn->ifc_flags |= IFC_HAVE_SAVED_ACK; 5170 conn->ifc_saved_ack_received = packet_in->pi_received; 5171 conn->ifc_ias.n_acks = 1; 5172 } 5173 else if (pns == PNS_APP) 5174 { 5175 if (0 == lsquic_merge_acks(&conn->ifc_ack, new_acki)) 5176 { 5177 ++conn->ifc_ias.n_acks; 5178 LSQ_DEBUG("merged into saved ACK, getting %s", 5179 (lsquic_acki2str(&conn->ifc_ack, conn->ifc_pub.mm->ack_str, 5180 MAX_ACKI_STR_SZ), conn->ifc_pub.mm->ack_str)); 5181 } 5182 else 5183 { 5184 LSQ_DEBUG("could not merge new ACK into saved ACK"); 5185 if (0 != process_ack(conn, &conn->ifc_ack, packet_in->pi_received, 5186 packet_in->pi_received)) 5187 goto err; 5188 conn->ifc_ack = *new_acki; 5189 } 5190 conn->ifc_saved_ack_received = packet_in->pi_received; 5191 } 5192 else 5193 { 5194 if (0 != process_ack(conn, new_acki, packet_in->pi_received, 5195 packet_in->pi_received)) 5196 goto err; 5197 } 5198 5199 return parsed_len; 5200 5201 err: 5202 warn_time = lsquic_time_now(); 5203 if (0 == conn->ifc_enpub->enp_last_warning[WT_ACKPARSE_FULL] 5204 || conn->ifc_enpub->enp_last_warning[WT_ACKPARSE_FULL] 5205 + WARNING_INTERVAL < warn_time) 5206 { 5207 conn->ifc_enpub->enp_last_warning[WT_ACKPARSE_FULL] = warn_time; 5208 LSQ_WARN("Invalid ACK frame"); 5209 } 5210 return 0; 5211} 5212 5213 5214static unsigned 5215process_ping_frame (struct ietf_full_conn *conn, 5216 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5217{ /* This frame causes ACK frame to be queued, but nothing to do here; 5218 * return the length of this frame. 5219 */ 5220 EV_LOG_PING_FRAME_IN(LSQUIC_LOG_CONN_ID); 5221 LSQ_DEBUG("received PING"); 5222 return 1; 5223} 5224 5225 5226static unsigned 5227process_connection_close_frame (struct ietf_full_conn *conn, 5228 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5229{ 5230 lsquic_stream_t *stream; 5231 struct lsquic_hash_elem *el; 5232 uint64_t error_code; 5233 uint16_t reason_len; 5234 uint8_t reason_off; 5235 int parsed_len, app_error; 5236 5237 parsed_len = conn->ifc_conn.cn_pf->pf_parse_connect_close_frame(p, len, 5238 &app_error, &error_code, &reason_len, &reason_off); 5239 if (parsed_len < 0) 5240 return 0; 5241 EV_LOG_CONNECTION_CLOSE_FRAME_IN(LSQUIC_LOG_CONN_ID, error_code, 5242 (int) reason_len, (const char *) p + reason_off); 5243 LSQ_INFO("Received CONNECTION_CLOSE frame (%s-level code: %"PRIu64"; " 5244 "reason: %.*s)", app_error ? 
"application" : "transport", 5245 error_code, (int) reason_len, (const char *) p + reason_off); 5246 conn->ifc_flags |= IFC_RECV_CLOSE; 5247 if (!(conn->ifc_flags & IFC_CLOSING)) 5248 { 5249 for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el; 5250 el = lsquic_hash_next(conn->ifc_pub.all_streams)) 5251 { 5252 stream = lsquic_hashelem_getdata(el); 5253 lsquic_stream_shutdown_internal(stream); 5254 } 5255 conn->ifc_flags |= IFC_CLOSING; 5256 } 5257 return parsed_len; 5258} 5259 5260 5261static unsigned 5262process_max_data_frame (struct ietf_full_conn *conn, 5263 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5264{ 5265 uint64_t max_data; 5266 int parsed_len; 5267 5268 parsed_len = conn->ifc_conn.cn_pf->pf_parse_max_data(p, len, &max_data); 5269 if (parsed_len < 0) 5270 return 0; 5271 5272 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "MAX_DATA frame in; offset: %"PRIu64, 5273 max_data); 5274 if (max_data > conn->ifc_pub.conn_cap.cc_max) 5275 { 5276 LSQ_DEBUG("max data goes from %"PRIu64" to %"PRIu64, 5277 conn->ifc_pub.conn_cap.cc_max, max_data); 5278 conn->ifc_pub.conn_cap.cc_max = max_data; 5279 } 5280 else 5281 LSQ_DEBUG("newly supplied max data=%"PRIu64" is not larger than the " 5282 "current value=%"PRIu64", ignoring", max_data, 5283 conn->ifc_pub.conn_cap.cc_max); 5284 return parsed_len; 5285} 5286 5287 5288static unsigned 5289process_max_stream_data_frame (struct ietf_full_conn *conn, 5290 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5291{ 5292 struct lsquic_stream *stream; 5293 lsquic_stream_id_t stream_id; 5294 uint64_t max_data; 5295 int parsed_len; 5296 5297 parsed_len = conn->ifc_conn.cn_pf->pf_parse_max_stream_data_frame(p, len, 5298 &stream_id, &max_data); 5299 if (parsed_len < 0) 5300 return 0; 5301 5302 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "MAX_STREAM_DATA frame in; " 5303 "stream_id: %"PRIu64"; offset: %"PRIu64, stream_id, max_data); 5304 if (conn_is_receive_only_stream(conn, stream_id)) 5305 { 5306 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, 5307 "received MAX_STREAM_DATA on receive-only stream %"PRIu64, stream_id); 5308 return 0; 5309 } 5310 5311 stream = find_stream_by_id(conn, stream_id); 5312 if (stream) 5313 lsquic_stream_window_update(stream, max_data); 5314 else if (conn_is_stream_closed(conn, stream_id)) 5315 LSQ_DEBUG("stream %"PRIu64" is closed: ignore MAX_STREAM_DATA frame", 5316 stream_id); 5317 else 5318 { 5319 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, "received MAX_STREAM_DATA " 5320 "frame on never-opened stream %"PRIu64, stream_id); 5321 return 0; 5322 } 5323 5324 return parsed_len; 5325} 5326 5327 5328static unsigned 5329process_max_streams_frame (struct ietf_full_conn *conn, 5330 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5331{ 5332 lsquic_stream_id_t max_stream_id; 5333 enum stream_id_type sit; 5334 enum stream_dir sd; 5335 uint64_t max_streams; 5336 int parsed_len; 5337 5338 parsed_len = conn->ifc_conn.cn_pf->pf_parse_max_streams_frame(p, len, 5339 &sd, &max_streams); 5340 if (parsed_len < 0) 5341 return 0; 5342 5343 sit = gen_sit(conn->ifc_flags & IFC_SERVER, sd); 5344 max_stream_id = max_streams << SIT_SHIFT; 5345 5346 if (max_stream_id > VINT_MAX_VALUE) 5347 { 5348 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 5349 "MAX_STREAMS: max %s stream ID of %"PRIu64" exceeds maximum " 5350 "stream ID", sd == SD_BIDI ? 
"bidi" : "uni", max_stream_id); 5351 return 0; 5352 } 5353 5354 if (max_stream_id > conn->ifc_max_allowed_stream_id[sit]) 5355 { 5356 LSQ_DEBUG("max %s stream ID updated from %"PRIu64" to %"PRIu64, 5357 sd == SD_BIDI ? "bidi" : "uni", 5358 conn->ifc_max_allowed_stream_id[sit], max_stream_id); 5359 conn->ifc_max_allowed_stream_id[sit] = max_stream_id; 5360 } 5361 else 5362 LSQ_DEBUG("ignore old max %s streams value of %"PRIu64, 5363 sd == SD_BIDI ? "bidi" : "uni", max_streams); 5364 5365 return parsed_len; 5366} 5367 5368 5369/* Returns true if current DCID was retired. In this case, it must be 5370 * replaced. 5371 */ 5372static int 5373retire_dcids_prior_to (struct ietf_full_conn *conn, unsigned retire_prior_to) 5374{ 5375 struct dcid_elem **el; 5376 int update_cur_dcid = 0; 5377#if LSQUIC_LOWEST_LOG_LEVEL >= LSQ_LOG_DEBUG 5378 unsigned count = 0; 5379#endif 5380 5381 for (el = conn->ifc_dces; el < conn->ifc_dces + sizeof(conn->ifc_dces) 5382 / sizeof(conn->ifc_dces[0]); ++el) 5383 if (*el && (*el)->de_seqno < retire_prior_to) 5384 { 5385 update_cur_dcid |= LSQUIC_CIDS_EQ(&(*el)->de_cid, CUR_DCID(conn)); 5386 retire_dcid(conn, el); 5387#if LSQUIC_LOWEST_LOG_LEVEL >= LSQ_LOG_DEBUG 5388 ++count; 5389#endif 5390 } 5391 5392 LSQ_DEBUG("retired %u DCID%s due to Retire Prior To=%u", count, 5393 count != 1 ? "s" : "", retire_prior_to); 5394 return update_cur_dcid; 5395} 5396 5397 5398static int 5399insert_new_dcid (struct ietf_full_conn *conn, uint64_t seqno, 5400 const lsquic_cid_t *cid, const unsigned char *token, int update_cur_dcid) 5401{ 5402 struct dcid_elem **dce, **el; 5403 char tokstr[IQUIC_SRESET_TOKEN_SZ * 2 + 1]; 5404 5405 dce = NULL; 5406 for (el = conn->ifc_dces; el < conn->ifc_dces + sizeof(conn->ifc_dces) 5407 / sizeof(conn->ifc_dces[0]); ++el) 5408 if (*el) 5409 { 5410 if ((*el)->de_seqno == seqno) 5411 { 5412 if (!LSQUIC_CIDS_EQ(&(*el)->de_cid, cid)) 5413 { 5414 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5415 "NEW_CONNECTION_ID: already have CID seqno %"PRIu64 5416 " but with a different CID", seqno); 5417 return -1; 5418 } 5419 else 5420 { 5421 LSQ_DEBUG("Ignore duplicate CID seqno %"PRIu64, seqno); 5422 return 0; 5423 } 5424 } 5425 else if (LSQUIC_CIDS_EQ(&(*el)->de_cid, cid)) 5426 { 5427 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5428 "NEW_CONNECTION_ID: received the same CID with sequence " 5429 "numbers %u and %"PRIu64, (*el)->de_seqno, seqno); 5430 return -1; 5431 } 5432 else if (((*el)->de_flags & DE_SRST) 5433 && 0 == memcmp((*el)->de_srst, token, 5434 IQUIC_SRESET_TOKEN_SZ)) 5435 { 5436 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5437 "NEW_CONNECTION_ID: received second instance of reset " 5438 "token %s in seqno %"PRIu64", same as in seqno %u", 5439 (lsquic_hexstr(token, IQUIC_SRESET_TOKEN_SZ, tokstr, 5440 sizeof(tokstr)), tokstr), 5441 seqno, (*el)->de_seqno); 5442 return -1; 5443 } 5444 } 5445 else if (!dce) 5446 dce = el; 5447 5448 if (!dce) 5449 { 5450 ABORT_QUIETLY(0, TEC_CONNECTION_ID_LIMIT_ERROR, 5451 "NEW_CONNECTION_ID: received connection ID that is going over the " 5452 "limit of %u CIDs", MAX_IETF_CONN_DCIDS); 5453 return -1; 5454 } 5455 5456 *dce = lsquic_malo_get(conn->ifc_pub.mm->malo.dcid_elem); 5457 if (*dce) 5458 { 5459 memset(*dce, 0, sizeof(**dce)); 5460 (*dce)->de_seqno = seqno; 5461 (*dce)->de_cid = *cid; 5462 memcpy((*dce)->de_srst, token, sizeof((*dce)->de_srst)); 5463 (*dce)->de_flags |= DE_SRST; 5464 if (update_cur_dcid) 5465 *CUR_DCID(conn) = *cid; 5466 } 5467 else 5468 LSQ_WARN("cannot allocate dce to insert DCID seqno %"PRIu64, seqno); 5469 5470 
return 0; 5471} 5472 5473 5474static unsigned 5475process_new_connection_id_frame (struct ietf_full_conn *conn, 5476 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5477{ 5478 const unsigned char *token; 5479 const char *action_str; 5480 lsquic_cid_t cid; 5481 uint64_t seqno, retire_prior_to; 5482 int parsed_len, update_cur_dcid; 5483 5484 parsed_len = conn->ifc_conn.cn_pf->pf_parse_new_conn_id(p, len, 5485 &seqno, &retire_prior_to, &cid, &token); 5486 if (parsed_len < 0) 5487 { 5488 if (parsed_len == -2) 5489 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 5490 "NEW_CONNECTION_ID contains invalid CID length"); 5491 return 0; 5492 } 5493 5494 if (seqno > UINT32_MAX || retire_prior_to > UINT32_MAX) 5495 { /* It is wasteful to use 8-byte integers for these counters, so this 5496 * is the guard here. This will "Never Happen." 5497 */ 5498 LSQ_INFO("ignoring unreasonably high seqno=%"PRIu64" or Retire Prior " 5499 "To=%"PRIu64, seqno, retire_prior_to); 5500 return parsed_len; 5501 } 5502 5503 if (retire_prior_to > seqno) 5504 { 5505 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 5506 "NEW_CONNECTION_ID: Retire Prior To=%"PRIu64" is larger then the " 5507 "Sequence Number=%"PRIu64, retire_prior_to, seqno); 5508 return 0; 5509 } 5510 5511 if (seqno < conn->ifc_last_retire_prior_to) 5512 { 5513 retire_seqno(conn, seqno); 5514 action_str = "Ignored (seqno smaller than last retire_prior_to"; 5515 goto end; 5516 } 5517 5518 if (retire_prior_to > conn->ifc_last_retire_prior_to) 5519 { 5520 conn->ifc_last_retire_prior_to = retire_prior_to; 5521 update_cur_dcid = retire_dcids_prior_to(conn, retire_prior_to); 5522 } 5523 else 5524 update_cur_dcid = 0; 5525 5526 if (0 != insert_new_dcid(conn, seqno, &cid, token, update_cur_dcid)) 5527 return 0; 5528 action_str = "Saved"; 5529 5530 end: 5531 LSQ_DEBUGC("Got new connection ID from peer: seq=%"PRIu64"; " 5532 "cid: %"CID_FMT". %s.", seqno, CID_BITS(&cid), action_str); 5533 return parsed_len; 5534} 5535 5536 5537static void 5538retire_cid (struct ietf_full_conn *conn, struct conn_cid_elem *cce, 5539 lsquic_time_t now) 5540{ 5541 struct lsquic_conn *const lconn = &conn->ifc_conn; 5542 5543 LSQ_DEBUGC("retiring CID %"CID_FMT"; seqno: %u; %s", 5544 CID_BITS(&cce->cce_cid), cce->cce_seqno, 5545 (cce->cce_flags & CCE_SEQNO) ? "" : "original"); 5546 5547 if (cce->cce_flags & CCE_SEQNO) 5548 --conn->ifc_active_cids_count; 5549 lsquic_engine_retire_cid(conn->ifc_enpub, lconn, cce - lconn->cn_cces, now); 5550 memset(cce, 0, sizeof(*cce)); 5551 5552 if (can_issue_cids(conn) 5553 && !(lsquic_alarmset_is_set(&conn->ifc_alset, AL_CID_THROT))) 5554 maybe_get_rate_available_scid_slot(conn, now); 5555} 5556 5557 5558static unsigned 5559process_retire_connection_id_frame (struct ietf_full_conn *conn, 5560 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5561{ 5562 struct lsquic_conn *const lconn = &conn->ifc_conn; 5563 struct conn_cid_elem *cce; 5564 uint64_t seqno; 5565 int parsed_len; 5566 5567 /* [draft-ietf-quic-transport-25] Section 19.16 5568 * 5569 * - Peer cannot retire zero-lenth CID. (MUST treat as PROTOCOL_VIOLATION) 5570 * - Peer cannot retire CID with sequence number that has not been 5571 * allocated yet. (MUST treat as PROTOCOL_VIOLATION) 5572 * - Peer cannot retire CID that matches the DCID in packet. 
5573 * (MAY treat as PROTOCOL_VIOLATION) 5574 */ 5575 if (conn->ifc_settings->es_scid_len == 0) 5576 { 5577 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, "cannot retire zero-length CID"); 5578 return 0; 5579 } 5580 5581 parsed_len = conn->ifc_conn.cn_pf->pf_parse_retire_cid_frame(p, len, 5582 &seqno); 5583 if (parsed_len < 0) 5584 return 0; 5585 5586 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "got RETIRE_CONNECTION_ID frame: " 5587 "seqno=%"PRIu64, seqno); 5588 if (seqno >= conn->ifc_scid_seqno) 5589 { 5590 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, "cannot retire CID seqno=" 5591 "%"PRIu64" as it has not been allocated yet", seqno); 5592 return 0; 5593 } 5594 5595 for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce) 5596 if ((lconn->cn_cces_mask & (1 << (cce - lconn->cn_cces)) 5597 && (cce->cce_flags & CCE_SEQNO) 5598 && cce->cce_seqno == seqno)) 5599 break; 5600 5601 conn->ifc_active_cids_count -= seqno >= conn->ifc_first_active_cid_seqno; 5602 5603 if (cce < END_OF_CCES(lconn)) 5604 { 5605 if (LSQUIC_CIDS_EQ(&cce->cce_cid, &packet_in->pi_dcid)) 5606 { 5607 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, "cannot retire CID " 5608 "seqno=%"PRIu64", for it is used as DCID in the packet", seqno); 5609 return 0; 5610 } 5611 retire_cid(conn, cce, packet_in->pi_received); 5612 if (lconn->cn_cur_cce_idx == cce - lconn->cn_cces) 5613 { 5614 cce = find_cce_by_cid(conn, &packet_in->pi_dcid); 5615 if (cce) 5616 { 5617 cce->cce_flags |= CCE_USED; 5618 lconn->cn_cur_cce_idx = cce - lconn->cn_cces; 5619 LSQ_DEBUGC("current SCID was retired; set current SCID to " 5620 "%"CID_FMT" based on DCID in incoming packet", 5621 CID_BITS(&packet_in->pi_dcid)); 5622 } 5623 else 5624 LSQ_WARN("current SCID was retired; no new SCID candidate"); 5625 /* This could theoretically happen when zero-length CIDs were 5626 * used. Currently, there should be no way lsquic could get 5627 * into this situation. 
5628 */ 5629 } 5630 } 5631 else 5632 LSQ_DEBUG("cannot retire CID seqno=%"PRIu64": not found", seqno); 5633 LOG_SCIDS(conn); 5634 5635 return parsed_len; 5636} 5637 5638 5639static unsigned 5640process_new_token_frame (struct ietf_full_conn *conn, 5641 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5642{ 5643 const unsigned char *token; 5644 size_t token_sz; 5645 char *token_str; 5646 int parsed_len; 5647 5648 parsed_len = conn->ifc_conn.cn_pf->pf_parse_new_token_frame(p, len, &token, 5649 &token_sz); 5650 if (parsed_len < 0) 5651 return 0; 5652 5653 if (0 == token_sz) 5654 { 5655 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, "received an empty " 5656 "NEW_TOKEN frame"); 5657 return 0; 5658 } 5659 5660 if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG) 5661 || LSQ_LOG_ENABLED_EXT(LSQ_LOG_DEBUG, LSQLM_EVENT)) 5662 { 5663 token_str = malloc(token_sz * 2 + 1); 5664 if (token_str) 5665 { 5666 lsquic_hexstr(token, token_sz, token_str, token_sz * 2 + 1); 5667 LSQ_DEBUG("Got %zu-byte NEW_TOKEN %s", token_sz, token_str); 5668 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "got NEW_TOKEN %s", 5669 token_str); 5670 free(token_str); 5671 } 5672 } 5673 if (conn->ifc_enpub->enp_stream_if->on_new_token) 5674 conn->ifc_enpub->enp_stream_if->on_new_token( 5675 conn->ifc_enpub->enp_stream_if_ctx, token, token_sz); 5676 return parsed_len; 5677} 5678 5679 5680static unsigned 5681process_stream_blocked_frame (struct ietf_full_conn *conn, 5682 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5683{ 5684 struct lsquic_stream *stream; 5685 lsquic_stream_id_t stream_id; 5686 uint64_t peer_off; 5687 int parsed_len; 5688 5689 parsed_len = conn->ifc_conn.cn_pf->pf_parse_stream_blocked_frame(p, 5690 len, &stream_id, &peer_off); 5691 if (parsed_len < 0) 5692 return 0; 5693 5694 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "STREAM_BLOCKED frame in: stream " 5695 "%"PRIu64"; offset %"PRIu64, stream_id, peer_off); 5696 LSQ_DEBUG("received STREAM_BLOCKED frame: stream %"PRIu64 5697 "; offset %"PRIu64, stream_id, peer_off); 5698 5699 if (conn_is_send_only_stream(conn, stream_id)) 5700 { 5701 ABORT_QUIETLY(0, TEC_STREAM_STATE_ERROR, 5702 "received STREAM_BLOCKED frame on send-only stream %"PRIu64, 5703 stream_id); 5704 return 0; 5705 } 5706 5707 stream = find_stream_by_id(conn, stream_id); 5708 if (stream) 5709 lsquic_stream_peer_blocked(stream, peer_off); 5710 else 5711 LSQ_DEBUG("stream %"PRIu64" not found - ignore STREAM_BLOCKED frame", 5712 stream_id); 5713 return parsed_len; 5714} 5715 5716 5717static unsigned 5718process_streams_blocked_frame (struct ietf_full_conn *conn, 5719 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5720{ 5721 lsquic_stream_id_t max_stream_id; 5722 uint64_t stream_limit; 5723 enum stream_dir sd; 5724 int parsed_len; 5725 5726 parsed_len = conn->ifc_conn.cn_pf->pf_parse_streams_blocked_frame(p, 5727 len, &sd, &stream_limit); 5728 if (parsed_len < 0) 5729 return 0; 5730 5731 max_stream_id = stream_limit << SIT_SHIFT; 5732 if (max_stream_id > VINT_MAX_VALUE) 5733 { 5734 ABORT_QUIETLY(0, TEC_FRAME_ENCODING_ERROR, 5735 "STREAMS_BLOCKED: max %s stream ID of %"PRIu64" exceeds maximum " 5736 "stream ID", sd == SD_BIDI ? "bidi" : "uni", max_stream_id); 5737 return 0; 5738 } 5739 5740 LSQ_DEBUG("received STREAMS_BLOCKED frame: limited to %"PRIu64 5741 " %sdirectional stream%.*s", stream_limit, sd == SD_UNI ? 
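    /* STREAMS_BLOCKED (like MAX_STREAMS above) carries a stream *count*,
     * not a stream ID.  The conversion a few lines up shifts the count left
     * by SIT_SHIFT -- expected to be 2, since the two low bits of an IETF
     * QUIC stream ID encode initiator and direction -- and the guard rejects
     * counts whose scaled value would not fit in a varint (VINT_MAX_VALUE,
     * i.e. 2^62 - 1); the transport spec makes such counts a
     * FRAME_ENCODING_ERROR.  The "%.*s" at the end of this debug call is a
     * pluralization trick: the precision argument (stream_limit != 1) is 0
     * or 1, so the trailing "s" is printed only when the limit is not one.
     */
#if 0
    /* Illustration only, not compiled: the conversion as a standalone
     * helper.  The function name is hypothetical.
     */
    static lsquic_stream_id_t
    stream_count_2_max_id (uint64_t count)
    {
        return count << SIT_SHIFT;      /* e.g. a limit of 10 streams -> 40 */
    }
#endif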
"uni" : "bi", 5742 stream_limit != 1, "s"); 5743 /* We don't do anything with this information -- at least for now */ 5744 return parsed_len; 5745} 5746 5747 5748static unsigned 5749process_blocked_frame (struct ietf_full_conn *conn, 5750 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5751{ 5752 uint64_t peer_off; 5753 int parsed_len; 5754 5755 parsed_len = conn->ifc_conn.cn_pf->pf_parse_blocked_frame(p, len, 5756 &peer_off); 5757 if (parsed_len < 0) 5758 return 0; 5759 5760 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "BLOCKED frame in: offset %"PRIu64, 5761 peer_off); 5762 LSQ_DEBUG("received BLOCKED frame: offset %"PRIu64, peer_off); 5763 5764 if (peer_off > conn->ifc_last_max_data_off_sent 5765 && !(conn->ifc_send_flags & SF_SEND_MAX_DATA)) 5766 { 5767 conn->ifc_send_flags |= SF_SEND_MAX_DATA; 5768 LSQ_DEBUG("marked to send MAX_DATA frame"); 5769 } 5770 else if (conn->ifc_send_flags & SF_SEND_MAX_DATA) 5771 LSQ_DEBUG("MAX_STREAM_DATA frame is already scheduled"); 5772 else 5773 LSQ_DEBUG("MAX_DATA(%"PRIu64") has already been either " 5774 "packetized or sent to peer", conn->ifc_last_max_data_off_sent); 5775 5776 return parsed_len; 5777} 5778 5779 5780static unsigned 5781process_handshake_done_frame (struct ietf_full_conn *conn, 5782 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5783{ 5784 int parsed_len; 5785 5786 parsed_len = conn->ifc_conn.cn_pf->pf_parse_handshake_done_frame(p, len); 5787 if (parsed_len < 0) 5788 return 0; 5789 5790 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "HANDSHAKE_DONE frame in"); 5791 LSQ_DEBUG("received HANDSHAKE_DONE frame"); 5792 5793 if (conn->ifc_flags & IFC_SERVER) 5794 { 5795 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5796 "Client cannot send HANDSHAKE_DONE frame"); 5797 return 0; 5798 } 5799 5800 handshake_confirmed(conn); 5801 5802 return parsed_len; 5803} 5804 5805 5806static unsigned 5807process_ack_frequency_frame (struct ietf_full_conn *conn, 5808 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5809{ 5810 uint64_t seqno, pack_tol, upd_mad; 5811 int parsed_len; 5812 5813 if (!(conn->ifc_flags & IFC_DELAYED_ACKS)) 5814 { 5815 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5816 "Received unexpected ACK_FREQUENCY frame (not negotiated)"); 5817 return 0; 5818 } 5819 5820 parsed_len = conn->ifc_conn.cn_pf->pf_parse_ack_frequency_frame(p, len, 5821 &seqno, &pack_tol, &upd_mad); 5822 if (parsed_len < 0) 5823 return 0; 5824 5825 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "ACK_FREQUENCY(seqno: %"PRIu64"; " 5826 "pack_tol: %"PRIu64"; upd: %"PRIu64") frame in", seqno, pack_tol, 5827 upd_mad); 5828 LSQ_DEBUG("ACK_FREQUENCY(seqno: %"PRIu64"; pack_tol: %"PRIu64"; " 5829 "upd: %"PRIu64") frame in", seqno, pack_tol, upd_mad); 5830 5831 if (pack_tol == 0) 5832 { 5833 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5834 "Packet Tolerance of zero is invalid"); 5835 return 0; 5836 } 5837 5838 if (conn->ifc_max_ack_freq_seqno > 0 5839 && seqno <= conn->ifc_max_ack_freq_seqno) 5840 { 5841 LSQ_DEBUG("ignore old ACK_FREQUENCY frame"); 5842 return parsed_len; 5843 } 5844 conn->ifc_max_ack_freq_seqno = seqno; 5845 5846 if (pack_tol < UINT_MAX) 5847 { 5848 LSQ_DEBUG("set packet tolerance to %"PRIu64, pack_tol); 5849 conn->ifc_max_retx_since_last_ack = pack_tol; 5850 } 5851 5852 /* TODO: do something with max ack delay update */ 5853 5854 return parsed_len; 5855} 5856 5857 5858static unsigned 5859process_timestamp_frame (struct ietf_full_conn *conn, 5860 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5861{ 
5862 uint64_t timestamp; 5863 int parsed_len; 5864 5865 if (!(conn->ifc_flags & IFC_TIMESTAMPS)) 5866 { 5867 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 5868 "Received unexpected TIMESTAMP frame (not negotiated)"); 5869 return 0; 5870 } 5871 5872 parsed_len = conn->ifc_conn.cn_pf->pf_parse_timestamp_frame(p, len, 5873 ×tamp); 5874 if (parsed_len < 0) 5875 return 0; 5876 5877 timestamp <<= conn->ifc_cfg.ack_exp; 5878 EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "TIMESTAMP(%"PRIu64" us)", timestamp); 5879 LSQ_DEBUG("TIMESTAMP(%"PRIu64" us) (%"PRIu64" << %"PRIu8")", timestamp, 5880 timestamp >> conn->ifc_cfg.ack_exp, conn->ifc_cfg.ack_exp); 5881 5882 /* We don't do anything with the timestamp */ 5883 5884 return parsed_len; 5885} 5886 5887 5888typedef unsigned (*process_frame_f)( 5889 struct ietf_full_conn *, struct lsquic_packet_in *, 5890 const unsigned char *p, size_t); 5891 5892 5893static process_frame_f const process_frames[N_QUIC_FRAMES] = 5894{ 5895 [QUIC_FRAME_PADDING] = process_padding_frame, 5896 [QUIC_FRAME_RST_STREAM] = process_rst_stream_frame, 5897 [QUIC_FRAME_CONNECTION_CLOSE] = process_connection_close_frame, 5898 [QUIC_FRAME_MAX_DATA] = process_max_data_frame, 5899 [QUIC_FRAME_MAX_STREAM_DATA] = process_max_stream_data_frame, 5900 [QUIC_FRAME_MAX_STREAMS] = process_max_streams_frame, 5901 [QUIC_FRAME_PING] = process_ping_frame, 5902 [QUIC_FRAME_BLOCKED] = process_blocked_frame, 5903 [QUIC_FRAME_STREAM_BLOCKED] = process_stream_blocked_frame, 5904 [QUIC_FRAME_STREAMS_BLOCKED] = process_streams_blocked_frame, 5905 [QUIC_FRAME_NEW_CONNECTION_ID] = process_new_connection_id_frame, 5906 [QUIC_FRAME_NEW_TOKEN] = process_new_token_frame, 5907 [QUIC_FRAME_STOP_SENDING] = process_stop_sending_frame, 5908 [QUIC_FRAME_ACK] = process_ack_frame, 5909 [QUIC_FRAME_PATH_CHALLENGE] = process_path_challenge_frame, 5910 [QUIC_FRAME_PATH_RESPONSE] = process_path_response_frame, 5911 [QUIC_FRAME_RETIRE_CONNECTION_ID] = process_retire_connection_id_frame, 5912 [QUIC_FRAME_STREAM] = process_stream_frame, 5913 [QUIC_FRAME_CRYPTO] = process_crypto_frame, 5914 [QUIC_FRAME_HANDSHAKE_DONE] = process_handshake_done_frame, 5915 [QUIC_FRAME_ACK_FREQUENCY] = process_ack_frequency_frame, 5916 [QUIC_FRAME_TIMESTAMP] = process_timestamp_frame, 5917}; 5918 5919 5920static unsigned 5921process_packet_frame (struct ietf_full_conn *conn, 5922 struct lsquic_packet_in *packet_in, const unsigned char *p, size_t len) 5923{ 5924 enum enc_level enc_level; 5925 enum quic_frame_type type; 5926 char str[8 * 2 + 1]; 5927 5928 enc_level = lsquic_packet_in_enc_level(packet_in); 5929 type = conn->ifc_conn.cn_pf->pf_parse_frame_type(p, len); 5930 if (lsquic_legal_frames_by_level[conn->ifc_conn.cn_version][enc_level] 5931 & (1 << type)) 5932 { 5933 LSQ_DEBUG("about to process %s frame", frame_type_2_str[type]); 5934 packet_in->pi_frame_types |= 1 << type; 5935 return process_frames[type](conn, packet_in, p, len); 5936 } 5937 else 5938 { 5939 LSQ_DEBUG("invalid frame %u (bytes: %s) at encryption level %s", 5940 type, HEXSTR(p, MIN(len, 8), str), lsquic_enclev2str[enc_level]); 5941 return 0; 5942 } 5943} 5944 5945 5946static struct dcid_elem * 5947find_unassigned_dcid (struct ietf_full_conn *conn) 5948{ 5949 struct dcid_elem **dce; 5950 5951 for (dce = conn->ifc_dces; dce < DCES_END(conn); ++dce) 5952 if (*dce && !((*dce)->de_flags & DE_ASSIGNED)) 5953 return *dce; 5954 5955 return NULL; 5956} 5957 5958 5959static struct conn_cid_elem * 5960find_cce_by_cid (struct ietf_full_conn *conn, const lsquic_cid_t *cid) 5961{ 5962 struct lsquic_conn 
*const lconn = &conn->ifc_conn; 5963 struct conn_cid_elem *cce; 5964 5965 for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce) 5966 if ((lconn->cn_cces_mask & (1 << (cce - lconn->cn_cces))) 5967 && LSQUIC_CIDS_EQ(&cce->cce_cid, cid)) 5968 return cce; 5969 5970 return NULL; 5971} 5972 5973 5974static int 5975init_new_path (struct ietf_full_conn *conn, struct conn_path *path, 5976 int dcid_changed) 5977{ 5978 struct dcid_elem *dce; 5979 5980 dce = find_unassigned_dcid(conn); 5981 if (dce) 5982 { 5983 LSQ_DEBUGC("assigned new DCID %"CID_FMT" to new path %u", 5984 CID_BITS(&dce->de_cid), (unsigned) (path - conn->ifc_paths)); 5985 path->cop_path.np_dcid = dce->de_cid; 5986 dce->de_flags |= DE_ASSIGNED; 5987 } 5988 else if (!dcid_changed) 5989 { 5990 /* It is OK to reuse DCID if the peer did not use a new DCID when its 5991 * address changed. See [draft-ietf-quic-transport-24] Section 9.5. 5992 */ 5993 path->cop_path.np_dcid = CUR_NPATH(conn)->np_dcid; 5994 LSQ_DEBUGC("assigned already-used DCID %"CID_FMT" to new path %u, " 5995 "as incoming DCID did not change", 5996 CID_BITS(&path->cop_path.np_dcid), 5997 (unsigned) (path - conn->ifc_paths)); 5998 } 5999 else 6000 { 6001 LSQ_DEBUG("Don't have an unassigned DCID: cannot initialize path"); 6002 return -1; 6003 } 6004 6005 path->cop_path.np_pack_size 6006 = calc_base_packet_size(conn, NP_IS_IPv6(&path->cop_path)); 6007 6008 if (conn->ifc_max_udp_payload < path->cop_path.np_pack_size) 6009 path->cop_path.np_pack_size = conn->ifc_max_udp_payload; 6010 6011 LSQ_DEBUG("initialized path %u", (unsigned) (path - conn->ifc_paths)); 6012 6013 return 0; 6014} 6015 6016 6017static void 6018on_new_or_unconfirmed_path (struct ietf_full_conn *conn, 6019 const struct lsquic_packet_in *packet_in) 6020{ 6021 struct lsquic_conn *const lconn = &conn->ifc_conn; 6022 struct conn_path *const path = &conn->ifc_paths[packet_in->pi_path_id]; 6023 struct conn_cid_elem *cce; 6024 int dcid_changed; 6025 char cidbuf_[MAX_CID_LEN * 2 + 1]; 6026 6027 /* An endpoint only changes the address that it sends packets to in 6028 * response to the highest-numbered non-probing packet. This ensures 6029 * that an endpoint does not send packets to an old peer address in the 6030 * case that it receives reordered packets. 6031 * 6032 * [draft-ietf-quic-transport-20], Section 9.3. 6033 */ 6034 if (lsquic_packet_in_non_probing(packet_in) 6035 && packet_in->pi_packno > conn->ifc_max_non_probing) 6036 path->cop_flags |= COP_GOT_NONPROB; 6037 6038 /* If we cannot find a SCID at this point, something is wrong. 
*/ 6039 cce = find_cce_by_cid(conn, &packet_in->pi_dcid); 6040 if (!cce) 6041 { 6042 ABORT_ERROR("DCID %"CID_FMT" not found on new path", 6043 CID_BITS(&packet_in->pi_dcid)); 6044 return; 6045 } 6046 6047 dcid_changed = !(cce->cce_flags & CCE_USED); 6048 if (!(path->cop_flags & COP_INITIALIZED)) 6049 { 6050 LSQ_DEBUGC("current SCID: %"CID_FMT, CID_BITS(CN_SCID(&conn->ifc_conn))); 6051 LSQ_DEBUGC("packet in DCID: %"CID_FMT"; changed: %d", 6052 CID_BITS(&packet_in->pi_dcid), dcid_changed); 6053 if (0 == init_new_path(conn, path, dcid_changed)) 6054 path->cop_flags |= COP_INITIALIZED; 6055 else 6056 return; 6057 6058 conn->ifc_send_flags |= SF_SEND_PATH_CHAL << packet_in->pi_path_id; 6059 LSQ_DEBUG("scheduled return path challenge on path %hhu", 6060 packet_in->pi_path_id); 6061 } 6062 else if ((path->cop_flags & (COP_VALIDATED|COP_GOT_NONPROB)) 6063 == (COP_VALIDATED|COP_GOT_NONPROB)) 6064 { 6065 assert(path->cop_flags & COP_INITIALIZED); 6066 LSQ_DEBUG("received non-probing frame on validated path %hhu, " 6067 "switch to it", packet_in->pi_path_id); 6068 switch_path_to(conn, packet_in->pi_path_id); 6069 } 6070 6071 path->cop_cce_idx = cce - lconn->cn_cces; 6072 cce->cce_flags |= CCE_USED; 6073 LOG_SCIDS(conn); 6074} 6075 6076 6077static void 6078parse_regular_packet (struct ietf_full_conn *conn, 6079 struct lsquic_packet_in *packet_in) 6080{ 6081 const unsigned char *p, *pend; 6082 unsigned len; 6083 6084 p = packet_in->pi_data + packet_in->pi_header_sz; 6085 pend = packet_in->pi_data + packet_in->pi_data_sz; 6086 6087 while (p < pend) 6088 { 6089 len = process_packet_frame(conn, packet_in, p, pend - p); 6090 if (len > 0) 6091 p += len; 6092 else 6093 { 6094 ABORT_ERROR("Error parsing frame"); 6095 break; 6096 } 6097 } 6098} 6099 6100 6101/* From [draft-ietf-quic-transport-24] Section 13.2.1: 6102 * " An endpoint MUST NOT send a non-ack-eliciting packet in response 6103 * " to a non-ack-eliciting packet, even if there are packet gaps 6104 * " which precede the received packet. 6105 * 6106 * To ensure that we always send an ack-eliciting packet in this case, we 6107 * check that there are frames that are about to be written. 
6108 */ 6109static int 6110many_in_and_will_write (struct ietf_full_conn *conn) 6111{ 6112 return conn->ifc_n_slack_all > MAX_ANY_PACKETS_SINCE_LAST_ACK 6113 && (conn->ifc_send_flags 6114 || !TAILQ_EMPTY(&conn->ifc_pub.sending_streams) 6115 || !TAILQ_EMPTY(&conn->ifc_pub.write_streams)) 6116 ; 6117} 6118 6119 6120static void 6121try_queueing_ack_app (struct ietf_full_conn *conn, 6122 int was_missing, lsquic_time_t now) 6123{ 6124 lsquic_time_t srtt, ack_timeout; 6125 6126 if (conn->ifc_n_slack_akbl[PNS_APP] >= conn->ifc_max_retx_since_last_ack 6127 || ((conn->ifc_flags & IFC_ACK_HAD_MISS) 6128 && was_missing && conn->ifc_n_slack_akbl[PNS_APP] > 0) 6129 || many_in_and_will_write(conn)) 6130 { 6131 lsquic_alarmset_unset(&conn->ifc_alset, AL_ACK_APP); 6132 lsquic_send_ctl_sanity_check(&conn->ifc_send_ctl); 6133 conn->ifc_flags |= IFC_ACK_QUED_APP; 6134 LSQ_DEBUG("%s ACK queued: ackable: %u; all: %u; had_miss: %d; " 6135 "was_missing: %d", 6136 lsquic_pns2str[PNS_APP], conn->ifc_n_slack_akbl[PNS_APP], 6137 conn->ifc_n_slack_all, 6138 !!(conn->ifc_flags & IFC_ACK_HAD_MISS), was_missing); 6139 } 6140 else if (conn->ifc_n_slack_akbl[PNS_APP] > 0) 6141 { 6142 /* See https://github.com/quicwg/base-drafts/issues/3304 for more */ 6143 srtt = lsquic_rtt_stats_get_srtt(&conn->ifc_pub.rtt_stats); 6144 if (srtt) 6145 ack_timeout = MAX(1000, MIN(ACK_TIMEOUT, srtt / 4)); 6146 else 6147 ack_timeout = ACK_TIMEOUT; 6148 lsquic_alarmset_set(&conn->ifc_alset, AL_ACK_APP, 6149 now + ack_timeout); 6150 LSQ_DEBUG("%s ACK alarm set to %"PRIu64, lsquic_pns2str[PNS_APP], 6151 now + ack_timeout); 6152 } 6153} 6154 6155 6156static void 6157try_queueing_ack_init_or_hsk (struct ietf_full_conn *conn, 6158 enum packnum_space pns) 6159{ 6160 if (conn->ifc_n_slack_akbl[pns] > 0) 6161 { 6162 conn->ifc_flags |= IFC_ACK_QUED_INIT << pns; 6163 LSQ_DEBUG("%s ACK queued: ackable: %u", 6164 lsquic_pns2str[pns], conn->ifc_n_slack_akbl[pns]); 6165 } 6166} 6167 6168 6169static void 6170try_queueing_ack (struct ietf_full_conn *conn, enum packnum_space pns, 6171 int was_missing, lsquic_time_t now) 6172{ 6173 if (PNS_APP == pns) 6174 try_queueing_ack_app(conn, was_missing, now); 6175 else 6176 try_queueing_ack_init_or_hsk(conn, pns); 6177} 6178 6179 6180static int 6181maybe_queue_opp_ack (struct ietf_full_conn *conn) 6182{ 6183 if (/* If there is at least one ackable packet */ 6184 conn->ifc_n_slack_akbl[PNS_APP] > 0 6185 /* ...and there are things to write */ 6186 && (!TAILQ_EMPTY(&conn->ifc_pub.write_streams) || conn->ifc_send_flags) 6187 /* ...and writing is possible */ 6188 && write_is_possible(conn)) 6189 { 6190 lsquic_alarmset_unset(&conn->ifc_alset, AL_ACK_APP); 6191 lsquic_send_ctl_sanity_check(&conn->ifc_send_ctl); 6192 conn->ifc_flags |= IFC_ACK_QUED_APP; 6193 LSQ_DEBUG("%s ACK queued opportunistically", lsquic_pns2str[PNS_APP]); 6194 return 1; 6195 } 6196 else 6197 return 0; 6198} 6199 6200 6201static int 6202verify_retry_packet (struct ietf_full_conn *conn, 6203 const struct lsquic_packet_in *packet_in) 6204{ 6205 unsigned char *pseudo_packet; 6206 size_t out_len, ad_len; 6207 unsigned ret_ver; 6208 int verified; 6209 6210 if (1 + CUR_DCID(conn)->len + packet_in->pi_data_sz > 0x1000) 6211 { 6212 /* Cover the theoretical possibility that we cannot fit the pseudo- 6213 * packet and 16-byte decrypted output into 4 KB: 6214 */ 6215 LSQ_INFO("%s: Retry packet is too long: %hu bytes", __func__, 6216 packet_in->pi_data_sz); 6217 return -1; 6218 } 6219 6220 pseudo_packet = lsquic_mm_get_4k(conn->ifc_pub.mm); 6221 if (!pseudo_packet) 
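    /* Once the 4 KB buffer is obtained, the pseudo-packet assembled below
     * is laid out as
     *
     *     1 byte            length of the current (still original) DCID
     *     DCID len bytes    that DCID
     *     pi_data_sz bytes  the Retry packet exactly as received, whose
     *                       final 16 bytes are the integrity tag
     *
     * The EVP_AEAD_CTX_open() call further down uses everything before the
     * tag as associated data and the tag itself as the ciphertext of an
     * empty plaintext, so the Retry verifies only when the open succeeds
     * with out_len == 0 (cf. "Retry Packet Integrity" in QUIC-TLS /
     * RFC 9001, Section 5.8).
     */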
6222 { 6223 LSQ_INFO("%s: cannot allocate memory", __func__); 6224 return -1; 6225 } 6226 6227 pseudo_packet[0] = CUR_DCID(conn)->len; 6228 memcpy(pseudo_packet + 1, CUR_DCID(conn)->idbuf, CUR_DCID(conn)->len); 6229 memcpy(pseudo_packet + 1 + CUR_DCID(conn)->len, packet_in->pi_data, 6230 packet_in->pi_data_sz); 6231 6232 ret_ver = lsquic_version_2_retryver(conn->ifc_conn.cn_version); 6233 out_len = 0; 6234 ad_len = 1 + CUR_DCID(conn)->len + packet_in->pi_data_sz - 16; 6235 verified = 1 == EVP_AEAD_CTX_open( 6236 &conn->ifc_enpub->enp_retry_aead_ctx[ret_ver], 6237 pseudo_packet + ad_len, &out_len, out_len, 6238 lsquic_retry_nonce_buf[ret_ver], IETF_RETRY_NONCE_SZ, 6239 pseudo_packet + ad_len, 16, pseudo_packet, ad_len) 6240 && out_len == 0; 6241 6242 lsquic_mm_put_4k(conn->ifc_pub.mm, pseudo_packet); 6243 return verified ? 0 : -1; 6244} 6245 6246 6247static int 6248process_retry_packet (struct ietf_full_conn *conn, 6249 struct lsquic_packet_in *packet_in) 6250{ 6251 lsquic_cid_t scid; 6252 6253 if (conn->ifc_flags & (IFC_SERVER|IFC_RETRIED)) 6254 { 6255 /* [draft-ietf-quic-transport-24] Section 17.2.5: 6256 " After the client has received and processed an Initial or Retry 6257 " packet from the server, it MUST discard any subsequent Retry 6258 " packets that it receives. 6259 */ 6260 LSQ_DEBUG("ignore Retry packet"); 6261 return 0; 6262 } 6263 6264 if (CUR_DCID(conn)->len == packet_in->pi_scid_len 6265 && 0 == memcmp(CUR_DCID(conn)->idbuf, 6266 packet_in->pi_data + packet_in->pi_scid_off, 6267 packet_in->pi_scid_len)) 6268 { 6269 /* 6270 * [draft-ietf-quic-transport-24] Section 17.2.5: 6271 " A client MUST discard a Retry packet that contains a Source 6272 " Connection ID field that is identical to the Destination 6273 " Connection ID field of its Initial packet. 6274 */ 6275 LSQ_DEBUG("server provided same SCID as ODCID: discard packet"); 6276 return 0; 6277 } 6278 6279 if (0 != verify_retry_packet(conn, packet_in)) 6280 { 6281 LSQ_DEBUG("cannot verify retry packet: ignore it"); 6282 return 0; 6283 } 6284 6285 if (0 != lsquic_send_ctl_retry(&conn->ifc_send_ctl, 6286 packet_in->pi_data + packet_in->pi_token, 6287 packet_in->pi_token_size)) 6288 return -1; 6289 6290 lsquic_scid_from_packet_in(packet_in, &scid); 6291 if (0 != conn->ifc_conn.cn_esf.i->esfi_reset_dcid( 6292 conn->ifc_conn.cn_enc_session, CUR_DCID(conn), &scid)) 6293 return -1; 6294 6295 *CUR_DCID(conn) = scid; 6296 lsquic_alarmset_unset(&conn->ifc_alset, AL_RETX_INIT); 6297 lsquic_alarmset_unset(&conn->ifc_alset, AL_RETX_HSK); 6298 lsquic_alarmset_unset(&conn->ifc_alset, AL_RETX_APP); 6299 6300 LSQ_INFO("Received a retry packet. Will retry."); 6301 conn->ifc_flags |= IFC_RETRIED; 6302 return 0; 6303} 6304 6305 6306static int 6307is_stateless_reset (struct ietf_full_conn *conn, 6308 const struct lsquic_packet_in *packet_in) 6309{ 6310 struct lsquic_hash_elem *el; 6311 6312 if (packet_in->pi_data_sz < IQUIC_MIN_SRST_SIZE) 6313 return 0; 6314 6315 el = lsquic_hash_find(conn->ifc_enpub->enp_srst_hash, 6316 packet_in->pi_data + packet_in->pi_data_sz - IQUIC_SRESET_TOKEN_SZ, 6317 IQUIC_SRESET_TOKEN_SZ); 6318 if (!el) 6319 return 0; 6320 6321#ifndef NDEBUG 6322 const struct lsquic_conn *reset_lconn; 6323 reset_lconn = lsquic_hashelem_getdata(el); 6324 assert(reset_lconn == &conn->ifc_conn); 6325#endif 6326 return 1; 6327} 6328 6329 6330/* 6331 * Sets the new current SCID if the DCID in the incoming packet: 6332 * (1) was issued by this endpoint and 6333 * (2) has not been used before. 
6334 */ 6335static int 6336on_dcid_change (struct ietf_full_conn *conn, const lsquic_cid_t *dcid_in) 6337{ 6338 struct lsquic_conn *const lconn = &conn->ifc_conn; /* Shorthand */ 6339 struct conn_cid_elem *cce; 6340 6341 LSQ_DEBUG("peer switched its DCID, attempt to switch own SCID"); 6342 6343 for (cce = lconn->cn_cces; cce < END_OF_CCES(lconn); ++cce) 6344 if (cce - lconn->cn_cces != lconn->cn_cur_cce_idx 6345 && (lconn->cn_cces_mask & (1 << (cce - lconn->cn_cces))) 6346 && LSQUIC_CIDS_EQ(&cce->cce_cid, dcid_in)) 6347 break; 6348 6349 if (cce >= END_OF_CCES(lconn)) 6350 { 6351 ABORT_WARN("DCID not found"); 6352 return -1; 6353 } 6354 6355 if (cce->cce_flags & CCE_USED) 6356 { 6357 LSQ_DEBUGC("Current CID: %"CID_FMT, CID_BITS(CN_SCID(lconn))); 6358 LSQ_DEBUGC("DCID %"CID_FMT" has been used, not switching", 6359 CID_BITS(dcid_in)); 6360 return 0; 6361 } 6362 6363 cce->cce_flags |= CCE_USED; 6364 lconn->cn_cur_cce_idx = cce - lconn->cn_cces; 6365 LSQ_DEBUGC("%s: set SCID to %"CID_FMT, __func__, CID_BITS(CN_SCID(lconn))); 6366 LOG_SCIDS(conn); 6367 6368 /* Reset spin bit, see [draft-ietf-quic-transport-20] Section 17.3.1 */ 6369 maybe_enable_spin(conn); 6370 6371 return 0; 6372} 6373 6374 6375static void 6376ignore_init (struct ietf_full_conn *conn) 6377{ 6378 LSQ_DEBUG("henceforth, no Initial packets shall be sent or received"); 6379 conn->ifc_flags |= IFC_IGNORE_INIT; 6380 conn->ifc_flags &= ~(IFC_ACK_QUED_INIT << PNS_INIT); 6381 lsquic_send_ctl_empty_pns(&conn->ifc_send_ctl, PNS_INIT); 6382 lsquic_rechist_cleanup(&conn->ifc_rechist[PNS_INIT]); 6383 if (!(conn->ifc_flags & IFC_SERVER)) 6384 { 6385 if (conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR]) 6386 { 6387 lsquic_stream_destroy(conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR]); 6388 conn->ifc_u.cli.crypto_streams[ENC_LEV_CLEAR] = NULL; 6389 } 6390 conn->ifc_conn.cn_if = ietf_full_conn_iface_ptr; 6391 } 6392} 6393 6394 6395static void 6396ignore_hsk (struct ietf_full_conn *conn) 6397{ 6398 LSQ_DEBUG("henceforth, no Handshake packets shall be sent or received"); 6399 conn->ifc_flags |= IFC_IGNORE_HSK; 6400 conn->ifc_flags &= ~(IFC_ACK_QUED_INIT << PNS_HSK); 6401 lsquic_send_ctl_empty_pns(&conn->ifc_send_ctl, PNS_HSK); 6402 lsquic_rechist_cleanup(&conn->ifc_rechist[PNS_HSK]); 6403 if (!(conn->ifc_flags & IFC_SERVER)) 6404 if (conn->ifc_u.cli.crypto_streams[ENC_LEV_INIT]) 6405 { 6406 lsquic_stream_destroy(conn->ifc_u.cli.crypto_streams[ENC_LEV_INIT]); 6407 conn->ifc_u.cli.crypto_streams[ENC_LEV_INIT] = NULL; 6408 } 6409} 6410 6411 6412static void 6413record_dcid (struct ietf_full_conn *conn, 6414 const struct lsquic_packet_in *packet_in) 6415{ 6416 unsigned orig_cid_len; 6417 6418 orig_cid_len = CUR_DCID(conn)->len; 6419 conn->ifc_flags |= IFC_DCID_SET; 6420 lsquic_scid_from_packet_in(packet_in, CUR_DCID(conn)); 6421 LSQ_DEBUGC("set DCID to %"CID_FMT, CID_BITS(CUR_DCID(conn))); 6422 lsquic_send_ctl_cidlen_change(&conn->ifc_send_ctl, orig_cid_len, 6423 CUR_DCID(conn)->len); 6424} 6425 6426 6427static int 6428process_regular_packet (struct ietf_full_conn *conn, 6429 struct lsquic_packet_in *packet_in) 6430{ 6431 enum packnum_space pns; 6432 enum received_st st; 6433 enum dec_packin dec_packin; 6434 enum quic_ft_bit frame_types; 6435 int was_missing, packno_increased; 6436 unsigned char saved_path_id; 6437 6438 if (HETY_RETRY == packet_in->pi_header_type) 6439 return process_retry_packet(conn, packet_in); 6440 6441 pns = lsquic_hety2pns[ packet_in->pi_header_type ]; 6442 if (pns == PNS_INIT) 6443 
conn->ifc_conn.cn_esf.i->esfi_set_iscid(conn->ifc_conn.cn_enc_session, 6444 packet_in); 6445 if ((pns == PNS_INIT && (conn->ifc_flags & IFC_IGNORE_INIT)) 6446 || (pns == PNS_HSK && (conn->ifc_flags & IFC_IGNORE_HSK))) 6447 { 6448 /* Don't bother decrypting */ 6449 LSQ_DEBUG("ignore %s packet", 6450 pns == PNS_INIT ? "Initial" : "Handshake"); 6451 return 0; 6452 } 6453 6454 /* If a client receives packets from an unknown server address, the client 6455 * MUST discard these packets. 6456 * [draft-ietf-quic-transport-20], Section 9 6457 */ 6458 if (packet_in->pi_path_id != conn->ifc_cur_path_id 6459 && 0 == (conn->ifc_flags & IFC_SERVER) 6460 && !(packet_in->pi_path_id == conn->ifc_mig_path_id 6461 && migra_is_on(conn, conn->ifc_mig_path_id))) 6462 { 6463 /* The "known server address" is recorded in the current path. */ 6464 switch ((NP_IS_IPv6(CUR_NPATH(conn)) << 1) | 6465 NP_IS_IPv6(&conn->ifc_paths[packet_in->pi_path_id].cop_path)) 6466 { 6467 case (1 << 1) | 1: /* IPv6 */ 6468 if (lsquic_sockaddr_eq(NP_PEER_SA(CUR_NPATH(conn)), NP_PEER_SA( 6469 &conn->ifc_paths[packet_in->pi_path_id].cop_path))) 6470 goto known_peer_addr; 6471 break; 6472 case (0 << 1) | 0: /* IPv4 */ 6473 if (lsquic_sockaddr_eq(NP_PEER_SA(CUR_NPATH(conn)), NP_PEER_SA( 6474 &conn->ifc_paths[packet_in->pi_path_id].cop_path))) 6475 goto known_peer_addr; 6476 break; 6477 } 6478 LSQ_DEBUG("ignore packet from unknown server address"); 6479 return 0; 6480 } 6481 known_peer_addr: 6482 6483 /* The packet is decrypted before receive history is updated. This is 6484 * done to make sure that a bad packet won't occupy a slot in receive 6485 * history and subsequent good packet won't be marked as a duplicate. 6486 */ 6487 if (0 == (packet_in->pi_flags & PI_DECRYPTED)) 6488 { 6489 dec_packin = conn->ifc_conn.cn_esf_c->esf_decrypt_packet( 6490 conn->ifc_conn.cn_enc_session, conn->ifc_enpub, 6491 &conn->ifc_conn, packet_in); 6492 switch (dec_packin) 6493 { 6494 case DECPI_BADCRYPT: 6495 case DECPI_TOO_SHORT: 6496 if (conn->ifc_enpub->enp_settings.es_honor_prst 6497 /* In server mode, even if we do support stateless reset packets, 6498 * they are handled in lsquic_engine.c. No need to have this 6499 * logic here. 6500 */ 6501 && !(conn->ifc_flags & IFC_SERVER) 6502 && is_stateless_reset(conn, packet_in)) 6503 { 6504 LSQ_INFO("received stateless reset packet: aborting connection"); 6505 conn->ifc_flags |= IFC_GOT_PRST; 6506 return -1; 6507 } 6508 else if (dec_packin == DECPI_BADCRYPT) 6509 { 6510 LSQ_INFO("could not decrypt packet (type %s)", 6511 lsquic_hety2str[packet_in->pi_header_type]); 6512 return 0; 6513 } 6514 else 6515 { 6516 LSQ_INFO("packet is too short to be decrypted"); 6517 return 0; 6518 } 6519 case DECPI_NOT_YET: 6520 return 0; 6521 case DECPI_NOMEM: 6522 return 0; 6523 case DECPI_VIOLATION: 6524 ABORT_QUIETLY(0, TEC_PROTOCOL_VIOLATION, 6525 "decrypter reports protocol violation"); 6526 return -1; 6527 case DECPI_OK: 6528 /* Receiving any other type of packet precludes subsequent retries. 6529 * We only set it if decryption is successful. 
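             * Concretely: once IFC_RETRIED is set below,
             * process_retry_packet() above discards any further Retry
             * packets.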
6530 */ 6531 conn->ifc_flags |= IFC_RETRIED; 6532 break; 6533 } 6534 } 6535 6536 EV_LOG_PACKET_IN(LSQUIC_LOG_CONN_ID, packet_in); 6537 6538 packno_increased = packet_in->pi_packno 6539 > lsquic_rechist_largest_packno(&conn->ifc_rechist[pns]); 6540 st = lsquic_rechist_received(&conn->ifc_rechist[pns], packet_in->pi_packno, 6541 packet_in->pi_received); 6542 switch (st) { 6543 case REC_ST_OK: 6544 if (!(conn->ifc_flags & (IFC_SERVER|IFC_DCID_SET)) 6545 && (packet_in->pi_scid_len)) 6546 record_dcid(conn, packet_in); 6547 saved_path_id = conn->ifc_cur_path_id; 6548 parse_regular_packet(conn, packet_in); 6549 if (saved_path_id == conn->ifc_cur_path_id) 6550 { 6551 if (conn->ifc_cur_path_id != packet_in->pi_path_id) 6552 on_new_or_unconfirmed_path(conn, packet_in); 6553 else if (!LSQUIC_CIDS_EQ(CN_SCID(&conn->ifc_conn), 6554 &packet_in->pi_dcid)) 6555 { 6556 if (0 != on_dcid_change(conn, &packet_in->pi_dcid)) 6557 return -1; 6558 } 6559 } 6560 if (lsquic_packet_in_non_probing(packet_in) 6561 && packet_in->pi_packno > conn->ifc_max_non_probing) 6562 conn->ifc_max_non_probing = packet_in->pi_packno; 6563 if (0 == (conn->ifc_flags & (IFC_ACK_QUED_INIT << pns))) 6564 { 6565 frame_types = packet_in->pi_frame_types; 6566 if (frame_types & IQUIC_FRAME_ACKABLE_MASK) 6567 { 6568 was_missing = packet_in->pi_packno != 6569 lsquic_rechist_largest_packno(&conn->ifc_rechist[pns]); 6570 ++conn->ifc_n_slack_akbl[pns]; 6571 } 6572 else 6573 was_missing = 0; 6574 conn->ifc_n_slack_all += PNS_APP == pns; 6575 try_queueing_ack(conn, pns, was_missing, packet_in->pi_received); 6576 } 6577 conn->ifc_incoming_ecn <<= 1; 6578 conn->ifc_incoming_ecn |= 6579 lsquic_packet_in_ecn(packet_in) != ECN_NOT_ECT; 6580 ++conn->ifc_ecn_counts_in[pns][ lsquic_packet_in_ecn(packet_in) ]; 6581 if (packno_increased && PNS_APP == pns && (conn->ifc_flags & IFC_SPIN)) 6582 { 6583 if (conn->ifc_flags & IFC_SERVER) 6584 conn->ifc_spin_bit = lsquic_packet_in_spin_bit(packet_in); 6585 else 6586 conn->ifc_spin_bit = !lsquic_packet_in_spin_bit(packet_in); 6587 } 6588 conn->ifc_pub.bytes_in += packet_in->pi_data_sz; 6589 if ((conn->ifc_mflags & MF_VALIDATE_PATH) && 6590 (packet_in->pi_header_type == HETY_NOT_SET 6591 || packet_in->pi_header_type == HETY_HANDSHAKE)) 6592 { 6593 conn->ifc_mflags &= ~MF_VALIDATE_PATH; 6594 lsquic_send_ctl_path_validated(&conn->ifc_send_ctl); 6595 } 6596 return 0; 6597 case REC_ST_DUP: 6598 LSQ_INFO("packet %"PRIu64" is a duplicate", packet_in->pi_packno); 6599 return 0; 6600 default: 6601 assert(0); 6602 /* Fall through */ 6603 case REC_ST_ERR: 6604 LSQ_INFO("error processing packet %"PRIu64, packet_in->pi_packno); 6605 return -1; 6606 } 6607} 6608 6609 6610static int 6611verneg_ok (const struct ietf_full_conn *conn) 6612{ 6613 enum lsquic_version ver; 6614 6615 ver = highest_bit_set(conn->ifc_u.cli.ifcli_ver_neg.vn_supp); 6616 return (1 << ver) & LSQUIC_IETF_DRAFT_VERSIONS; 6617} 6618 6619 6620/* This function is used by the client when version negotiation is not yet 6621 * complete. 
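 * The switch to process_incoming_packet_fast() happens below, once the
 * first regular (non-Version-Negotiation) packet arrives.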
6622 */ 6623static int 6624process_incoming_packet_verneg (struct ietf_full_conn *conn, 6625 struct lsquic_packet_in *packet_in) 6626{ 6627 int s; 6628 struct ver_iter vi; 6629 lsquic_ver_tag_t ver_tag; 6630 enum lsquic_version version; 6631 unsigned versions; 6632 6633 if (lsquic_packet_in_is_verneg(packet_in)) 6634 { 6635 LSQ_DEBUG("Processing version-negotiation packet"); 6636 6637 if (conn->ifc_u.cli.ifcli_ver_neg.vn_state != VN_START) 6638 { 6639 LSQ_DEBUG("ignore a likely duplicate version negotiation packet"); 6640 return 0; 6641 } 6642 6643 if (!(LSQUIC_CIDS_EQ(&conn->ifc_conn.cn_cid, &packet_in->pi_dcid) 6644 && CUR_DCID(conn)->len == packet_in->pi_scid_len 6645 && 0 == memcmp(CUR_DCID(conn)->idbuf, packet_in->pi_data 6646 + packet_in->pi_scid_off, packet_in->pi_scid_len))) 6647 { 6648 LSQ_DEBUG("SCID and DCID in verneg packet don't match what we " 6649 "sent: ignore"); 6650 return 0; 6651 } 6652 6653 versions = 0; 6654 for (s = lsquic_packet_in_ver_first(packet_in, &vi, &ver_tag); s; 6655 s = lsquic_packet_in_ver_next(&vi, &ver_tag)) 6656 { 6657 version = lsquic_tag2ver(ver_tag); 6658 if (version < N_LSQVER) 6659 { 6660 versions |= 1 << version; 6661 LSQ_DEBUG("server supports version %s", lsquic_ver2str[version]); 6662 EV_LOG_VER_NEG(LSQUIC_LOG_CONN_ID, 6663 "supports", lsquic_ver2str[version]); 6664 } 6665 } 6666 6667 /* [draft-ietf-quic-transport-28] Section 6.2: 6668 " A client MUST discard a Version Negotiation packet that lists the 6669 " QUIC version selected by the client. 6670 */ 6671 if (versions & (1 << conn->ifc_u.cli.ifcli_ver_neg.vn_ver)) 6672 { 6673 LSQ_DEBUG("server replied with version we sent, %s, ignore", 6674 lsquic_ver2str[conn->ifc_u.cli.ifcli_ver_neg.vn_ver]); 6675 return 0; 6676 } 6677 6678 /* [draft-ietf-quic-transport-28] Section 6.2: 6679 " A client that supports only this version of QUIC MUST abandon the 6680 " current connection attempt if it receives a Version Negotiation 6681 " packet [...] 
6682 */ 6683 if (!verneg_ok(conn)) 6684 { 6685 ABORT_ERROR("version negotiation not permitted in this version " 6686 "of QUIC"); 6687 return -1; 6688 } 6689 6690 versions &= conn->ifc_u.cli.ifcli_ver_neg.vn_supp; 6691 if (0 == versions) 6692 { 6693 ABORT_ERROR("client does not support any of the server-specified " 6694 "versions"); 6695 return -1; 6696 } 6697 6698 set_versions(conn, versions, NULL); 6699 conn->ifc_u.cli.ifcli_ver_neg.vn_state = VN_IN_PROGRESS; 6700 lsquic_send_ctl_expire_all(&conn->ifc_send_ctl); 6701 return 0; 6702 } 6703 6704 assert(conn->ifc_u.cli.ifcli_ver_neg.vn_tag); 6705 assert(conn->ifc_u.cli.ifcli_ver_neg.vn_state != VN_END); 6706 conn->ifc_u.cli.ifcli_ver_neg.vn_state = VN_END; 6707 conn->ifc_u.cli.ifcli_ver_neg.vn_tag = NULL; 6708 conn->ifc_conn.cn_version = conn->ifc_u.cli.ifcli_ver_neg.vn_ver; 6709 conn->ifc_conn.cn_flags |= LSCONN_VER_SET; 6710 LSQ_DEBUG("end of version negotiation: agreed upon %s", 6711 lsquic_ver2str[conn->ifc_u.cli.ifcli_ver_neg.vn_ver]); 6712 EV_LOG_VER_NEG(LSQUIC_LOG_CONN_ID, 6713 "agreed", lsquic_ver2str[conn->ifc_u.cli.ifcli_ver_neg.vn_ver]); 6714 conn->ifc_process_incoming_packet = process_incoming_packet_fast; 6715 6716 return process_regular_packet(conn, packet_in); 6717} 6718 6719 6720/* This function is used after version negotiation is completed */ 6721static int 6722process_incoming_packet_fast (struct ietf_full_conn *conn, 6723 struct lsquic_packet_in *packet_in) 6724{ 6725 return process_regular_packet(conn, packet_in); 6726} 6727 6728 6729static void 6730set_earliest_idle_alarm (struct ietf_full_conn *conn, lsquic_time_t idle_conn_to) 6731{ 6732 lsquic_time_t exp; 6733 6734 if (conn->ifc_pub.last_prog 6735 && (assert(conn->ifc_mflags & MF_NOPROG_TIMEOUT), 6736 exp = conn->ifc_pub.last_prog + conn->ifc_enpub->enp_noprog_timeout, 6737 exp < idle_conn_to)) 6738 idle_conn_to = exp; 6739 if (idle_conn_to) 6740 lsquic_alarmset_set(&conn->ifc_alset, AL_IDLE, idle_conn_to); 6741} 6742 6743 6744static void 6745ietf_full_conn_ci_packet_in (struct lsquic_conn *lconn, 6746 struct lsquic_packet_in *packet_in) 6747{ 6748 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 6749 6750 set_earliest_idle_alarm(conn, conn->ifc_idle_to 6751 ? 
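    /* A zero argument leaves the idle alarm unset; otherwise the deadline
     * is this packet's arrival time plus the idle timeout, and
     * set_earliest_idle_alarm() above may pull it in to the no-progress
     * deadline (last_prog + enp_noprog_timeout) if that comes first.
     */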
packet_in->pi_received + conn->ifc_idle_to : 0); 6752 if (0 == (conn->ifc_flags & IFC_IMMEDIATE_CLOSE_FLAGS)) 6753 if (0 != conn->ifc_process_incoming_packet(conn, packet_in)) 6754 conn->ifc_flags |= IFC_ERROR; 6755} 6756 6757 6758static void 6759ietf_full_conn_ci_packet_not_sent (struct lsquic_conn *lconn, 6760 struct lsquic_packet_out *packet_out) 6761{ 6762#ifndef NDEBUG 6763 if (packet_out->po_flags & PO_ENCRYPTED) 6764 assert(packet_out->po_lflags & POL_HEADER_PROT); 6765#endif 6766 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 6767 lsquic_send_ctl_delayed_one(&conn->ifc_send_ctl, packet_out); 6768} 6769 6770 6771static void 6772ietf_full_conn_ci_packet_too_large (struct lsquic_conn *lconn, 6773 struct lsquic_packet_out *packet_out) 6774{ 6775 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 6776 6777#ifndef NDEBUG 6778 assert(packet_out->po_lflags & POL_HEADER_PROT); 6779#endif 6780 6781 lsquic_senhist_add(&conn->ifc_send_ctl.sc_senhist, packet_out->po_packno); 6782 lsquic_send_ctl_sanity_check(&conn->ifc_send_ctl); 6783 if (packet_out->po_flags & PO_MTU_PROBE) 6784 { 6785 LSQ_DEBUG("%zu-byte MTU probe in packet %"PRIu64" is too large", 6786 lsquic_packet_out_sent_sz(&conn->ifc_conn, packet_out), 6787 packet_out->po_packno); 6788 mtu_probe_too_large(conn, packet_out); 6789 } 6790 else 6791 ABORT_WARN("non-MTU probe %zu-byte packet %"PRIu64" is too large", 6792 lsquic_packet_out_sent_sz(&conn->ifc_conn, packet_out), 6793 packet_out->po_packno); 6794 6795 lsquic_packet_out_destroy(packet_out, conn->ifc_enpub, 6796 packet_out->po_path->np_peer_ctx); 6797} 6798 6799 6800/* Calling of ignore_init() must be delayed until all batched packets have 6801 * been returned by the engine. 6802 */ 6803static void 6804pre_hsk_packet_sent_or_delayed (struct ietf_full_conn *conn, 6805 const struct lsquic_packet_out *packet_out) 6806{ 6807#ifndef NDEBUG 6808 if (packet_out->po_flags & PO_ENCRYPTED) 6809 assert(packet_out->po_lflags & POL_HEADER_PROT); 6810#endif 6811 /* Once IFC_IGNORE_INIT is set, the pre-hsk wrapper is removed: */ 6812 assert(!(conn->ifc_flags & IFC_IGNORE_INIT)); 6813 --conn->ifc_u.cli.ifcli_packets_out; 6814 if (PNS_HSK == lsquic_packet_out_pns(packet_out)) 6815 conn->ifc_u.cli.ifcli_flags |= IFCLI_HSK_SENT_OR_DEL; 6816 if (0 == conn->ifc_u.cli.ifcli_packets_out 6817 && (conn->ifc_u.cli.ifcli_flags & IFCLI_HSK_SENT_OR_DEL)) 6818 ignore_init(conn); 6819} 6820 6821 6822static void 6823ietf_full_conn_ci_packet_not_sent_pre_hsk (struct lsquic_conn *lconn, 6824 struct lsquic_packet_out *packet_out) 6825{ 6826 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 6827 ietf_full_conn_ci_packet_not_sent(lconn, packet_out); 6828 pre_hsk_packet_sent_or_delayed(conn, packet_out); 6829} 6830 6831 6832static void 6833ietf_full_conn_ci_packet_sent (struct lsquic_conn *lconn, 6834 struct lsquic_packet_out *packet_out) 6835{ 6836 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 6837 int s; 6838 6839 if (packet_out->po_frame_types & IQUIC_FRAME_RETX_MASK) 6840 conn->ifc_n_cons_unretx = 0; 6841 else 6842 ++conn->ifc_n_cons_unretx; 6843 s = lsquic_send_ctl_sent_packet(&conn->ifc_send_ctl, packet_out); 6844 if (s != 0) 6845 ABORT_ERROR("sent packet failed: %s", strerror(errno)); 6846 ++conn->ifc_ecn_counts_out[ lsquic_packet_out_pns(packet_out) ] 6847 [ lsquic_packet_out_ecn(packet_out) ]; 6848 /* Set blocked keep-alive for a [1,8] seconds */ 6849 if (packet_out->po_frame_types 6850 & (QUIC_FTBIT_BLOCKED|QUIC_FTBIT_STREAM_BLOCKED)) 6851 
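    /* lsquic_crand_get_nybble() presumably yields 0..15; masking with 7
     * gives 0..7 and adding 1 gives 1..8, which the multiplication below
     * turns into 1..8 seconds' worth of microseconds past the send time.
     */
#if 0   /* Illustration only, not compiled: a nybble of 13 -> 6 seconds */
    assert((1 + (13 & 7)) * 1000000 == 6000000);
#endif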
lsquic_alarmset_set(&conn->ifc_alset, AL_BLOCKED_KA, 6852 packet_out->po_sent + (1 + (7 & lsquic_crand_get_nybble( 6853 conn->ifc_enpub->enp_crand))) * 1000000); 6854 conn->ifc_pub.bytes_out += lsquic_packet_out_sent_sz(&conn->ifc_conn, 6855 packet_out); 6856} 6857 6858 6859static void 6860ietf_full_conn_ci_packet_sent_pre_hsk (struct lsquic_conn *lconn, 6861 struct lsquic_packet_out *packet_out) 6862{ 6863 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 6864 ietf_full_conn_ci_packet_sent(lconn, packet_out); 6865 pre_hsk_packet_sent_or_delayed(conn, packet_out); 6866} 6867 6868 6869static void (*const send_funcs[N_SEND])( 6870 struct ietf_full_conn *, lsquic_time_t) = 6871{ 6872 [SEND_NEW_CID] = generate_new_cid_frames, 6873 [SEND_RETIRE_CID] = generate_retire_cid_frames, 6874 [SEND_STREAMS_BLOCKED_UNI] = generate_streams_blocked_uni_frame, 6875 [SEND_STREAMS_BLOCKED_BIDI] = generate_streams_blocked_bidi_frame, 6876 [SEND_MAX_STREAMS_UNI] = generate_max_streams_uni_frame, 6877 [SEND_MAX_STREAMS_BIDI] = generate_max_streams_bidi_frame, 6878 [SEND_STOP_SENDING] = generate_stop_sending_frames, 6879 [SEND_PATH_CHAL_PATH_0] = generate_path_chal_0, 6880 [SEND_PATH_CHAL_PATH_1] = generate_path_chal_1, 6881 [SEND_PATH_CHAL_PATH_2] = generate_path_chal_2, 6882 [SEND_PATH_CHAL_PATH_3] = generate_path_chal_3, 6883 [SEND_PATH_RESP_PATH_0] = generate_path_resp_0, 6884 [SEND_PATH_RESP_PATH_1] = generate_path_resp_1, 6885 [SEND_PATH_RESP_PATH_2] = generate_path_resp_2, 6886 [SEND_PATH_RESP_PATH_3] = generate_path_resp_3, 6887 [SEND_PING] = generate_ping_frame, 6888 [SEND_HANDSHAKE_DONE] = generate_handshake_done_frame, 6889 [SEND_ACK_FREQUENCY] = generate_ack_frequency_frame, 6890}; 6891 6892 6893/* List bits that have corresponding entries in send_funcs */ 6894#define SEND_WITH_FUNCS (SF_SEND_NEW_CID|SF_SEND_RETIRE_CID\ 6895 |SF_SEND_STREAMS_BLOCKED_UNI|SF_SEND_STREAMS_BLOCKED_BIDI\ 6896 |SF_SEND_MAX_STREAMS_UNI|SF_SEND_MAX_STREAMS_BIDI\ 6897 |SF_SEND_PATH_CHAL_PATH_0|SF_SEND_PATH_CHAL_PATH_1\ 6898 |SF_SEND_PATH_CHAL_PATH_2|SF_SEND_PATH_CHAL_PATH_3\ 6899 |SF_SEND_PATH_RESP_PATH_0|SF_SEND_PATH_RESP_PATH_1\ 6900 |SF_SEND_PATH_RESP_PATH_2|SF_SEND_PATH_RESP_PATH_3\ 6901 |SF_SEND_PING|SF_SEND_HANDSHAKE_DONE\ 6902 |SF_SEND_ACK_FREQUENCY\ 6903 |SF_SEND_STOP_SENDING) 6904 6905 6906/* This should be called before lsquic_alarmset_ring_expired() */ 6907static void 6908maybe_set_noprogress_alarm (struct ietf_full_conn *conn, lsquic_time_t now) 6909{ 6910 lsquic_time_t exp; 6911 6912 if (conn->ifc_mflags & MF_NOPROG_TIMEOUT) 6913 { 6914 if (conn->ifc_pub.last_tick) 6915 { 6916 exp = conn->ifc_pub.last_prog + conn->ifc_enpub->enp_noprog_timeout; 6917 if (!lsquic_alarmset_is_set(&conn->ifc_alset, AL_IDLE) 6918 || exp < conn->ifc_alset.as_expiry[AL_IDLE]) 6919 lsquic_alarmset_set(&conn->ifc_alset, AL_IDLE, exp); 6920 conn->ifc_pub.last_tick = now; 6921 } 6922 else 6923 { 6924 conn->ifc_pub.last_tick = now; 6925 conn->ifc_pub.last_prog = now; 6926 } 6927 } 6928} 6929 6930 6931static void 6932check_or_schedule_mtu_probe (struct ietf_full_conn *conn, lsquic_time_t now) 6933{ 6934 struct conn_path *const cpath = CUR_CPATH(conn); 6935 struct dplpmtud_state *const ds = &cpath->cop_dplpmtud; 6936 struct lsquic_packet_out *packet_out; 6937 unsigned short saved_packet_sz, avail, mtu_ceiling, net_header_sz, probe_sz; 6938 int sz; 6939 6940 if (ds->ds_flags & DS_PROBE_SENT) 6941 { 6942 assert(ds->ds_probe_sent + conn->ifc_enpub->enp_mtu_probe_timer < now); 6943 LSQ_DEBUG("MTU probe of %hu bytes lost", 
ds->ds_probed_size); 6944 ds->ds_flags &= ~DS_PROBE_SENT; 6945 conn->ifc_mflags |= MF_CHECK_MTU_PROBE; 6946 if (ds->ds_probe_count >= 3) 6947 { 6948 LSQ_DEBUG("MTU probe of %hu bytes lost after %hhu tries", 6949 ds->ds_probed_size, ds->ds_probe_count); 6950 ds->ds_failed_size = ds->ds_probed_size; 6951 ds->ds_probe_count = 0; 6952 } 6953 } 6954 6955 assert(0 == ds->ds_probe_sent 6956 || ds->ds_probe_sent + conn->ifc_enpub->enp_mtu_probe_timer < now); 6957 6958 if (!(conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE) 6959 || lsquic_senhist_largest(&conn->ifc_send_ctl.sc_senhist) < 30 6960 || lsquic_send_ctl_in_recovery(&conn->ifc_send_ctl) 6961 || !lsquic_send_ctl_can_send_probe(&conn->ifc_send_ctl, 6962 &cpath->cop_path)) 6963 { 6964 return; 6965 } 6966 6967 net_header_sz = TRANSPORT_OVERHEAD(NP_IS_IPv6(&cpath->cop_path)); 6968 if (ds->ds_failed_size) 6969 mtu_ceiling = ds->ds_failed_size; /* Don't subtract net_header_sz */ 6970 else if (conn->ifc_settings->es_max_plpmtu) 6971 mtu_ceiling = conn->ifc_settings->es_max_plpmtu - net_header_sz; 6972 else 6973 mtu_ceiling = 1500 - net_header_sz; 6974 6975 if (conn->ifc_max_udp_payload < mtu_ceiling) 6976 { 6977 LSQ_DEBUG("cap MTU ceiling to peer's max_udp_payload_size TP of %hu " 6978 "bytes", conn->ifc_max_udp_payload); 6979 mtu_ceiling = conn->ifc_max_udp_payload; 6980 } 6981 6982 if (cpath->cop_path.np_pack_size >= mtu_ceiling 6983 || (float) cpath->cop_path.np_pack_size / (float) mtu_ceiling >= 0.99) 6984 { 6985 LSQ_DEBUG("stop MTU probing on path %hhu having achieved about " 6986 "%.1f%% efficiency (detected MTU: %hu; failed MTU: %hu)", 6987 cpath->cop_path.np_path_id, 6988 100. * (float) cpath->cop_path.np_pack_size / (float) mtu_ceiling, 6989 cpath->cop_path.np_pack_size, ds->ds_failed_size); 6990 conn->ifc_mflags &= ~MF_CHECK_MTU_PROBE; 6991 return; 6992 } 6993 6994 LSQ_DEBUG("MTU ratio: %hu / %hu = %.4f", 6995 cpath->cop_path.np_pack_size, mtu_ceiling, 6996 (float) cpath->cop_path.np_pack_size / (float) mtu_ceiling); 6997 6998 if (!ds->ds_failed_size && mtu_ceiling < 1500) 6999 /* Try the largest ethernet MTU immediately */ 7000 probe_sz = mtu_ceiling; 7001 else if (cpath->cop_path.np_pack_size * 2 >= mtu_ceiling) 7002 /* Pick half-way point */ 7003 probe_sz = (mtu_ceiling + cpath->cop_path.np_pack_size) / 2; 7004 else 7005 probe_sz = cpath->cop_path.np_pack_size * 2; 7006 7007 /* XXX Changing np_pack_size is action at a distance */ 7008 saved_packet_sz = cpath->cop_path.np_pack_size; 7009 cpath->cop_path.np_pack_size = probe_sz; 7010 packet_out = lsquic_send_ctl_new_packet_out(&conn->ifc_send_ctl, 7011 0, PNS_APP, CUR_NPATH(conn)); 7012 if (!packet_out) 7013 goto restore_packet_size; 7014 sz = conn->ifc_conn.cn_pf->pf_gen_ping_frame( 7015 packet_out->po_data + packet_out->po_data_sz, 7016 lsquic_packet_out_avail(packet_out)); 7017 if (sz < 0) { 7018 ABORT_ERROR("gen_ping_frame failed"); 7019 goto restore_packet_size; 7020 } 7021 /* We don't record frame records for MTU probes as they are never 7022 * resized, only discarded. 
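     * A probe that is never acknowledged is picked up by the DS_PROBE_SENT
     * branch at the top of this function and counted as lost.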
7023 */ 7024 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz); 7025 packet_out->po_frame_types |= 1 << QUIC_FRAME_PING; 7026 avail = lsquic_packet_out_avail(packet_out); 7027 if (avail) 7028 { 7029 memset(packet_out->po_data + packet_out->po_data_sz, 0, avail); 7030 lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, avail); 7031 packet_out->po_frame_types |= 1 << QUIC_FRAME_PADDING; 7032 } 7033 packet_out->po_flags |= PO_MTU_PROBE; 7034 lsquic_send_ctl_scheduled_one(&conn->ifc_send_ctl, packet_out); 7035 LSQ_DEBUG("generated MTU probe of %hu bytes in packet %"PRIu64, 7036 cpath->cop_path.np_pack_size, packet_out->po_packno); 7037#ifndef NDEBUG 7038 ds->ds_probe_sent = now; 7039#endif 7040 ds->ds_probe_packno = packet_out->po_packno; 7041 ds->ds_probed_size = probe_sz; 7042 ds->ds_flags |= DS_PROBE_SENT; 7043 ++ds->ds_probe_count; 7044 conn->ifc_mflags &= ~MF_CHECK_MTU_PROBE; 7045 assert(!lsquic_alarmset_is_set(&conn->ifc_alset, AL_MTU_PROBE)); 7046 lsquic_alarmset_set(&conn->ifc_alset, AL_MTU_PROBE, 7047 now + conn->ifc_enpub->enp_mtu_probe_timer); 7048 restore_packet_size: 7049 cpath->cop_path.np_pack_size = saved_packet_sz; 7050} 7051 7052 7053static void 7054ietf_full_conn_ci_mtu_probe_acked (struct lsquic_conn *lconn, 7055 const struct lsquic_packet_out *packet_out) 7056{ 7057 struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn; 7058 struct conn_path *cpath; 7059 struct dplpmtud_state *ds; 7060 unsigned char path_id; 7061 7062 path_id = packet_out->po_path->np_path_id; 7063 cpath = &conn->ifc_paths[path_id]; 7064 ds = &cpath->cop_dplpmtud; 7065 if (ds->ds_probe_packno != packet_out->po_packno) 7066 { 7067 LSQ_DEBUG("Acked MTU probe packet %"PRIu64" on path %hhu, but it is " 7068 "old: discard", packet_out->po_packno, path_id); 7069 return; 7070 } 7071 ds->ds_flags &= ~DS_PROBE_SENT; 7072 ds->ds_probe_count = 0; 7073 7074 cpath->cop_path.np_pack_size = lsquic_packet_out_sent_sz(&conn->ifc_conn, 7075 packet_out); 7076 LSQ_INFO("update path %hhu MTU to %hu bytes", path_id, 7077 cpath->cop_path.np_pack_size); 7078 conn->ifc_mflags &= ~MF_CHECK_MTU_PROBE; 7079 lsquic_alarmset_set(&conn->ifc_alset, AL_MTU_PROBE, 7080 packet_out->po_sent + conn->ifc_enpub->enp_mtu_probe_timer); 7081 LSQ_DEBUG("set alarm to %"PRIu64" usec ", packet_out->po_sent + conn->ifc_enpub->enp_mtu_probe_timer); 7082} 7083 7084 7085static void 7086mtu_probe_too_large (struct ietf_full_conn *conn, 7087 const struct lsquic_packet_out *packet_out) 7088{ 7089 struct conn_path *cpath; 7090 unsigned char path_id; 7091 7092 path_id = packet_out->po_path->np_path_id; 7093 cpath = &conn->ifc_paths[path_id]; 7094 cpath->cop_dplpmtud.ds_failed_size 7095 = lsquic_packet_out_sent_sz(&conn->ifc_conn, packet_out); 7096} 7097 7098 7099static void 7100ietf_full_conn_ci_retx_timeout (struct lsquic_conn *lconn) 7101{ 7102 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 7103 unsigned short pack_size; 7104 struct conn_path *cpath; 7105 int resize; 7106 7107 resize = 0; 7108 for (cpath = conn->ifc_paths; cpath < conn->ifc_paths + N_PATHS; ++cpath) 7109 if (cpath->cop_flags & COP_INITIALIZED) 7110 { 7111 pack_size = calc_base_packet_size(conn, 7112 NP_IS_IPv6(&cpath->cop_path)); 7113 if (cpath->cop_path.np_pack_size > pack_size) 7114 { 7115 LSQ_DEBUG("RTO occurred: change packet size of path %hhu " 7116 "to %hu bytes", cpath->cop_path.np_path_id, pack_size); 7117 cpath->cop_path.np_pack_size = pack_size; 7118 resize |= 1; 7119 } 7120 } 7121 7122 if (resize) 7123 
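        /* At least one path was shrunk back to its base packet size above;
         * lsquic_send_ctl_resize() presumably re-fits packets that were
         * already generated for the larger, now-suspect MTU.
         */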
lsquic_send_ctl_resize(&conn->ifc_send_ctl); 7124 else 7125 LSQ_DEBUG("RTO occurred, but no MTUs to reset"); 7126} 7127 7128 7129static enum tick_st 7130ietf_full_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now) 7131{ 7132 struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn; 7133 int have_delayed_packets, s; 7134 enum tick_st tick = 0; 7135 unsigned n; 7136 7137#define CLOSE_IF_NECESSARY() do { \ 7138 if (conn->ifc_flags & IFC_IMMEDIATE_CLOSE_FLAGS) \ 7139 { \ 7140 tick |= immediate_close(conn); \ 7141 goto close_end; \ 7142 } \ 7143} while (0) 7144 7145#define RETURN_IF_OUT_OF_PACKETS() do { \ 7146 if (!lsquic_send_ctl_can_send(&conn->ifc_send_ctl)) \ 7147 { \ 7148 if (0 == lsquic_send_ctl_n_scheduled(&conn->ifc_send_ctl)) \ 7149 { \ 7150 LSQ_DEBUG("used up packet allowance, quiet now (line %d)", \ 7151 __LINE__); \ 7152 tick |= TICK_QUIET; \ 7153 } \ 7154 else \ 7155 { \ 7156 LSQ_DEBUG("used up packet allowance, sending now (line %d)",\ 7157 __LINE__); \ 7158 tick |= TICK_SEND; \ 7159 } \ 7160 goto end; \ 7161 } \ 7162} while (0) 7163 7164 if (conn->ifc_flags & IFC_HAVE_SAVED_ACK) 7165 { 7166 (void) /* If there is an error, we'll fail shortly */ 7167 process_ack(conn, &conn->ifc_ack, conn->ifc_saved_ack_received, now); 7168 conn->ifc_flags &= ~IFC_HAVE_SAVED_ACK; 7169 } 7170 7171 maybe_set_noprogress_alarm(conn, now); 7172 7173 lsquic_send_ctl_tick_in(&conn->ifc_send_ctl, now); 7174 lsquic_send_ctl_set_buffer_stream_packets(&conn->ifc_send_ctl, 1); 7175 CLOSE_IF_NECESSARY(); 7176 7177 lsquic_alarmset_ring_expired(&conn->ifc_alset, now); 7178 CLOSE_IF_NECESSARY(); 7179 7180 /* To make things simple, only stream 1 is active until the handshake 7181 * has been completed. This will be adjusted in the future: the client 7182 * does not want to wait if it has the server information. 7183 */ 7184 if (conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE) 7185 process_streams_read_events(conn); 7186 else 7187 process_crypto_stream_read_events(conn); 7188 CLOSE_IF_NECESSARY(); 7189 7190 if (lsquic_send_ctl_pacer_blocked(&conn->ifc_send_ctl)) 7191 goto end_write; 7192 7193 if (conn->ifc_flags & IFC_FIRST_TICK) 7194 { 7195 conn->ifc_flags &= ~IFC_FIRST_TICK; 7196 have_delayed_packets = 0; 7197 } 7198 else 7199 /* If there are any scheduled packets at this point, it means that 7200 * they were not sent during previous tick; in other words, they 7201 * are delayed. When there are delayed packets, the only packet 7202 * we sometimes add is a packet with an ACK frame, and we add it 7203 * to the *front* of the queue. 7204 */ 7205 have_delayed_packets = 7206 lsquic_send_ctl_maybe_squeeze_sched(&conn->ifc_send_ctl); 7207 7208 if (should_generate_ack(conn, IFC_ACK_QUEUED) || 7209 (!have_delayed_packets && maybe_queue_opp_ack(conn))) 7210 { 7211 if (have_delayed_packets) 7212 lsquic_send_ctl_reset_packnos(&conn->ifc_send_ctl); 7213 7214 n = generate_ack_frame(conn, now); 7215 CLOSE_IF_NECESSARY(); 7216 7217 if (have_delayed_packets && n) 7218 lsquic_send_ctl_ack_to_front(&conn->ifc_send_ctl, n); 7219 } 7220 7221 if (have_delayed_packets) 7222 { 7223 /* The reason for not adding the other frames below to the packet 7224 * carrying ACK frame generated when there are delayed packets is 7225 * so that if the ACK packet itself is delayed, it can be dropped 7226 * and replaced by new ACK packet. This way, we are never more 7227 * than 1 packet over CWND. 7228 */ 7229 tick |= TICK_SEND; 7230 goto end; 7231 } 7232 7233 /* Try to fit MAX_DATA before checking if we have run out of room. 
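     * (A MAX_DATA frame is small -- one type byte plus a single varint, at
     * most nine bytes -- so it nearly always does fit.)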
    /* Try to fit MAX_DATA before checking if we have run out of room.
     * If it does not fit, it will be tried next time around.
     */
    if (lsquic_cfcw_fc_offsets_changed(&conn->ifc_pub.cfcw) ||
                                (conn->ifc_send_flags & SF_SEND_MAX_DATA))
    {
        conn->ifc_send_flags |= SF_SEND_MAX_DATA;
        generate_max_data_frame(conn);
        CLOSE_IF_NECESSARY();
    }

    if (conn->ifc_send_flags & SEND_WITH_FUNCS)
    {
        enum send send;
        for (send = 0; send < N_SEND; ++send)
            if (conn->ifc_send_flags & (1 << send) & SEND_WITH_FUNCS)
            {
                send_funcs[send](conn, now);
                CLOSE_IF_NECESSARY();
            }
    }

    if (conn->ifc_mflags & MF_CHECK_MTU_PROBE)
        check_or_schedule_mtu_probe(conn, now);

    n = lsquic_send_ctl_reschedule_packets(&conn->ifc_send_ctl);
    if (n > 0)
        CLOSE_IF_NECESSARY();

    if (conn->ifc_conn.cn_flags & LSCONN_SEND_BLOCKED)
    {
        RETURN_IF_OUT_OF_PACKETS();
        if (generate_blocked_frame(conn))
            conn->ifc_conn.cn_flags &= ~LSCONN_SEND_BLOCKED;
    }

    if (!TAILQ_EMPTY(&conn->ifc_pub.sending_streams))
    {
        process_streams_ready_to_send(conn);
        CLOSE_IF_NECESSARY();
    }

    lsquic_send_ctl_set_buffer_stream_packets(&conn->ifc_send_ctl, 0);
    if (!(conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE))
    {
        s = lsquic_send_ctl_schedule_buffered(&conn->ifc_send_ctl,
                                                        BPT_HIGHEST_PRIO);
        conn->ifc_flags |= (s < 0) << IFC_BIT_ERROR;
        if (0 == s)
            process_crypto_stream_write_events(conn);
        goto end_write;
    }

    maybe_conn_flush_special_streams(conn);

    s = lsquic_send_ctl_schedule_buffered(&conn->ifc_send_ctl,
                                                        BPT_HIGHEST_PRIO);
    conn->ifc_flags |= (s < 0) << IFC_BIT_ERROR;
    if (!write_is_possible(conn))
        goto end_write;

    if (!TAILQ_EMPTY(&conn->ifc_pub.write_streams))
    {
        process_streams_write_events(conn, 1);
        if (!write_is_possible(conn))
            goto end_write;
    }

    s = lsquic_send_ctl_schedule_buffered(&conn->ifc_send_ctl, BPT_OTHER_PRIO);
    conn->ifc_flags |= (s < 0) << IFC_BIT_ERROR;
    if (!write_is_possible(conn))
        goto end_write;

    if (!TAILQ_EMPTY(&conn->ifc_pub.write_streams))
        process_streams_write_events(conn, 0);

    lsquic_send_ctl_maybe_app_limited(&conn->ifc_send_ctl, CUR_NPATH(conn));

  end_write:
    if ((conn->ifc_flags & IFC_CLOSING) && conn_ok_to_close(conn))
    {
        LSQ_DEBUG("connection is OK to close");
        conn->ifc_flags |= IFC_TICK_CLOSE;
        if ((conn->ifc_send_flags & SF_SEND_CONN_CLOSE)
            /* This is normal termination sequence for the server:
             *
             * Generate CONNECTION_CLOSE frame if we are responding to one
             * or have packets scheduled to send
             */
            && (!(conn->ifc_flags & (IFC_SERVER|IFC_HSK_FAILED))
                    || (conn->ifc_flags & (IFC_RECV_CLOSE|IFC_GOAWAY_CLOSE))
                    || 0 != lsquic_send_ctl_n_scheduled(&conn->ifc_send_ctl))
            )
        {
            RETURN_IF_OUT_OF_PACKETS();
            generate_connection_close_packet(conn);
            tick |= TICK_SEND|TICK_CLOSE;
        }
        else
            tick |= TICK_CLOSE;
        goto end;
    }

    if (0 == lsquic_send_ctl_n_scheduled(&conn->ifc_send_ctl))
    {
        if (conn->ifc_send_flags & SF_SEND_PING)
        {
            RETURN_IF_OUT_OF_PACKETS();
            generate_ping_frame(conn, now);
            CLOSE_IF_NECESSARY();
            assert(lsquic_send_ctl_n_scheduled(&conn->ifc_send_ctl) != 0);
        }
        else
        {
            tick |= TICK_QUIET;
            goto end;
        }
    }
    else if (conn->ifc_ping_period)
    {
        lsquic_alarmset_unset(&conn->ifc_alset, AL_PING);
        lsquic_send_ctl_sanity_check(&conn->ifc_send_ctl);
        conn->ifc_send_flags &= ~SF_SEND_PING;   /* It may have rung */
    }

    /* [draft-ietf-quic-transport-11] Section 7.9:
     *
     *     The PING frame can be used to keep a connection alive when an
     *     application or application protocol wishes to prevent the
     *     connection from timing out.  An application protocol SHOULD
     *     provide guidance about the conditions under which generating a
     *     PING is recommended.  This guidance SHOULD indicate whether it is
     *     the client or the server that is expected to send the PING.
     *     Having both endpoints send PING frames without coordination can
     *     produce an excessive number of packets and poor performance.
     */
    if (conn->ifc_ping_period
                        && lsquic_hash_count(conn->ifc_pub.all_streams) > 0)
        lsquic_alarmset_set(&conn->ifc_alset, AL_PING,
                                            now + conn->ifc_ping_period);

    tick |= TICK_SEND;

  end:
    service_streams(conn);
    CLOSE_IF_NECESSARY();

  close_end:
    lsquic_send_ctl_set_buffer_stream_packets(&conn->ifc_send_ctl, 1);
    lsquic_send_ctl_tick_out(&conn->ifc_send_ctl);
    return tick;
}
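
/* Map connection flags to the public LSQUIC_CONN_STATUS values and, if an
 * error message was recorded, copy it into the caller's buffer.  An
 * illustrative sketch of how an application might reach this code through
 * the public API (assuming the lsquic_conn_status() prototype declared in
 * lsquic.h):
 *
 *     char errbuf[0x100];
 *     enum LSQUIC_CONN_STATUS st
 *                     = lsquic_conn_status(c, errbuf, sizeof(errbuf));
 *
 * where `c' is the application's lsquic_conn_t pointer and `st' would be,
 * for example, LSCONN_ST_CONNECTED or LSCONN_ST_ERROR.
 */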
static enum LSQUIC_CONN_STATUS
ietf_full_conn_ci_status (struct lsquic_conn *lconn, char *errbuf, size_t bufsz)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    size_t n;

    /* Test the common case first: */
    if (!(conn->ifc_flags & (IFC_ERROR
                            |IFC_TIMED_OUT
                            |IFC_ABORTED
                            |IFC_GOT_PRST
                            |IFC_HSK_FAILED
                            |IFC_CLOSING
                            |IFC_GOING_AWAY)))
    {
        if (lconn->cn_flags & LSCONN_PEER_GOING_AWAY)
            return LSCONN_ST_PEER_GOING_AWAY;
        else if (lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
            return LSCONN_ST_CONNECTED;
        else
            return LSCONN_ST_HSK_IN_PROGRESS;
    }

    if (errbuf && bufsz)
    {
        if (conn->ifc_errmsg)
        {
            n = bufsz < MAX_ERRMSG ? bufsz : MAX_ERRMSG;
            strncpy(errbuf, conn->ifc_errmsg, n);
            errbuf[n - 1] = '\0';
        }
        else
            errbuf[0] = '\0';
    }

    if (conn->ifc_flags & IFC_ERROR)
        return LSCONN_ST_ERROR;
    if (conn->ifc_flags & IFC_TIMED_OUT)
        return LSCONN_ST_TIMED_OUT;
    if (conn->ifc_flags & IFC_ABORTED)
        return LSCONN_ST_USER_ABORTED;
    if (conn->ifc_flags & IFC_GOT_PRST)
        return LSCONN_ST_RESET;
    if (conn->ifc_flags & IFC_HSK_FAILED)
        return LSCONN_ST_HSK_FAILURE;
    if (conn->ifc_flags & IFC_CLOSING)
        return LSCONN_ST_CLOSED;
    assert(conn->ifc_flags & IFC_GOING_AWAY);
    return LSCONN_ST_GOING_AWAY;
}
static void
ietf_full_conn_ci_stateless_reset (struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    conn->ifc_flags |= IFC_GOT_PRST;
    LSQ_INFO("stateless reset reported");
}


static struct lsquic_conn_ctx *
ietf_full_conn_ci_get_ctx (const struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    return conn->ifc_conn_ctx;
}


static struct lsquic_engine *
ietf_full_conn_ci_get_engine (struct lsquic_conn *lconn)
{
    struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn;
    return conn->ifc_enpub->enp_engine;
}


static void
ietf_full_conn_ci_set_ctx (struct lsquic_conn *lconn, lsquic_conn_ctx_t *ctx)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    conn->ifc_conn_ctx = ctx;
}


static unsigned
ietf_full_conn_ci_n_pending_streams (const struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    return conn->ifc_n_delayed_streams;
}


static unsigned
ietf_full_conn_ci_n_avail_streams (const struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    return avail_streams_count(conn, conn->ifc_flags & IFC_SERVER, SD_BIDI);
}


static int
handshake_done_or_doing_sess_resume (const struct ietf_full_conn *conn)
{
    return (conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE)
        || conn->ifc_conn.cn_esf_c->esf_is_sess_resume_enabled(
                                            conn->ifc_conn.cn_enc_session);
}
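
/* Create an outgoing bidirectional stream if the peer's stream limit
 * allows it; otherwise either report NULL (when going away) or remember
 * the request in ifc_n_delayed_streams and create the stream later, when
 * MAX_STREAMS makes room.  Illustrative sketch of the calling side
 * (assuming the public lsquic_conn_make_stream() declared in lsquic.h):
 *
 *     lsquic_conn_make_stream(c);
 *
 * The on_new_stream() callback fires when the stream is actually created,
 * which may happen on a later tick, or with a NULL stream pointer if the
 * connection is going away.
 */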
static void
ietf_full_conn_ci_make_stream (struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;

    if (handshake_done_or_doing_sess_resume(conn)
        && ietf_full_conn_ci_n_avail_streams(lconn) > 0)
    {
        if (0 != create_bidi_stream_out(conn))
            ABORT_ERROR("could not create new stream: %s", strerror(errno));
    }
    else if (either_side_going_away(conn))
    {
        (void) conn->ifc_enpub->enp_stream_if->on_new_stream(
                                conn->ifc_enpub->enp_stream_if_ctx, NULL);
        LSQ_DEBUG("going away: no streams will be initiated");
    }
    else
    {
        ++conn->ifc_n_delayed_streams;
        LSQ_DEBUG("delayed stream creation.  Backlog size: %u",
                                            conn->ifc_n_delayed_streams);
    }
}


static void
ietf_full_conn_ci_internal_error (struct lsquic_conn *lconn,
                                                const char *format, ...)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    LSQ_INFO("internal error reported");
    ABORT_QUIETLY(0, TEC_INTERNAL_ERROR, "Internal error");
}


static void
ietf_full_conn_ci_abort_error (struct lsquic_conn *lconn, int is_app,
                                unsigned error_code, const char *fmt, ...)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    va_list ap;
    const char *err_str, *percent;
    char err_buf[0x100];

    percent = strchr(fmt, '%');
    if (percent)
    {
        va_start(ap, fmt);
        vsnprintf(err_buf, sizeof(err_buf), fmt, ap);
        va_end(ap);
        err_str = err_buf;
    }
    else
        err_str = fmt;
    LSQ_INFO("abort error: is_app: %d; error code: %u; error str: %s",
        is_app, error_code, err_str);
    ABORT_QUIETLY(is_app, error_code, "%s", err_str);
}


static int
path_matches_local_sa (const struct network_path *path,
                                        const struct sockaddr *local_sa)
{
    return lsquic_sockaddr_eq(NP_LOCAL_SA(path), local_sa);
}


static const lsquic_cid_t *
ietf_full_conn_ci_get_log_cid (const struct lsquic_conn *lconn)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;

    if (lconn->cn_flags & LSCONN_SERVER)
    {
        if (CUR_DCID(conn)->len)
            return CUR_DCID(conn);
        else
            return CN_SCID(lconn);
    }
    if (CN_SCID(lconn)->len)
        return CN_SCID(lconn);
    else
        return CUR_DCID(conn);
}


static struct network_path *
ietf_full_conn_ci_get_path (struct lsquic_conn *lconn,
                                                const struct sockaddr *sa)
{
    struct ietf_full_conn *const conn = (struct ietf_full_conn *) lconn;
    struct conn_path *copath;

    if (NULL == sa || path_matches_local_sa(CUR_NPATH(conn), sa))
        return CUR_NPATH(conn);

    for (copath = conn->ifc_paths; copath < conn->ifc_paths
            + sizeof(conn->ifc_paths) / sizeof(conn->ifc_paths[0]); ++copath)
        if ((conn->ifc_used_paths & (1 << (copath - conn->ifc_paths)))
                        && path_matches_local_sa(&copath->cop_path, sa))
            return &copath->cop_path;

    return CUR_NPATH(conn);
}


static int
path_matches (const struct network_path *path,
            const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    return local_sa->sa_family == NP_LOCAL_SA(path)->sa_family
        && lsquic_sockaddr_eq(local_sa, NP_LOCAL_SA(path))
        && lsquic_sockaddr_eq(peer_sa, NP_PEER_SA(path));
}


static void
record_to_path (struct ietf_full_conn *conn, struct conn_path *copath,
        void *peer_ctx, const struct sockaddr *local_sa,
        const struct sockaddr *peer_sa)
{
    struct network_path *path;
    size_t len;
    char path_str[2][INET6_ADDRSTRLEN + sizeof(":65535")];

    LSQ_DEBUG("record path %d: (%s - %s)", (int) (copath - conn->ifc_paths),
            SA2STR(local_sa, path_str[0]), SA2STR(peer_sa, path_str[1]));
    path = &copath->cop_path;
    len = local_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                         : sizeof(struct sockaddr_in6);
    memcpy(NP_LOCAL_SA(path), local_sa, len);
    len = peer_sa->sa_family == AF_INET ? sizeof(struct sockaddr_in)
                                        : sizeof(struct sockaddr_in6);
    memcpy(NP_PEER_SA(path), peer_sa, len);
    path->np_peer_ctx = peer_ctx;
}
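
/* Find or allocate a path slot for the (local, peer) address pair of an
 * incoming packet.  Order of preference, as implemented below: the current
 * path if it matches; an already-used slot that matches; the first unused
 * slot; otherwise reuse a not-yet-validated path or, failing that, any
 * other used path; as a last resort, fall back to the current path ID.
 */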
static unsigned char
ietf_full_conn_ci_record_addrs (struct lsquic_conn *lconn, void *peer_ctx,
        const struct sockaddr *local_sa, const struct sockaddr *peer_sa)
{
    struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn;
    struct network_path *path;
    struct conn_path *copath, *first_unused, *first_unvalidated, *first_other,
                                                                    *victim;

    path = &conn->ifc_paths[conn->ifc_cur_path_id].cop_path;
    if (path_matches(path, local_sa, peer_sa))
    {
        path->np_peer_ctx = peer_ctx;
        return conn->ifc_cur_path_id;
    }

    first_unvalidated = NULL;
    first_unused = NULL;
    first_other = NULL;
    for (copath = conn->ifc_paths; copath < conn->ifc_paths
            + sizeof(conn->ifc_paths) / sizeof(conn->ifc_paths[0]); ++copath)
    {
        if (conn->ifc_used_paths & (1 << (copath - conn->ifc_paths)))
        {
            if (path_matches(&copath->cop_path, local_sa, peer_sa))
            {
                copath->cop_path.np_peer_ctx = peer_ctx;
                return copath - conn->ifc_paths;
            }
            if (!first_unvalidated
                            && (0 == (copath->cop_flags & COP_VALIDATED)))
                first_unvalidated = copath;
            else if (!first_other)
                first_other = copath;
        }
        else if (!first_unused)
            first_unused = copath;
    }

    if (first_unused)
    {
        record_to_path(conn, first_unused, peer_ctx, local_sa, peer_sa);
        if (0 == conn->ifc_used_paths && !(conn->ifc_flags & IFC_SERVER))
            /* First path is considered valid immediately */
            first_unused->cop_flags |= COP_VALIDATED;
        LSQ_DEBUG("record new path ID %d",
                                    (int) (first_unused - conn->ifc_paths));
        conn->ifc_used_paths |= 1 << (first_unused - conn->ifc_paths);
        return first_unused - conn->ifc_paths;
    }

    if (first_unvalidated || first_other)
    {
        victim = first_unvalidated ? first_unvalidated : first_other;
        record_to_path(conn, victim, peer_ctx, local_sa, peer_sa);
        return victim - conn->ifc_paths;
    }

    return conn->ifc_cur_path_id;
}


static void
ietf_full_conn_ci_drop_crypto_streams (struct lsquic_conn *lconn)
{
    struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn;
    drop_crypto_streams(conn);
}


void
ietf_full_conn_ci_count_garbage (struct lsquic_conn *lconn, size_t garbage_sz)
{
    struct ietf_full_conn *conn = (struct ietf_full_conn *) lconn;

    conn->ifc_pub.bytes_in = garbage_sz;
    LSQ_DEBUG("count %zu bytes of garbage, new value: %u bytes", garbage_sz,
        conn->ifc_pub.bytes_in);
}
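
/* Two conn_iface vtables are defined below.  They share all methods except
 * the packet send/sent callbacks: ietf_full_conn_prehsk_iface routes those
 * through the *_pre_hsk variants; presumably the connection is switched
 * over to ietf_full_conn_iface once the handshake completes (the switch
 * itself happens elsewhere in this file).
 */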
#define IETF_FULL_CONN_FUNCS \
    .ci_abort                = ietf_full_conn_ci_abort, \
    .ci_abort_error          = ietf_full_conn_ci_abort_error, \
    .ci_retire_cid           = ietf_full_conn_ci_retire_cid, \
    .ci_can_write_ack        = ietf_full_conn_ci_can_write_ack, \
    .ci_cancel_pending_streams = ietf_full_conn_ci_cancel_pending_streams, \
    .ci_client_call_on_new   = ietf_full_conn_ci_client_call_on_new, \
    .ci_close                = ietf_full_conn_ci_close, \
    .ci_count_garbage        = ietf_full_conn_ci_count_garbage, \
    .ci_destroy              = ietf_full_conn_ci_destroy, \
    .ci_drain_time           = ietf_full_conn_ci_drain_time, \
    .ci_drop_crypto_streams  = ietf_full_conn_ci_drop_crypto_streams, \
    .ci_get_ctx              = ietf_full_conn_ci_get_ctx, \
    .ci_get_engine           = ietf_full_conn_ci_get_engine, \
    .ci_get_log_cid          = ietf_full_conn_ci_get_log_cid, \
    .ci_get_path             = ietf_full_conn_ci_get_path, \
    .ci_going_away           = ietf_full_conn_ci_going_away, \
    .ci_hsk_done             = ietf_full_conn_ci_hsk_done, \
    .ci_internal_error       = ietf_full_conn_ci_internal_error, \
    .ci_is_push_enabled      = ietf_full_conn_ci_is_push_enabled, \
    .ci_is_tickable          = ietf_full_conn_ci_is_tickable, \
    .ci_make_stream          = ietf_full_conn_ci_make_stream, \
    .ci_mtu_probe_acked      = ietf_full_conn_ci_mtu_probe_acked, \
    .ci_n_avail_streams      = ietf_full_conn_ci_n_avail_streams, \
    .ci_n_pending_streams    = ietf_full_conn_ci_n_pending_streams, \
    .ci_next_tick_time       = ietf_full_conn_ci_next_tick_time, \
    .ci_packet_in            = ietf_full_conn_ci_packet_in, \
    .ci_push_stream          = ietf_full_conn_ci_push_stream, \
    .ci_record_addrs         = ietf_full_conn_ci_record_addrs, \
    .ci_report_live          = ietf_full_conn_ci_report_live, \
    .ci_retx_timeout         = ietf_full_conn_ci_retx_timeout, \
    .ci_set_ctx              = ietf_full_conn_ci_set_ctx, \
    .ci_status               = ietf_full_conn_ci_status, \
    .ci_stateless_reset      = ietf_full_conn_ci_stateless_reset, \
    .ci_tick                 = ietf_full_conn_ci_tick, \
    .ci_tls_alert            = ietf_full_conn_ci_tls_alert, \
    .ci_write_ack            = ietf_full_conn_ci_write_ack

static const struct conn_iface ietf_full_conn_iface = {
    IETF_FULL_CONN_FUNCS,
    .ci_next_packet_to_send  = ietf_full_conn_ci_next_packet_to_send,
    .ci_packet_not_sent      = ietf_full_conn_ci_packet_not_sent,
    .ci_packet_sent          = ietf_full_conn_ci_packet_sent,
    .ci_packet_too_large     = ietf_full_conn_ci_packet_too_large,
};
static const struct conn_iface *ietf_full_conn_iface_ptr =
                                                    &ietf_full_conn_iface;

static const struct conn_iface ietf_full_conn_prehsk_iface = {
    IETF_FULL_CONN_FUNCS,
    .ci_next_packet_to_send  = ietf_full_conn_ci_next_packet_to_send_pre_hsk,
    .ci_packet_not_sent      = ietf_full_conn_ci_packet_not_sent_pre_hsk,
    .ci_packet_sent          = ietf_full_conn_ci_packet_sent_pre_hsk,
};
static const struct conn_iface *ietf_full_conn_prehsk_iface_ptr =
                                                &ietf_full_conn_prehsk_iface;


static void
on_cancel_push (void *ctx, uint64_t push_id)
{
    struct ietf_full_conn *const conn = ctx;
    LSQ_DEBUG("TODO %s: %"PRIu64, __func__, push_id);
    /* TODO */
}


static void
on_max_push_id_client (void *ctx, uint64_t push_id)
{
    struct ietf_full_conn *const conn = ctx;
    ABORT_QUIETLY(1, HEC_FRAME_UNEXPECTED, "client does not expect the server "
        "to send MAX_PUSH_ID frame");
}


static void
on_max_push_id (void *ctx, uint64_t push_id)
{
    struct ietf_full_conn *const conn = ctx;

    if (!(conn->ifc_u.ser.ifser_flags & IFSER_MAX_PUSH_ID)
                        || push_id > conn->ifc_u.ser.ifser_max_push_id)
    {
        conn->ifc_u.ser.ifser_max_push_id = push_id;
        conn->ifc_u.ser.ifser_flags |= IFSER_MAX_PUSH_ID;
        LSQ_DEBUG("set MAX_PUSH_ID to %"PRIu64, push_id);
    }
    else if (push_id < conn->ifc_u.ser.ifser_max_push_id)
        ABORT_QUIETLY(1, HEC_ID_ERROR, "MAX_PUSH_ID reduced from "
            "%"PRIu64" to %"PRIu64, conn->ifc_u.ser.ifser_max_push_id,
            push_id);
    else
        LSQ_DEBUG("ignore repeated value of MAX_PUSH_ID=%"PRIu64, push_id);
}
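
/* The peer's SETTINGS frame drives our QPACK encoder configuration: the
 * dynamic table size and the number of risked (blocked) streams are each
 * the minimum of our own limits (es_qpack_enc_max_size,
 * es_qpack_enc_max_blocked) and what the peer advertised
 * (SETTINGS_QPACK_MAX_TABLE_CAPACITY, SETTINGS_QPACK_BLOCKED_STREAMS).
 */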
static void
on_settings_frame (void *ctx)
{
    struct ietf_full_conn *const conn = ctx;
    unsigned dyn_table_size, max_risked_streams;

    LSQ_DEBUG("SETTINGS frame");
    if (conn->ifc_flags & IFC_HAVE_PEER_SET)
    {
        ABORT_WARN("second incoming SETTING frame on HTTP control stream");
        return;
    }

    conn->ifc_flags |= IFC_HAVE_PEER_SET;
    dyn_table_size = MIN(conn->ifc_settings->es_qpack_enc_max_size,
                            conn->ifc_peer_hq_settings.header_table_size);
    max_risked_streams = MIN(conn->ifc_settings->es_qpack_enc_max_blocked,
                        conn->ifc_peer_hq_settings.qpack_blocked_streams);
    if (0 != lsquic_qeh_settings(&conn->ifc_qeh,
            conn->ifc_peer_hq_settings.header_table_size,
            dyn_table_size, max_risked_streams, conn->ifc_flags & IFC_SERVER))
        ABORT_WARN("could not initialize QPACK encoder handler");
    if (avail_streams_count(conn, conn->ifc_flags & IFC_SERVER, SD_UNI) > 0)
    {
        if (0 != create_qenc_stream_out(conn))
            ABORT_WARN("cannot create outgoing QPACK encoder stream");
    }
    else
    {
        queue_streams_blocked_frame(conn, SD_UNI);
        LSQ_DEBUG("cannot create QPACK encoder stream due to unidir limit");
    }
    maybe_create_delayed_streams(conn);
}


static void
on_setting (void *ctx, uint64_t setting_id, uint64_t value)
{
    struct ietf_full_conn *const conn = ctx;

    switch (setting_id)
    {
    case HQSID_QPACK_BLOCKED_STREAMS:
        LSQ_DEBUG("Peer's SETTINGS_QPACK_BLOCKED_STREAMS=%"PRIu64, value);
        conn->ifc_peer_hq_settings.qpack_blocked_streams = value;
        break;
    case HQSID_QPACK_MAX_TABLE_CAPACITY:
        LSQ_DEBUG("Peer's SETTINGS_QPACK_MAX_TABLE_CAPACITY=%"PRIu64, value);
        conn->ifc_peer_hq_settings.header_table_size = value;
        break;
    case HQSID_MAX_HEADER_LIST_SIZE:
        LSQ_DEBUG("Peer's SETTINGS_MAX_HEADER_LIST_SIZE=%"PRIu64, value);
        conn->ifc_peer_hq_settings.max_header_list_size = value;
        /* TODO: apply it */
        break;
    default:
        LSQ_DEBUG("received unknown SETTING 0x%"PRIX64"=0x%"PRIX64
                                        "; ignore it", setting_id, value);
        break;
    }
}
static void
on_goaway_server_27 (void *ctx, uint64_t stream_id)
{
    struct ietf_full_conn *const conn = ctx;
    ABORT_QUIETLY(1, HEC_FRAME_UNEXPECTED,
        "client should not send GOAWAY frames");
}


static void
on_goaway_client_28 (void *ctx, uint64_t stream_id)
{
    struct ietf_full_conn *const conn = ctx;
    struct lsquic_stream *stream;
    struct lsquic_hash_elem *el;
    enum stream_id_type sit;

    sit = stream_id & SIT_MASK;
    if (sit != SIT_BIDI_CLIENT)
    {
        ABORT_QUIETLY(1, HEC_ID_ERROR,
            "stream ID %"PRIu64" in GOAWAY frame", stream_id);
        return;
    }

    if (conn->ifc_conn.cn_flags & LSCONN_PEER_GOING_AWAY)
    {
        LSQ_DEBUG("ignore duplicate GOAWAY frame");
        return;
    }

    conn->ifc_conn.cn_flags |= LSCONN_PEER_GOING_AWAY;
    LSQ_DEBUG("received GOAWAY frame, last good stream ID: %"PRIu64,
                                                                stream_id);
    if (conn->ifc_enpub->enp_stream_if->on_goaway_received)
        conn->ifc_enpub->enp_stream_if->on_goaway_received(&conn->ifc_conn);

    for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el;
                        el = lsquic_hash_next(conn->ifc_pub.all_streams))
    {
        stream = lsquic_hashelem_getdata(el);
        if (stream->id >= stream_id
                        && (stream->id & SIT_MASK) == SIT_BIDI_CLIENT)
        {
            lsquic_stream_received_goaway(stream);
        }
    }
}


static void
on_goaway_client (void *ctx, uint64_t stream_id)
{
    struct ietf_full_conn *const conn = ctx;
    struct lsquic_stream *stream;
    struct lsquic_hash_elem *el;
    enum stream_id_type sit;

    sit = stream_id & SIT_MASK;
    if (sit != SIT_BIDI_CLIENT)
    {
        ABORT_QUIETLY(1, HEC_ID_ERROR,
            "stream ID %"PRIu64" in GOAWAY frame", stream_id);
        return;
    }

    LSQ_DEBUG("received GOAWAY frame, last good stream ID: %"PRIu64,
                                                                stream_id);

    if (conn->ifc_conn.cn_flags & LSCONN_PEER_GOING_AWAY)
    {
        if (stream_id == conn->ifc_u.cli.ifcli_min_goaway_stream_id)
        {
            LSQ_DEBUG("ignore duplicate GOAWAY frame");
            return;
        }
        if (stream_id > conn->ifc_u.cli.ifcli_min_goaway_stream_id)
        {
            ABORT_QUIETLY(1, HEC_ID_ERROR,
                "stream ID %"PRIu64" is larger than one already seen in a "
                "previous GOAWAY frame, %"PRIu64, stream_id,
                conn->ifc_u.cli.ifcli_min_goaway_stream_id);
            return;
        }
    }
    else
    {
        conn->ifc_u.cli.ifcli_min_goaway_stream_id = stream_id;
        conn->ifc_conn.cn_flags |= LSCONN_PEER_GOING_AWAY;
        if (conn->ifc_enpub->enp_stream_if->on_goaway_received)
            conn->ifc_enpub->enp_stream_if->on_goaway_received(
                                                            &conn->ifc_conn);
    }

    for (el = lsquic_hash_first(conn->ifc_pub.all_streams); el;
                        el = lsquic_hash_next(conn->ifc_pub.all_streams))
    {
        stream = lsquic_hashelem_getdata(el);
        if (stream->id >= stream_id
                        && (stream->id & SIT_MASK) == SIT_BIDI_CLIENT)
        {
            lsquic_stream_received_goaway(stream);
        }
    }
}


static void
on_goaway_server (void *ctx, uint64_t max_push_id)
{
    /* TODO: cancel pushes? */
}


static void
on_unexpected_frame (void *ctx, uint64_t frame_type)
{
    struct ietf_full_conn *const conn = ctx;
    ABORT_QUIETLY(1, HEC_FRAME_UNEXPECTED, "Frame type %"PRIu64" is not "
        "allowed on the control stream", frame_type);
}
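
/* hcsi_on_new() further below picks one of these control-stream callback
 * tables based on role (client or server) and draft version.  For a given
 * role, the tables differ only in the GOAWAY handler: the draft-27 server
 * handler rejects any GOAWAY from the client, the client handlers differ
 * in how strictly repeated GOAWAY frames are validated, and the server
 * handler for drafts 28/29 is still a stub (see on_goaway_server).
 */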
static const struct hcsi_callbacks hcsi_callbacks_server_27 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_server_27,
    .on_unexpected_frame    = on_unexpected_frame,
};

static const struct hcsi_callbacks hcsi_callbacks_client_27 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id_client,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_client_28 /* sic */,
    .on_unexpected_frame    = on_unexpected_frame,
};


static const struct hcsi_callbacks hcsi_callbacks_server_28 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_server /* sic */,
    .on_unexpected_frame    = on_unexpected_frame,
};

static const struct hcsi_callbacks hcsi_callbacks_client_28 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id_client,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_client_28,
    .on_unexpected_frame    = on_unexpected_frame,
};


static const struct hcsi_callbacks hcsi_callbacks_server_29 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_server,
    .on_unexpected_frame    = on_unexpected_frame,
};

static const struct hcsi_callbacks hcsi_callbacks_client_29 =
{
    .on_cancel_push         = on_cancel_push,
    .on_max_push_id         = on_max_push_id_client,
    .on_settings_frame      = on_settings_frame,
    .on_setting             = on_setting,
    .on_goaway              = on_goaway_client,
    .on_unexpected_frame    = on_unexpected_frame,
};


static lsquic_stream_ctx_t *
hcsi_on_new (void *stream_if_ctx, struct lsquic_stream *stream)
{
    struct ietf_full_conn *const conn = (void *) stream_if_ctx;
    const struct hcsi_callbacks *callbacks;

    conn->ifc_stream_hcsi = stream;

    switch ((!!(conn->ifc_flags & IFC_SERVER) << 8) | conn->ifc_conn.cn_version)
    {
    case (0 << 8) | LSQVER_ID27:
        callbacks = &hcsi_callbacks_client_27;
        break;
    case (1 << 8) | LSQVER_ID27:
        callbacks = &hcsi_callbacks_server_27;
        break;
    case (0 << 8) | LSQVER_ID28:
        callbacks = &hcsi_callbacks_client_28;
        break;
    case (1 << 8) | LSQVER_ID28:
        callbacks = &hcsi_callbacks_server_28;
        break;
    case (0 << 8) | LSQVER_ID29:
        callbacks = &hcsi_callbacks_client_29;
        break;
    default:
        assert(0);
        /* fallthru */
    case (1 << 8) | LSQVER_ID29:
        callbacks = &hcsi_callbacks_server_29;
        break;
    }
    lsquic_hcsi_reader_init(&conn->ifc_hcsi.reader, &conn->ifc_conn,
                                                        callbacks, conn);
    lsquic_stream_wantread(stream, 1);
    return stream_if_ctx;
}


struct feed_hcsi_ctx
{
    struct ietf_full_conn *conn;
    int s;
};
static size_t
feed_hcsi_reader (void *ctx, const unsigned char *buf, size_t bufsz, int fin)
{
    struct feed_hcsi_ctx *feed_ctx = ctx;
    struct ietf_full_conn *conn = feed_ctx->conn;

    feed_ctx->s = lsquic_hcsi_reader_feed(&conn->ifc_hcsi.reader, buf, bufsz);
    return bufsz;
}


static void
hcsi_on_read (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
    struct ietf_full_conn *const conn = (void *) ctx;
    struct lsquic_conn *const lconn = &conn->ifc_conn;
    struct feed_hcsi_ctx feed_ctx = { conn, 0, };
    ssize_t nread;

    nread = lsquic_stream_readf(stream, feed_hcsi_reader, &feed_ctx);
    LSQ_DEBUG("fed %zd bytes to HTTP control stream reader, status=%d",
        nread, feed_ctx.s);
    if (nread < 0)
    {
        lsquic_stream_wantread(stream, 0);
        ABORT_WARN("error reading from HTTP control stream");
    }
    else if (nread == 0)
    {
        lsquic_stream_wantread(stream, 0);
        LSQ_INFO("control stream closed by peer: abort connection");
        lconn->cn_if->ci_abort_error(lconn, 1,
            HEC_CLOSED_CRITICAL_STREAM, "control stream closed");
    }
    else if (feed_ctx.s != 0)
    {
        lsquic_stream_wantread(stream, 0);
        ABORT_WARN("error processing HTTP control stream");
    }
}


static void
hcsi_on_write (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
    assert(0);
}


static void
hcsi_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
    struct ietf_full_conn *const conn = (void *) ctx;
    conn->ifc_stream_hcsi = NULL;
}


static const struct lsquic_stream_if hcsi_if =
{
    .on_new_stream  = hcsi_on_new,
    .on_read        = hcsi_on_read,
    .on_write       = hcsi_on_write,
    .on_close       = hcsi_on_close,
};


static void
apply_uni_stream_class (struct ietf_full_conn *conn,
                        struct lsquic_stream *stream, uint64_t stream_type)
{
    switch (stream_type)
    {
    case HQUST_CONTROL:
        if (!conn->ifc_stream_hcsi)
        {
            LSQ_DEBUG("Incoming HTTP control stream ID: %"PRIu64,
                                                                stream->id);
            lsquic_stream_set_stream_if(stream, &hcsi_if, conn);
        }
        else
        {
            ABORT_QUIETLY(1, HEC_STREAM_CREATION_ERROR,
                "Control stream %"PRIu64" already exists: cannot create "
                "second control stream %"PRIu64, conn->ifc_stream_hcsi->id,
                stream->id);
            lsquic_stream_close(stream);
        }
        break;
    case HQUST_QPACK_ENC:
        if (!lsquic_qdh_has_enc_stream(&conn->ifc_qdh))
        {
            LSQ_DEBUG("Incoming QPACK encoder stream ID: %"PRIu64,
                                                                stream->id);
            lsquic_stream_set_stream_if(stream, lsquic_qdh_enc_sm_in_if,
                                                            &conn->ifc_qdh);
        }
        else
        {
            ABORT_QUIETLY(1, HEC_STREAM_CREATION_ERROR,
                "Incoming QPACK encoder stream %"PRIu64" already exists: "
                "cannot create second stream %"PRIu64,
                conn->ifc_qdh.qdh_enc_sm_in->id, stream->id);
            lsquic_stream_close(stream);
        }
        break;
    case HQUST_QPACK_DEC:
        if (!lsquic_qeh_has_dec_stream(&conn->ifc_qeh))
        {
            LSQ_DEBUG("Incoming QPACK decoder stream ID: %"PRIu64,
                                                                stream->id);
            lsquic_stream_set_stream_if(stream, lsquic_qeh_dec_sm_in_if,
                                                            &conn->ifc_qeh);
        }
        else
        {
            ABORT_QUIETLY(1, HEC_STREAM_CREATION_ERROR,
                "Incoming QPACK decoder stream %"PRIu64" already exists: "
                "cannot create second stream %"PRIu64,
                conn->ifc_qeh.qeh_dec_sm_in->id, stream->id);
            lsquic_stream_close(stream);
        }
        break;
    case HQUST_PUSH:
        if (conn->ifc_flags & IFC_SERVER)
        {
            ABORT_QUIETLY(1, HEC_STREAM_CREATION_ERROR,
                "clients can't open push streams");
        }
        else
        {
            LSQ_DEBUG("Refuse push stream %"PRIu64, stream->id);
            maybe_schedule_ss_for_stream(conn, stream->id,
                                                    HEC_REQUEST_CANCELLED);
        }
        lsquic_stream_close(stream);
        break;
    default:
        LSQ_DEBUG("unknown unidirectional stream %"PRIu64" of type %"PRIu64
            ", will send STOP_SENDING and close", stream->id, stream_type);
        /* XXX This approach may be risky, as it assumes that the peer updates
         * its flow control window correctly.  The safe way to do it is to
         * create a stream and wait for RESET_STREAM frame.  This is not an
         * issue in the normal case, as the server does not allow the peer to
         * create more than 3 unidirectional streams.
         */
        maybe_schedule_ss_for_stream(conn, stream->id,
                                                HEC_STREAM_CREATION_ERROR);
        lsquic_stream_close(stream);
        break;
    }
}
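
/* Unidirectional stream classifier.  Every incoming HTTP/3 unidirectional
 * stream starts with a variable-length integer giving its type, and that
 * varint may arrive split across reads.  unicla_readf() below feeds bytes
 * to lsquic_varint_read_nb() until the varint is complete; unicla_on_read()
 * then hands the stream off to apply_uni_stream_class() with the decoded
 * type (HQUST_CONTROL, HQUST_QPACK_ENC, HQUST_QPACK_DEC, HQUST_PUSH, or an
 * unknown value, which is refused with STOP_SENDING).
 */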
static lsquic_stream_ctx_t *
unicla_on_new (void *stream_if_ctx, struct lsquic_stream *stream)
{
    lsquic_stream_wantread(stream, 1);
    stream->sm_uni_type_state.pos = 0;
    return stream_if_ctx;
}


struct unicla_ctx
{
    struct varint_read_state *state;
    enum { UC_MORE, UC_ERROR, UC_DONE, } status;
};


static const char *const unicla_stat2str[] = {
    [UC_ERROR] = "UC_ERROR", [UC_MORE] = "UC_MORE", [UC_DONE] = "UC_DONE",
};


static size_t
unicla_readf (void *ctx, const unsigned char *begin, size_t sz, int fin)
{
    struct unicla_ctx *const unicla_ctx = ctx;
    const unsigned char *buf = begin;
    int s;

    switch (unicla_ctx->status)
    {
    case UC_MORE:
        s = lsquic_varint_read_nb(&buf, begin + sz, unicla_ctx->state);
        if (s == 0)
            unicla_ctx->status = UC_DONE;
        else if (fin)
            unicla_ctx->status = UC_ERROR;
        return buf - begin;
    case UC_DONE:
        return 0;
    default:
        return sz;
    }
}


static void
unicla_on_read (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
    struct ietf_full_conn *const conn = (void *) ctx;
    struct unicla_ctx unicla_ctx = { .state = &stream->sm_uni_type_state,
                                     .status = UC_MORE, };
    ssize_t nr;

    nr = lsquic_stream_readf(stream, unicla_readf, &unicla_ctx);
    LSQ_DEBUG("unistream classifier read %zd byte%.*s, status: %s", nr,
        nr != 1, "s", unicla_stat2str[unicla_ctx.status]);
    if (nr > 0)
    {
        if (unicla_ctx.status == UC_DONE)
            apply_uni_stream_class(conn, stream, unicla_ctx.state->val);
        else if (unicla_ctx.status == UC_ERROR)
            goto unexpected_fin;
        /* else: do nothing */
    }
    else if (nr < 0) /* This should never happen */
    {
        LSQ_WARN("unicla: cannot read from stream %"PRIu64, stream->id);
        lsquic_stream_close(stream);
    }
    else
    {
  unexpected_fin:
        LSQ_INFO("unicla: unexpected FIN while reading stream type from "
            "stream %"PRIu64, stream->id);
        lsquic_stream_close(stream);
    }
}


static void
unicla_on_write (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
    assert(0);
}


static void
unicla_on_close (struct lsquic_stream *stream, lsquic_stream_ctx_t *ctx)
{
}


static const struct lsquic_stream_if unicla_if =
{
    .on_new_stream  = unicla_on_new,
    .on_read        = unicla_on_read,
    .on_write       = unicla_on_write,
    .on_close       = unicla_on_close,
};

static const struct lsquic_stream_if *unicla_if_ptr = &unicla_if;

typedef char dcid_elem_fits_in_128_bytes[sizeof(struct dcid_elem) <= 128 ? 1 : -1];