lsquic_mm.c revision afe3d363
/* Copyright (c) 2017 - 2020 LiteSpeed Technologies Inc. See LICENSE. */
/*
 * lsquic_mm.c -- Memory manager.
 *
 * Centralizes allocation of frequently used objects (incoming/outgoing
 * packets and their data buffers, 4KB/16KB scratch pages) behind per-size
 * free-list pools, falling back to plain malloc/free when LSQUIC_USE_POOLS
 * is disabled.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "fiu-local.h"

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_malo.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_rtt.h"
#include "lsquic_packet_common.h"
#include "lsquic_mini_conn.h"
#include "lsquic_enc_sess.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_packet_gquic.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_full_conn.h"
#include "lsquic_varint.h"
#include "lsquic_hq.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"

#ifndef LSQUIC_LOG_POOL_STATS
#define LSQUIC_LOG_POOL_STATS 0
#endif

#if LSQUIC_LOG_POOL_STATS
#include "lsquic_logger.h"
#endif

#ifndef LSQUIC_USE_POOLS
#define LSQUIC_USE_POOLS 1
#endif

/* Used as a libfiu fault-injection action: simulate allocation failure. */
#define FAIL_NOMEM do { errno = ENOMEM; return NULL; } while (0)


/* The pool free-list link structures below are overlaid onto the first
 * bytes of a freed buffer: a buffer on a free list stores nothing but its
 * SLIST link.
 */
struct packet_in_buf
{
    SLIST_ENTRY(packet_in_buf)  next_pib;
};

struct packet_out_buf
{
    SLIST_ENTRY(packet_out_buf) next_pob;
};

struct four_k_page
{
    SLIST_ENTRY(four_k_page)  next_fkp;
};

struct sixteen_k_page
{
    SLIST_ENTRY(sixteen_k_page)  next_skp;
};


/* Initialize memory manager: allocate the ACK-info scratch object, the
 * string buffer for ACK logging, and one malo allocator per fixed-size
 * object type; initialize the (empty) pool free lists.
 *
 * Returns 0 on success, -1 if any allocation failed.
 *
 * NOTE(review): on failure, allocations that did succeed are not freed
 * here -- presumably the caller is expected to call lsquic_mm_cleanup();
 * confirm that cleanup tolerates the NULL members in that case.
 */
int
lsquic_mm_init (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    int i;
#endif

    mm->acki = malloc(sizeof(*mm->acki));
    mm->malo.stream_frame = lsquic_malo_create(sizeof(struct stream_frame));
    mm->malo.stream_rec_arr = lsquic_malo_create(sizeof(struct stream_rec_arr));
    mm->malo.mini_conn = lsquic_malo_create(sizeof(struct mini_conn));
    mm->malo.mini_conn_ietf = lsquic_malo_create(sizeof(struct ietf_mini_conn));
    mm->malo.packet_in = lsquic_malo_create(sizeof(struct lsquic_packet_in));
    mm->malo.packet_out = lsquic_malo_create(sizeof(struct lsquic_packet_out));
    mm->malo.dcid_elem = lsquic_malo_create(sizeof(struct dcid_elem));
    mm->malo.stream_hq_frame
                        = lsquic_malo_create(sizeof(struct stream_hq_frame));
    mm->ack_str = malloc(MAX_ACKI_STR_SZ);
#if LSQUIC_USE_POOLS
    TAILQ_INIT(&mm->free_packets_in);
    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        SLIST_INIT(&mm->packet_out_bufs[i]);
    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
        SLIST_INIT(&mm->packet_in_bufs[i]);
    SLIST_INIT(&mm->four_k_pages);
    SLIST_INIT(&mm->sixteen_k_pages);
#endif
    /* Success only if every single allocation above succeeded: */
    if (mm->acki && mm->malo.stream_frame && mm->malo.stream_rec_arr
        && mm->malo.mini_conn && mm->malo.mini_conn_ietf && mm->malo.packet_in
        && mm->malo.packet_out && mm->malo.dcid_elem
        && mm->malo.stream_hq_frame && mm->ack_str)
    {
        return 0;
    }
    else
        return -1;
}


/* Release everything owned by the memory manager: the malo allocators,
 * the ACK scratch objects, and every buffer still sitting on a pool
 * free list.
 */
void
lsquic_mm_cleanup (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    int i;
    struct packet_out_buf *pob;
    struct packet_in_buf *pib;
    struct four_k_page *fkp;
    struct sixteen_k_page *skp;
#endif

    free(mm->acki);
    lsquic_malo_destroy(mm->malo.stream_hq_frame);
    lsquic_malo_destroy(mm->malo.dcid_elem);
    lsquic_malo_destroy(mm->malo.packet_in);
    lsquic_malo_destroy(mm->malo.packet_out);
    lsquic_malo_destroy(mm->malo.stream_frame);
    lsquic_malo_destroy(mm->malo.stream_rec_arr);
    lsquic_malo_destroy(mm->malo.mini_conn);
    lsquic_malo_destroy(mm->malo.mini_conn_ietf);
    free(mm->ack_str);

#if LSQUIC_USE_POOLS
    /* Drain each bucket's free list, freeing the pooled buffers: */
    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        while ((pob = SLIST_FIRST(&mm->packet_out_bufs[i])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[i], next_pob);
            free(pob);
        }

    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
        while ((pib = SLIST_FIRST(&mm->packet_in_bufs[i])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_in_bufs[i], next_pib);
            free(pib);
        }

    while ((fkp = SLIST_FIRST(&mm->four_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
        free(fkp);
    }

    while ((skp = SLIST_FIRST(&mm->sixteen_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
        free(skp);
    }
#endif
}


#if LSQUIC_USE_POOLS
/* Size buckets for incoming-packet data buffers: */
enum {
    PACKET_IN_PAYLOAD_0 = 1370,     /* common QUIC payload size upperbound */
    PACKET_IN_PAYLOAD_1 = 4096,     /* payload size middleground guess */
    PACKET_IN_PAYLOAD_2 = 0xffff,   /* UDP payload size upperbound */
};


static const unsigned packet_in_sizes[] = {
    PACKET_IN_PAYLOAD_0,
    PACKET_IN_PAYLOAD_1,
    PACKET_IN_PAYLOAD_2,
};


/* Map a requested buffer size to the index of the smallest bucket that
 * can hold it.  Each boolean comparison contributes 0 or 1, so the sum
 * is the bucket index.
 */
static unsigned
packet_in_index (unsigned size)
{
    unsigned idx = (size > PACKET_IN_PAYLOAD_0)
                 + (size > PACKET_IN_PAYLOAD_1);
    return idx;
}
#endif


/* Return an incoming packet to the memory manager.  The packet must no
 * longer be referenced (pi_refcnt of zero is asserted).  If the packet
 * owns its data buffer (PI_OWN_DATA), the buffer is recycled into its
 * size bucket (pooled build) or freed.  The packet structure itself goes
 * onto the free-packets list (pooled) or back to the malo allocator.
 */
void
lsquic_mm_put_packet_in (struct lsquic_mm *mm,
                                        struct lsquic_packet_in *packet_in)
{
#if LSQUIC_USE_POOLS
    unsigned idx;
    struct packet_in_buf *pib;

    assert(0 == packet_in->pi_refcnt);
    if (packet_in->pi_flags & PI_OWN_DATA)
    {
        /* Overlay the free-list link onto the first bytes of the buffer: */
        pib = (struct packet_in_buf *) packet_in->pi_data;
        idx = packet_in_index(packet_in->pi_data_sz);
        SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
    }
    TAILQ_INSERT_HEAD(&mm->free_packets_in, packet_in, pi_next);
#else
    if (packet_in->pi_flags & PI_OWN_DATA)
        free(packet_in->pi_data);
    lsquic_malo_put(packet_in);
#endif
}


/* Get a zeroed incoming-packet structure: reuse one from the free list
 * when available (pooled build), otherwise allocate from the malo
 * allocator.  Returns NULL (with errno set to ENOMEM via fault injection
 * or malo failure) when no memory is available.
 */
struct lsquic_packet_in *
lsquic_mm_get_packet_in (struct lsquic_mm *mm)
{
    struct lsquic_packet_in *packet_in;

    fiu_do_on("mm/packet_in", FAIL_NOMEM);

#if LSQUIC_USE_POOLS
    packet_in = TAILQ_FIRST(&mm->free_packets_in);
    if (packet_in)
    {
        assert(0 == packet_in->pi_refcnt);
        TAILQ_REMOVE(&mm->free_packets_in, packet_in, pi_next);
    }
    else
#endif
        packet_in = lsquic_malo_get(mm->malo.packet_in);

    if (packet_in)
        memset(packet_in, 0, sizeof(*packet_in));

    return packet_in;
}


#if LSQUIC_USE_POOLS
/* Based on commonly used MTUs, ordered from small to large: */
enum {
    PACKET_OUT_PAYLOAD_0 = 1280 - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_1 = GQUIC_MAX_IPv6_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_2 = GQUIC_MAX_IPv4_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_3 = 4096,
    PACKET_OUT_PAYLOAD_4 = 0xffff,
};


static const unsigned packet_out_sizes[] = {
    PACKET_OUT_PAYLOAD_0,
    PACKET_OUT_PAYLOAD_1,
    PACKET_OUT_PAYLOAD_2,
    PACKET_OUT_PAYLOAD_3,
    PACKET_OUT_PAYLOAD_4,
};


/* Same bucket-selection technique as packet_in_index() above, over the
 * five outgoing-buffer buckets.
 */
static unsigned
packet_out_index (unsigned size)
{
    unsigned idx = (size > PACKET_OUT_PAYLOAD_0)
                 + (size > PACKET_OUT_PAYLOAD_1)
                 + (size > PACKET_OUT_PAYLOAD_2)
                 + (size > PACKET_OUT_PAYLOAD_3);
    return idx;
}
#endif

#if LSQUIC_USE_POOLS
/* A usage sample is taken every this many get/put calls on a bucket: */
#define POOL_SAMPLE_PERIOD 1024

/* Fold the peak number of outstanding objects observed during the last
 * sample period into shift-based running estimates: ps_max_avg is an
 * exponentially weighted moving average of the peak (gain 1/8), and
 * ps_max_var tracks the mean deviation of the peak (gain 1/4).  Resets
 * the call counter and re-seeds ps_max from the current outstanding
 * count for the next period.
 */
static void
poolst_sample_max (struct pool_stats *poolst)
{
#define ALPHA_SHIFT 3
#define BETA_SHIFT  2
    unsigned diff;

    if (poolst->ps_max_avg)
    {
        poolst->ps_max_var -= poolst->ps_max_var >> BETA_SHIFT;
        if (poolst->ps_max_avg > poolst->ps_max)
            diff = poolst->ps_max_avg - poolst->ps_max;
        else
            diff = poolst->ps_max - poolst->ps_max_avg;
        poolst->ps_max_var += diff >> BETA_SHIFT;
        poolst->ps_max_avg -= poolst->ps_max_avg >> ALPHA_SHIFT;
        poolst->ps_max_avg += poolst->ps_max >> ALPHA_SHIFT;
    }
    else
    {
        /* First measurement */
        poolst->ps_max_avg = poolst->ps_max;
        poolst->ps_max_var = poolst->ps_max / 2;
    }

    poolst->ps_calls = 0;
    poolst->ps_max = poolst->ps_objs_out;
#if LSQUIC_LOG_POOL_STATS
    LSQ_DEBUG("new sample: max avg: %u; var: %u", poolst->ps_max_avg,
                                                        poolst->ps_max_var);
#endif
}


/* Record that one object was handed out.  `new' is 1 if the buffer was
 * freshly malloc'ed, 0 if it was reused from the pool (see the call
 * sites in lsquic_mm_get_packet_out()).
 */
static void
poolst_allocated (struct pool_stats *poolst, unsigned new)
{
    poolst->ps_objs_out += 1;
    poolst->ps_objs_all += new;
    if (poolst->ps_objs_out > poolst->ps_max)
        poolst->ps_max = poolst->ps_objs_out;
    ++poolst->ps_calls;
    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
        poolst_sample_max(poolst);
}


/* Record that one object was returned to the pool. */
static void
poolst_freed (struct pool_stats *poolst)
{
    --poolst->ps_objs_out;
    ++poolst->ps_calls;
    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
        poolst_sample_max(poolst);
}


/* True immediately after poolst_sample_max() ran (the call counter was
 * just reset), i.e. a fresh ps_max_avg estimate is available.
 */
static int
poolst_has_new_sample (const struct pool_stats *poolst)
{
    return poolst->ps_calls == 0;
}


/* If average maximum falls under 1/4 of all objects allocated, release
 * half of the objects allocated.
 */
static void
maybe_shrink_packet_out_bufs (struct lsquic_mm *mm, unsigned idx)
{
    struct pool_stats *poolst;
    struct packet_out_buf *pob;
    unsigned n_to_leave;

    poolst = &mm->packet_out_bstats[idx];
    if (poolst->ps_max_avg * 4 < poolst->ps_objs_all)
    {
        n_to_leave = poolst->ps_objs_all / 2;
        /* Free list may hold fewer than the target number (some objects
         * are out with callers), so stop when it runs empty: */
        while (poolst->ps_objs_all > n_to_leave
                    && (pob = SLIST_FIRST(&mm->packet_out_bufs[idx])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
            free(pob);
            --poolst->ps_objs_all;
        }
#if LSQUIC_LOG_POOL_STATS
        LSQ_DEBUG("pool #%u; max avg %u; shrank from %u to %u objs",
            idx, poolst->ps_max_avg, n_to_leave * 2, poolst->ps_objs_all);
#endif
    }
#if LSQUIC_LOG_POOL_STATS
    else
        LSQ_DEBUG("pool #%u; max avg %u; objs: %u; won't shrink",
            idx, poolst->ps_max_avg, poolst->ps_objs_all);
#endif
}
#endif


/* Return an outgoing packet to the memory manager: its data buffer goes
 * back to the size bucket selected by po_n_alloc (pooled build) or is
 * freed, and the packet structure is returned to the malo allocator.
 * May trigger pool shrinking when a new usage sample becomes available.
 */
void
lsquic_mm_put_packet_out (struct lsquic_mm *mm,
                          struct lsquic_packet_out *packet_out)
{
#if LSQUIC_USE_POOLS
    struct packet_out_buf *pob;
    unsigned idx;

    assert(packet_out->po_data);
    pob = (struct packet_out_buf *) packet_out->po_data;
    idx = packet_out_index(packet_out->po_n_alloc);
    SLIST_INSERT_HEAD(&mm->packet_out_bufs[idx], pob, next_pob);
    poolst_freed(&mm->packet_out_bstats[idx]);
    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
        maybe_shrink_packet_out_bufs(mm, idx);
#else
    free(packet_out->po_data);
#endif
    lsquic_malo_put(packet_out);
}


/* Get a zeroed outgoing packet with a data buffer of at least `size'
 * bytes attached (po_data; po_n_alloc records the requested size).  The
 * packet structure comes from `malo' if given, otherwise from the
 * manager's own allocator.  In the pooled build the buffer is reused
 * from the appropriate size bucket when possible; it may be larger than
 * requested (bucket size).  Returns NULL on allocation failure.
 */
struct lsquic_packet_out *
lsquic_mm_get_packet_out (struct lsquic_mm *mm, struct malo *malo,
                          unsigned short size)
{
    struct lsquic_packet_out *packet_out;
    struct packet_out_buf *pob;
#if LSQUIC_USE_POOLS
    unsigned idx;
#endif

    fiu_do_on("mm/packet_out", FAIL_NOMEM);

    packet_out = lsquic_malo_get(malo ? malo : mm->malo.packet_out);
    if (!packet_out)
        return NULL;

#if LSQUIC_USE_POOLS
    idx = packet_out_index(size);
    pob = SLIST_FIRST(&mm->packet_out_bufs[idx]);
    if (pob)
    {
        SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
        poolst_allocated(&mm->packet_out_bstats[idx], 0);
    }
    else
    {
        /* Allocate the full bucket size so the buffer is reusable for
         * any request that maps to this bucket: */
        pob = malloc(packet_out_sizes[idx]);
        if (!pob)
        {
            lsquic_malo_put(packet_out);
            return NULL;
        }
        poolst_allocated(&mm->packet_out_bstats[idx], 1);
    }
    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
        maybe_shrink_packet_out_bufs(mm, idx);
#else
    pob = malloc(size);
    if (!pob)
    {
        lsquic_malo_put(packet_out);
        return NULL;
    }
#endif

    memset(packet_out, 0, sizeof(*packet_out));
    packet_out->po_n_alloc = size;
    packet_out->po_data = (unsigned char *) pob;

    return packet_out;
}


/* Get a buffer of at least `size' bytes for incoming packet data, reusing
 * one from the matching size bucket when possible (pooled build).  Pair
 * with lsquic_mm_put_packet_in_buf() using the same `size'.  Returns NULL
 * on allocation failure.
 */
void *
lsquic_mm_get_packet_in_buf (struct lsquic_mm *mm, size_t size)
{
    struct packet_in_buf *pib;
#if LSQUIC_USE_POOLS
    unsigned idx;

    idx = packet_in_index(size);
    pib = SLIST_FIRST(&mm->packet_in_bufs[idx]);
    fiu_do_on("mm/packet_in_buf", FAIL_NOMEM);
    if (pib)
        SLIST_REMOVE_HEAD(&mm->packet_in_bufs[idx], next_pib);
    else
        pib = malloc(packet_in_sizes[idx]);
#else
    pib = malloc(size);
#endif
    return pib;
}
468void 469lsquic_mm_put_packet_in_buf (struct lsquic_mm *mm, void *mem, size_t size) 470{ 471#if LSQUIC_USE_POOLS 472 unsigned idx; 473 struct packet_in_buf *pib; 474 475 pib = (struct packet_in_buf *) mem; 476 idx = packet_in_index(size); 477 SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib); 478#else 479 free(mem); 480#endif 481} 482 483 484void * 485lsquic_mm_get_4k (struct lsquic_mm *mm) 486{ 487#if LSQUIC_USE_POOLS 488 struct four_k_page *fkp = SLIST_FIRST(&mm->four_k_pages); 489 fiu_do_on("mm/4k", FAIL_NOMEM); 490 if (fkp) 491 SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp); 492 else 493 fkp = malloc(0x1000); 494 return fkp; 495#else 496 return malloc(0x1000); 497#endif 498} 499 500 501void 502lsquic_mm_put_4k (struct lsquic_mm *mm, void *mem) 503{ 504#if LSQUIC_USE_POOLS 505 struct four_k_page *fkp = mem; 506 SLIST_INSERT_HEAD(&mm->four_k_pages, fkp, next_fkp); 507#else 508 free(mem); 509#endif 510} 511 512 513void * 514lsquic_mm_get_16k (struct lsquic_mm *mm) 515{ 516#if LSQUIC_USE_POOLS 517 struct sixteen_k_page *skp = SLIST_FIRST(&mm->sixteen_k_pages); 518 fiu_do_on("mm/16k", FAIL_NOMEM); 519 if (skp) 520 SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp); 521 else 522 skp = malloc(16 * 1024); 523 return skp; 524#else 525 return malloc(16 * 1024); 526#endif 527} 528 529 530void 531lsquic_mm_put_16k (struct lsquic_mm *mm, void *mem) 532{ 533#if LSQUIC_USE_POOLS 534 struct sixteen_k_page *skp = mem; 535 SLIST_INSERT_HEAD(&mm->sixteen_k_pages, skp, next_skp); 536#else 537 free(mem); 538#endif 539} 540 541 542size_t 543lsquic_mm_mem_used (const struct lsquic_mm *mm) 544{ 545#if LSQUIC_USE_POOLS 546 const struct packet_out_buf *pob; 547 const struct packet_in_buf *pib; 548 const struct four_k_page *fkp; 549 const struct sixteen_k_page *skp; 550 unsigned i; 551 size_t size; 552 553 size = sizeof(*mm); 554 size += sizeof(*mm->acki); 555 size += lsquic_malo_mem_used(mm->malo.stream_frame); 556 size += lsquic_malo_mem_used(mm->malo.stream_rec_arr); 557 
size += lsquic_malo_mem_used(mm->malo.mini_conn); 558 size += lsquic_malo_mem_used(mm->malo.mini_conn_ietf); 559 size += lsquic_malo_mem_used(mm->malo.packet_in); 560 size += lsquic_malo_mem_used(mm->malo.packet_out); 561 562 for (i = 0; i < MM_N_OUT_BUCKETS; ++i) 563 SLIST_FOREACH(pob, &mm->packet_out_bufs[i], next_pob) 564 size += packet_out_sizes[i]; 565 566 for (i = 0; i < MM_N_IN_BUCKETS; ++i) 567 SLIST_FOREACH(pib, &mm->packet_in_bufs[i], next_pib) 568 size += packet_in_sizes[i]; 569 570 SLIST_FOREACH(fkp, &mm->four_k_pages, next_fkp) 571 size += 0x1000; 572 573 SLIST_FOREACH(skp, &mm->sixteen_k_pages, next_skp) 574 size += 0x4000; 575 576 return size; 577#else 578 return sizeof(*mm); 579#endif 580} 581