lsquic_mm.c revision 229fce07
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_mm.c -- Memory manager.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "fiu-local.h"

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_malo.h"
#include "lsquic_conn.h"
#include "lsquic_rtt.h"
#include "lsquic_packet_common.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"

/* Used as the fault-injection action in the fiu_do_on() calls below: */
#define FAIL_NOMEM do { errno = ENOMEM; return NULL; } while (0)


/* Each of these structs overlays the first bytes of a cached buffer so
 * that free buffers can be chained into singly linked free lists:
 */
struct payload_buf
{
    SLIST_ENTRY(payload_buf)        next_pb;
};

struct packet_out_buf
{
    SLIST_ENTRY(packet_out_buf)     next_pob;
};

struct four_k_page
{
    SLIST_ENTRY(four_k_page)        next_fkp;
};

struct sixteen_k_page
{
    SLIST_ENTRY(sixteen_k_page)     next_skp;
};


int
lsquic_mm_init (struct lsquic_mm *mm)
{
    int i;

    mm->acki = malloc(sizeof(*mm->acki));
    mm->malo.stream_frame = lsquic_malo_create(sizeof(struct stream_frame));
    mm->malo.stream_rec_arr = lsquic_malo_create(sizeof(struct stream_rec_arr));
    mm->malo.packet_in = lsquic_malo_create(sizeof(struct lsquic_packet_in));
    mm->malo.packet_out = lsquic_malo_create(sizeof(struct lsquic_packet_out));
    TAILQ_INIT(&mm->free_packets_in);
    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        SLIST_INIT(&mm->packet_out_bufs[i]);
    SLIST_INIT(&mm->payload_bufs);
    SLIST_INIT(&mm->four_k_pages);
    SLIST_INIT(&mm->sixteen_k_pages);
    if (mm->acki && mm->malo.stream_frame && mm->malo.stream_rec_arr &&
        mm->malo.packet_in && mm->malo.packet_out)
    {
        return 0;
    }
    else
        return -1;
}


void
lsquic_mm_cleanup (struct lsquic_mm *mm)
{
    int i;
    struct packet_out_buf *pob;
    struct payload_buf *pb;
    struct four_k_page *fkp;
    struct sixteen_k_page *skp;

    free(mm->acki);
    lsquic_malo_destroy(mm->malo.packet_in);
    lsquic_malo_destroy(mm->malo.packet_out);
    lsquic_malo_destroy(mm->malo.stream_frame);
    lsquic_malo_destroy(mm->malo.stream_rec_arr);

    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        while ((pob = SLIST_FIRST(&mm->packet_out_bufs[i])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[i], next_pob);
            free(pob);
        }

    while ((pb = SLIST_FIRST(&mm->payload_bufs)))
    {
        SLIST_REMOVE_HEAD(&mm->payload_bufs, next_pb);
        free(pb);
    }

    while ((fkp = SLIST_FIRST(&mm->four_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
        free(fkp);
    }

    while ((skp = SLIST_FIRST(&mm->sixteen_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
        free(skp);
    }
}


struct lsquic_packet_in *
lsquic_mm_get_packet_in (struct lsquic_mm *mm)
{
    struct lsquic_packet_in *packet_in;

    fiu_do_on("mm/packet_in", FAIL_NOMEM);

    packet_in = TAILQ_FIRST(&mm->free_packets_in);
    if (packet_in)
    {
        assert(0 == packet_in->pi_refcnt);
        TAILQ_REMOVE(&mm->free_packets_in, packet_in, pi_next);
    }
    else
        packet_in = lsquic_malo_get(mm->malo.packet_in);

    if (packet_in)
        memset(packet_in, 0, sizeof(*packet_in));

    return packet_in;
}


/* Based on commonly used MTUs, ordered from small to large: */
enum {
    PACKET_OUT_PAYLOAD_0 = 1280                    - QUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_1 = QUIC_MAX_IPv6_PACKET_SZ - QUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_2 = QUIC_MAX_IPv4_PACKET_SZ - QUIC_MIN_PACKET_OVERHEAD,
};


static const unsigned packet_out_sizes[] = {
    PACKET_OUT_PAYLOAD_0,
    PACKET_OUT_PAYLOAD_1,
    PACKET_OUT_PAYLOAD_2,
};


static unsigned
packet_out_index (unsigned size)
{
    unsigned idx = (size > PACKET_OUT_PAYLOAD_0)
                 + (size > PACKET_OUT_PAYLOAD_1);
    return idx;
}
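

/*
 * Outgoing packet buffers are cached in per-size free lists indexed by the
 * payload sizes above.  packet_out_index() picks the smallest bucket that
 * can hold "size" bytes without branching: each comparison contributes 0 or
 * 1 to the index.  For instance (illustrative only -- the actual limits
 * depend on QUIC_MIN_PACKET_OVERHEAD and the *_PACKET_SZ constants, and
 * the example assumes the strict small-to-large ordering noted above):
 *
 *     packet_out_index(PACKET_OUT_PAYLOAD_0)     == 0
 *     packet_out_index(PACKET_OUT_PAYLOAD_0 + 1) == 1
 *     packet_out_index(PACKET_OUT_PAYLOAD_2)     == 2
 *
 * The get/put functions below use this index both to choose the free list
 * and to size the buffer when a new one has to be malloc()ed.
 */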


void
lsquic_mm_put_packet_out (struct lsquic_mm *mm,
                          struct lsquic_packet_out *packet_out)
{
    struct packet_out_buf *pob;
    unsigned idx;

    assert(packet_out->po_data);
    pob = (struct packet_out_buf *) packet_out->po_data;
    idx = packet_out_index(packet_out->po_n_alloc);
    SLIST_INSERT_HEAD(&mm->packet_out_bufs[idx], pob, next_pob);
    lsquic_malo_put(packet_out);
}


struct lsquic_packet_out *
lsquic_mm_get_packet_out (struct lsquic_mm *mm, struct malo *malo,
                          unsigned short size)
{
    struct lsquic_packet_out *packet_out;
    struct packet_out_buf *pob;
    unsigned idx;

    assert(size <= QUIC_MAX_PAYLOAD_SZ);

    fiu_do_on("mm/packet_out", FAIL_NOMEM);

    packet_out = lsquic_malo_get(malo ? malo : mm->malo.packet_out);
    if (!packet_out)
        return NULL;

    idx = packet_out_index(size);
    pob = SLIST_FIRST(&mm->packet_out_bufs[idx]);
    if (pob)
        SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
    else
    {
        pob = malloc(packet_out_sizes[idx]);
        if (!pob)
        {
            lsquic_malo_put(packet_out);
            return NULL;
        }
    }

    memset(packet_out, 0, sizeof(*packet_out));
    packet_out->po_n_alloc = size;
    packet_out->po_data = (unsigned char *) pob;

    return packet_out;
}


void *
lsquic_mm_get_1370 (struct lsquic_mm *mm)
{
    struct payload_buf *pb = SLIST_FIRST(&mm->payload_bufs);
    fiu_do_on("mm/1370", FAIL_NOMEM);
    if (pb)
        SLIST_REMOVE_HEAD(&mm->payload_bufs, next_pb);
    else
        pb = malloc(1370);
    return pb;
}


void
lsquic_mm_put_1370 (struct lsquic_mm *mm, void *mem)
{
    struct payload_buf *pb = mem;
    SLIST_INSERT_HEAD(&mm->payload_bufs, pb, next_pb);
}


void *
lsquic_mm_get_4k (struct lsquic_mm *mm)
{
    struct four_k_page *fkp = SLIST_FIRST(&mm->four_k_pages);
    fiu_do_on("mm/4k", FAIL_NOMEM);
    if (fkp)
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
    else
        fkp = malloc(0x1000);
    return fkp;
}


void
lsquic_mm_put_4k (struct lsquic_mm *mm, void *mem)
{
    struct four_k_page *fkp = mem;
    SLIST_INSERT_HEAD(&mm->four_k_pages, fkp, next_fkp);
}


void *
lsquic_mm_get_16k (struct lsquic_mm *mm)
{
    struct sixteen_k_page *skp = SLIST_FIRST(&mm->sixteen_k_pages);
    fiu_do_on("mm/16k", FAIL_NOMEM);
    if (skp)
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
    else
        skp = malloc(16 * 1024);
    return skp;
}


void
lsquic_mm_put_16k (struct lsquic_mm *mm, void *mem)
{
    struct sixteen_k_page *skp = mem;
    SLIST_INSERT_HEAD(&mm->sixteen_k_pages, skp, next_skp);
}


void
lsquic_mm_put_packet_in (struct lsquic_mm *mm,
                         struct lsquic_packet_in *packet_in)
{
    assert(0 == packet_in->pi_refcnt);
    if (packet_in->pi_flags & PI_OWN_DATA)
        lsquic_mm_put_1370(mm, packet_in->pi_data);
    TAILQ_INSERT_HEAD(&mm->free_packets_in, packet_in, pi_next);
}
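

/*
 * Note that none of the put functions above release memory: returned
 * packets and buffers are pushed onto their respective free lists (or
 * handed back to their malo pool) and stay cached until
 * lsquic_mm_cleanup().  lsquic_mm_mem_used() below therefore counts the
 * buffers sitting on those lists in addition to the malo pools.
 */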


size_t
lsquic_mm_mem_used (const struct lsquic_mm *mm)
{
    const struct packet_out_buf *pob;
    const struct payload_buf *pb;
    const struct four_k_page *fkp;
    const struct sixteen_k_page *skp;
    unsigned i;
    size_t size;

    size = sizeof(*mm);
    size += sizeof(*mm->acki);
    size += lsquic_malo_mem_used(mm->malo.stream_frame);
    size += lsquic_malo_mem_used(mm->malo.stream_rec_arr);
    size += lsquic_malo_mem_used(mm->malo.packet_in);
    size += lsquic_malo_mem_used(mm->malo.packet_out);

    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        SLIST_FOREACH(pob, &mm->packet_out_bufs[i], next_pob)
            size += packet_out_sizes[i];

    SLIST_FOREACH(pb, &mm->payload_bufs, next_pb)
        size += 1370;

    SLIST_FOREACH(fkp, &mm->four_k_pages, next_fkp)
        size += 0x1000;

    SLIST_FOREACH(skp, &mm->sixteen_k_pages, next_skp)
        size += 0x4000;

    return size;
}
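
A minimal sketch of the allocator lifecycle this file implements is given below. It is illustrative only: example_round_trip() is a hypothetical caller, it assumes the same project headers as lsquic_mm.c above are in scope, and the payload size of 1200 bytes is an arbitrary value chosen to fit within QUIC_MAX_PAYLOAD_SZ.

/* Hypothetical caller: exercise init, packet-out get/put, and cleanup. */
static int
example_round_trip (void)
{
    struct lsquic_mm mm;
    struct lsquic_packet_out *packet_out;

    if (0 != lsquic_mm_init(&mm))
        return -1;                          /* one of the pools failed */

    /* Passing NULL for malo allocates from mm.malo.packet_out: */
    packet_out = lsquic_mm_get_packet_out(&mm, NULL, 1200);
    if (packet_out)
    {
        /* ... write up to 1200 bytes into packet_out->po_data ... */
        lsquic_mm_put_packet_out(&mm, packet_out);  /* buffer is cached */
    }

    lsquic_mm_cleanup(&mm);         /* frees cached buffers and pools */
    return 0;
}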