/* Copyright (c) 2017 - 2022 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_mm.c -- Memory manager.
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "fiu-local.h"

#include "lsquic.h"
#include "lsquic_int_types.h"
#include "lsquic_sizes.h"
#include "lsquic_malo.h"
#include "lsquic_hash.h"
#include "lsquic_conn.h"
#include "lsquic_rtt.h"
#include "lsquic_packet_common.h"
#include "lsquic_mini_conn.h"
#include "lsquic_enc_sess.h"
#include "lsquic_trechist.h"
#include "lsquic_mini_conn_ietf.h"
#include "lsquic_packet_gquic.h"
#include "lsquic_packet_in.h"
#include "lsquic_packet_out.h"
#include "lsquic_parse.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_full_conn.h"
#include "lsquic_varint.h"
#include "lsquic_hq.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"

#ifndef LSQUIC_LOG_POOL_STATS
#define LSQUIC_LOG_POOL_STATS 0
#endif

#if LSQUIC_LOG_POOL_STATS
#include "lsquic_logger.h"
#endif

#ifndef LSQUIC_USE_POOLS
#define LSQUIC_USE_POOLS 1
#endif

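/* FAIL_NOMEM backs the libfiu fault-injection points (fiu_do_on) used
 * below: when a failure point such as "mm/packet_out" is enabled in tests,
 * the allocation function fails with ENOMEM instead of allocating.
 */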
#define FAIL_NOMEM do { errno = ENOMEM; return NULL; } while (0)


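/* Freed buffers are cached on intrusive singly-linked free lists: the
 * first bytes of each cached buffer are overlaid with one of the structs
 * below, which provides the SLIST linkage.  No separate bookkeeping
 * allocation is needed, as every pooled buffer can hold a pointer.
 */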
struct packet_in_buf
{
    SLIST_ENTRY(packet_in_buf)  next_pib;
};

struct packet_out_buf
{
    SLIST_ENTRY(packet_out_buf) next_pob;
};

struct four_k_page
{
    SLIST_ENTRY(four_k_page)  next_fkp;
};

struct sixteen_k_page
{
    SLIST_ENTRY(sixteen_k_page)  next_skp;
};


int
lsquic_mm_init (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    int i;
#endif

    mm->acki = malloc(sizeof(*mm->acki));
    mm->malo.stream_frame = lsquic_malo_create(sizeof(struct stream_frame));
    mm->malo.frame_rec_arr = lsquic_malo_create(sizeof(struct frame_rec_arr));
    mm->malo.mini_conn = lsquic_malo_create(sizeof(struct mini_conn));
    mm->malo.mini_conn_ietf = lsquic_malo_create(sizeof(struct ietf_mini_conn));
    mm->malo.packet_in = lsquic_malo_create(sizeof(struct lsquic_packet_in));
    mm->malo.packet_out = lsquic_malo_create(sizeof(struct lsquic_packet_out));
    mm->malo.dcid_elem = lsquic_malo_create(sizeof(struct dcid_elem));
    mm->malo.stream_hq_frame
                        = lsquic_malo_create(sizeof(struct stream_hq_frame));
    mm->ack_str = malloc(MAX_ACKI_STR_SZ);
#if LSQUIC_USE_POOLS
    TAILQ_INIT(&mm->free_packets_in);
    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        SLIST_INIT(&mm->packet_out_bufs[i]);
    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
        SLIST_INIT(&mm->packet_in_bufs[i]);
    SLIST_INIT(&mm->four_k_pages);
    SLIST_INIT(&mm->sixteen_k_pages);
#endif
    if (mm->acki && mm->malo.stream_frame && mm->malo.frame_rec_arr
        && mm->malo.mini_conn && mm->malo.mini_conn_ietf && mm->malo.packet_in
        && mm->malo.packet_out && mm->malo.dcid_elem
        && mm->malo.stream_hq_frame && mm->ack_str)
    {
        return 0;
    }
    else
        return -1;
}


void
lsquic_mm_cleanup (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    int i;
    struct packet_out_buf *pob;
    struct packet_in_buf *pib;
    struct four_k_page *fkp;
    struct sixteen_k_page *skp;
#endif

    free(mm->acki);
    lsquic_malo_destroy(mm->malo.stream_hq_frame);
    lsquic_malo_destroy(mm->malo.dcid_elem);
    lsquic_malo_destroy(mm->malo.packet_in);
    lsquic_malo_destroy(mm->malo.packet_out);
    lsquic_malo_destroy(mm->malo.stream_frame);
    lsquic_malo_destroy(mm->malo.frame_rec_arr);
    lsquic_malo_destroy(mm->malo.mini_conn);
    lsquic_malo_destroy(mm->malo.mini_conn_ietf);
    free(mm->ack_str);

#if LSQUIC_USE_POOLS
    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        while ((pob = SLIST_FIRST(&mm->packet_out_bufs[i])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[i], next_pob);
            free(pob);
        }

    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
        while ((pib = SLIST_FIRST(&mm->packet_in_bufs[i])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_in_bufs[i], next_pib);
            free(pib);
        }

    while ((fkp = SLIST_FIRST(&mm->four_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
        free(fkp);
    }

    while ((skp = SLIST_FIRST(&mm->sixteen_k_pages)))
    {
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
        free(skp);
    }
#endif
}


#if LSQUIC_USE_POOLS
enum {
    PACKET_IN_PAYLOAD_0 = 1370,     /* upper bound on common QUIC payload sizes */
    PACKET_IN_PAYLOAD_1 = 4096,     /* a middle-ground size guess */
    PACKET_IN_PAYLOAD_2 = 0xffff,   /* upper bound on UDP payload size */
};


static const unsigned packet_in_sizes[] = {
    PACKET_IN_PAYLOAD_0,
    PACKET_IN_PAYLOAD_1,
    PACKET_IN_PAYLOAD_2,
};


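/* Map a buffer size to the smallest bucket whose capacity fits it.  The
 * sum of the branchless comparisons yields 0, 1, or 2: for example,
 * packet_in_index(1370) == 0, packet_in_index(1371) == 1, and
 * packet_in_index(9000) == 2.
 */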
static unsigned
packet_in_index (unsigned size)
{
    unsigned idx = (size > PACKET_IN_PAYLOAD_0)
                 + (size > PACKET_IN_PAYLOAD_1);
    return idx;
}
#endif


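/* Return a packet_in object to the cache.  If the object owns its data
 * buffer, the buffer is recycled into the size-appropriate bucket.
 */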
void
lsquic_mm_put_packet_in (struct lsquic_mm *mm,
                                        struct lsquic_packet_in *packet_in)
{
#if LSQUIC_USE_POOLS
    unsigned idx;
    struct packet_in_buf *pib;

    assert(0 == packet_in->pi_refcnt);
    if (packet_in->pi_flags & PI_OWN_DATA)
    {
        pib = (struct packet_in_buf *) packet_in->pi_data;
        idx = packet_in_index(packet_in->pi_data_sz);
        SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
    }
    TAILQ_INSERT_HEAD(&mm->free_packets_in, packet_in, pi_next);
#else
    if (packet_in->pi_flags & PI_OWN_DATA)
        free(packet_in->pi_data);
    lsquic_malo_put(packet_in);
#endif
}


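/* Get a zeroed packet_in object, reusing a cached one when possible.
 * Returns NULL on allocation failure.
 */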
struct lsquic_packet_in *
lsquic_mm_get_packet_in (struct lsquic_mm *mm)
{
    struct lsquic_packet_in *packet_in;

    fiu_do_on("mm/packet_in", FAIL_NOMEM);

#if LSQUIC_USE_POOLS
    packet_in = TAILQ_FIRST(&mm->free_packets_in);
    if (packet_in)
    {
        assert(0 == packet_in->pi_refcnt);
        TAILQ_REMOVE(&mm->free_packets_in, packet_in, pi_next);
    }
    else
#endif
        packet_in = lsquic_malo_get(mm->malo.packet_in);

    if (packet_in)
        memset(packet_in, 0, sizeof(*packet_in));

    return packet_in;
}


#if LSQUIC_USE_POOLS
/* Based on commonly used MTUs, ordered from small to large: */
enum {
    PACKET_OUT_PAYLOAD_0 = 1280                    - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_1 = GQUIC_MAX_IPv6_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_2 = GQUIC_MAX_IPv4_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_3 = 4096,
    PACKET_OUT_PAYLOAD_4 = 0xffff,
};


static const unsigned packet_out_sizes[] = {
    PACKET_OUT_PAYLOAD_0,
    PACKET_OUT_PAYLOAD_1,
    PACKET_OUT_PAYLOAD_2,
    PACKET_OUT_PAYLOAD_3,
    PACKET_OUT_PAYLOAD_4,
};


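/* Same bucketing scheme as packet_in_index(), with five buckets: the sum
 * of the comparisons selects the smallest payload size that fits.
 */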
static unsigned
packet_out_index (unsigned size)
{
    unsigned idx = (size > PACKET_OUT_PAYLOAD_0)
                 + (size > PACKET_OUT_PAYLOAD_1)
                 + (size > PACKET_OUT_PAYLOAD_2)
                 + (size > PACKET_OUT_PAYLOAD_3);
    return idx;
}
#endif

#if LSQUIC_USE_POOLS
#define POOL_SAMPLE_PERIOD 1024

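/* Every POOL_SAMPLE_PERIOD get/put calls, fold the observed maximum number
 * of outstanding objects into smoothed average and variance estimates.
 * The exponentially weighted update mirrors the RFC 6298 RTT estimator,
 * with gains of 1/8 (ALPHA_SHIFT) and 1/4 (BETA_SHIFT):
 *
 *   max_var += |max_avg - max|/4 - max_var/4
 *   max_avg += max/8 - max_avg/8
 */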
static void
poolst_sample_max (struct pool_stats *poolst)
{
#define ALPHA_SHIFT 3
#define BETA_SHIFT  2
    unsigned diff;

    if (poolst->ps_max_avg)
    {
        poolst->ps_max_var -= poolst->ps_max_var >> BETA_SHIFT;
        if (poolst->ps_max_avg > poolst->ps_max)
            diff = poolst->ps_max_avg - poolst->ps_max;
        else
            diff = poolst->ps_max - poolst->ps_max_avg;
        poolst->ps_max_var += diff >> BETA_SHIFT;
        poolst->ps_max_avg -= poolst->ps_max_avg >> ALPHA_SHIFT;
        poolst->ps_max_avg += poolst->ps_max >> ALPHA_SHIFT;
    }
    else
    {
        /* First measurement */
        poolst->ps_max_avg  = poolst->ps_max;
        poolst->ps_max_var  = poolst->ps_max / 2;
    }

    poolst->ps_calls = 0;
    poolst->ps_max = poolst->ps_objs_out;
#if LSQUIC_LOG_POOL_STATS
    LSQ_DEBUG("new sample: max avg: %u; var: %u", poolst->ps_max_avg,
                                                        poolst->ps_max_var);
#endif
}


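/* Record that an object left the pool.  "new" is 1 when the object was
 * freshly malloc()ed rather than taken off the free list, so that
 * ps_objs_all tracks the total number of buffers in existence.
 */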
static void
poolst_allocated (struct pool_stats *poolst, unsigned new)
{
    poolst->ps_objs_out += 1;
    poolst->ps_objs_all += new;
    if (poolst->ps_objs_out > poolst->ps_max)
        poolst->ps_max = poolst->ps_objs_out;
    ++poolst->ps_calls;
    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
        poolst_sample_max(poolst);
}


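/* Record that an object was returned to the pool. */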
static void
poolst_freed (struct pool_stats *poolst)
{
    --poolst->ps_objs_out;
    ++poolst->ps_calls;
    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
        poolst_sample_max(poolst);
}


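/* True just after poolst_sample_max() has run, since it resets ps_calls
 * to zero.  Callers use this to know when to re-evaluate pool sizes.
 */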
static int
poolst_has_new_sample (const struct pool_stats *poolst)
{
    return poolst->ps_calls == 0;
}


/* If the average maximum falls below a quarter of all objects allocated,
 * release half of the allocated objects.
 */
static void
maybe_shrink_packet_out_bufs (struct lsquic_mm *mm, unsigned idx)
{
    struct pool_stats *poolst;
    struct packet_out_buf *pob;
    unsigned n_to_leave;

    poolst = &mm->packet_out_bstats[idx];
    if (poolst->ps_max_avg * 4 < poolst->ps_objs_all)
    {
        n_to_leave = poolst->ps_objs_all / 2;
        while (poolst->ps_objs_all > n_to_leave
                        && (pob = SLIST_FIRST(&mm->packet_out_bufs[idx])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
            free(pob);
            --poolst->ps_objs_all;
        }
#if LSQUIC_LOG_POOL_STATS
        LSQ_DEBUG("pool #%u; max avg %u; shrank from %u to %u objs",
                idx, poolst->ps_max_avg, n_to_leave * 2, poolst->ps_objs_all);
#endif
    }
#if LSQUIC_LOG_POOL_STATS
    else
        LSQ_DEBUG("pool #%u; max avg %u; objs: %u; won't shrink",
                                idx, poolst->ps_max_avg, poolst->ps_objs_all);
#endif
}
#endif


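/* Return a packet_out object to its malo allocator and its payload buffer
 * to the bucket it came from; when a new pool-usage sample is available,
 * possibly shrink that bucket.
 */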
void
lsquic_mm_put_packet_out (struct lsquic_mm *mm,
                          struct lsquic_packet_out *packet_out)
{
#if LSQUIC_USE_POOLS
    struct packet_out_buf *pob;
    unsigned idx;

    assert(packet_out->po_data);
    pob = (struct packet_out_buf *) packet_out->po_data;
    idx = packet_out_index(packet_out->po_n_alloc);
    SLIST_INSERT_HEAD(&mm->packet_out_bufs[idx], pob, next_pob);
    poolst_freed(&mm->packet_out_bstats[idx]);
    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
        maybe_shrink_packet_out_bufs(mm, idx);
#else
    free(packet_out->po_data);
#endif
    lsquic_malo_put(packet_out);
}


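/* Allocate a packet_out object from `malo' (or from the shared allocator,
 * if `malo' is NULL) along with a payload buffer of at least `size' bytes
 * taken from the matching pool bucket.  Typical lifecycle (a sketch):
 *
 *     struct lsquic_packet_out *po
 *                         = lsquic_mm_get_packet_out(mm, NULL, 1370);
 *     if (po)
 *     {
 *         // write up to po->po_n_alloc bytes into po->po_data ...
 *         lsquic_mm_put_packet_out(mm, po);
 *     }
 */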
struct lsquic_packet_out *
lsquic_mm_get_packet_out (struct lsquic_mm *mm, struct malo *malo,
                          unsigned short size)
{
    struct lsquic_packet_out *packet_out;
    struct packet_out_buf *pob;
#if LSQUIC_USE_POOLS
    unsigned idx;
#endif

    fiu_do_on("mm/packet_out", FAIL_NOMEM);

    packet_out = lsquic_malo_get(malo ? malo : mm->malo.packet_out);
    if (!packet_out)
        return NULL;

#if LSQUIC_USE_POOLS
    idx = packet_out_index(size);
    pob = SLIST_FIRST(&mm->packet_out_bufs[idx]);
    if (pob)
    {
        SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
        poolst_allocated(&mm->packet_out_bstats[idx], 0);
    }
    else
    {
        pob = malloc(packet_out_sizes[idx]);
        if (!pob)
        {
            lsquic_malo_put(packet_out);
            return NULL;
        }
        poolst_allocated(&mm->packet_out_bstats[idx], 1);
    }
    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
        maybe_shrink_packet_out_bufs(mm, idx);
#else
    pob = malloc(size);
    if (!pob)
    {
        lsquic_malo_put(packet_out);
        return NULL;
    }
#endif

    memset(packet_out, 0, sizeof(*packet_out));
    packet_out->po_n_alloc = size;
    packet_out->po_data = (unsigned char *) pob;

    return packet_out;
}


void *
lsquic_mm_get_packet_in_buf (struct lsquic_mm *mm, size_t size)
{
    struct packet_in_buf *pib;
#if LSQUIC_USE_POOLS
    unsigned idx;

    idx = packet_in_index(size);
    pib = SLIST_FIRST(&mm->packet_in_bufs[idx]);
    fiu_do_on("mm/packet_in_buf", FAIL_NOMEM);
    if (pib)
        SLIST_REMOVE_HEAD(&mm->packet_in_bufs[idx], next_pib);
    else
        pib = malloc(packet_in_sizes[idx]);
#else
    pib = malloc(size);
#endif
    return pib;
}


void
lsquic_mm_put_packet_in_buf (struct lsquic_mm *mm, void *mem, size_t size)
{
#if LSQUIC_USE_POOLS
    unsigned idx;
    struct packet_in_buf *pib;

    pib = (struct packet_in_buf *) mem;
    idx = packet_in_index(size);
    SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
#else
    free(mem);
#endif
}


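/* Fixed-size page pools: 4 KB and 16 KB pages are handed out to callers
 * that need scratch memory of those sizes and are cached on free lists
 * when returned.  For example (a sketch):
 *
 *     void *page = lsquic_mm_get_4k(mm);
 *     if (page)
 *     {
 *         // ... use up to 4096 bytes ...
 *         lsquic_mm_put_4k(mm, page);
 *     }
 */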
void *
lsquic_mm_get_4k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *fkp = SLIST_FIRST(&mm->four_k_pages);
    fiu_do_on("mm/4k", FAIL_NOMEM);
    if (fkp)
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
    else
        fkp = malloc(0x1000);
    return fkp;
#else
    return malloc(0x1000);
#endif
}


void
lsquic_mm_put_4k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *fkp = mem;
    SLIST_INSERT_HEAD(&mm->four_k_pages, fkp, next_fkp);
#else
    free(mem);
#endif
}


void *
lsquic_mm_get_16k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *skp = SLIST_FIRST(&mm->sixteen_k_pages);
    fiu_do_on("mm/16k", FAIL_NOMEM);
    if (skp)
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
    else
        skp = malloc(16 * 1024);
    return skp;
#else
    return malloc(16 * 1024);
#endif
}


void
lsquic_mm_put_16k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *skp = mem;
    SLIST_INSERT_HEAD(&mm->sixteen_k_pages, skp, next_skp);
#else
    free(mem);
#endif
}


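/* Estimate memory held by the manager: the struct itself, the malo arenas,
 * and all buffers currently cached on the free lists.  Note that the
 * dcid_elem and stream_hq_frame arenas are not included in the tally.
 */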
size_t
lsquic_mm_mem_used (const struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    const struct packet_out_buf *pob;
    const struct packet_in_buf *pib;
    const struct four_k_page *fkp;
    const struct sixteen_k_page *skp;
    unsigned i;
    size_t size;

    size = sizeof(*mm);
    size += sizeof(*mm->acki);
    size += lsquic_malo_mem_used(mm->malo.stream_frame);
    size += lsquic_malo_mem_used(mm->malo.frame_rec_arr);
    size += lsquic_malo_mem_used(mm->malo.mini_conn);
    size += lsquic_malo_mem_used(mm->malo.mini_conn_ietf);
    size += lsquic_malo_mem_used(mm->malo.packet_in);
    size += lsquic_malo_mem_used(mm->malo.packet_out);

    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
        SLIST_FOREACH(pob, &mm->packet_out_bufs[i], next_pob)
            size += packet_out_sizes[i];

    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
        SLIST_FOREACH(pib, &mm->packet_in_bufs[i], next_pib)
            size += packet_in_sizes[i];

    SLIST_FOREACH(fkp, &mm->four_k_pages, next_fkp)
        size += 0x1000;

    SLIST_FOREACH(skp, &mm->sixteen_k_pages, next_skp)
        size += 0x4000;

    return size;
#else
    return sizeof(*mm);
#endif
}