/* lsquic_mm.c revision de46bf2f */
1/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
2/*
3 * lsquic_mm.c -- Memory manager.
4 */
5
6#include <assert.h>
7#include <errno.h>
8#include <stddef.h>
9#include <stdlib.h>
10#include <string.h>
11#include <sys/queue.h>
12
13#include "fiu-local.h"
14
15#include "lsquic.h"
16#include "lsquic_int_types.h"
17#include "lsquic_sizes.h"
18#include "lsquic_malo.h"
19#include "lsquic_hash.h"
20#include "lsquic_conn.h"
21#include "lsquic_rtt.h"
22#include "lsquic_packet_common.h"
23#include "lsquic_mini_conn.h"
24#include "lsquic_enc_sess.h"
25#include "lsquic_mini_conn_ietf.h"
26#include "lsquic_packet_gquic.h"
27#include "lsquic_packet_in.h"
28#include "lsquic_packet_out.h"
29#include "lsquic_parse.h"
30#include "lsquic_mm.h"
31#include "lsquic_engine_public.h"
32#include "lsquic_full_conn.h"
33#include "lsquic_varint.h"
34#include "lsquic_hq.h"
35#include "lsquic_sfcw.h"
36#include "lsquic_stream.h"
37
38#ifndef LSQUIC_LOG_POOL_STATS
39#define LSQUIC_LOG_POOL_STATS 0
40#endif
41
42#if LSQUIC_LOG_POOL_STATS
43#include "lsquic_logger.h"
44#endif
45
46#ifndef LSQUIC_USE_POOLS
47#define LSQUIC_USE_POOLS 1
48#endif
49
50#define FAIL_NOMEM do { errno = ENOMEM; return NULL; } while (0)
51
52
/* The free-list nodes below are overlaid on the first bytes of the cached
 * buffers themselves: when a buffer or page is returned to the memory
 * manager, its own memory is reused to hold the SLIST link.  Every pooled
 * allocation (smallest is PACKET_IN_PAYLOAD_0 bytes) is comfortably large
 * enough to hold one pointer.
 */

struct packet_in_buf
{
    SLIST_ENTRY(packet_in_buf)  next_pib;   /* Link in mm->packet_in_bufs[] */
};

struct packet_out_buf
{
    SLIST_ENTRY(packet_out_buf) next_pob;   /* Link in mm->packet_out_bufs[] */
};

struct four_k_page
{
    SLIST_ENTRY(four_k_page)  next_fkp;     /* Link in mm->four_k_pages */
};

struct sixteen_k_page
{
    SLIST_ENTRY(sixteen_k_page)  next_skp;  /* Link in mm->sixteen_k_pages */
};
72
73
74int
75lsquic_mm_init (struct lsquic_mm *mm)
76{
77#if LSQUIC_USE_POOLS
78    int i;
79#endif
80
81    mm->acki = malloc(sizeof(*mm->acki));
82    mm->malo.stream_frame = lsquic_malo_create(sizeof(struct stream_frame));
83    mm->malo.stream_rec_arr = lsquic_malo_create(sizeof(struct stream_rec_arr));
84    mm->malo.mini_conn = lsquic_malo_create(sizeof(struct mini_conn));
85    mm->malo.mini_conn_ietf = lsquic_malo_create(sizeof(struct ietf_mini_conn));
86    mm->malo.packet_in = lsquic_malo_create(sizeof(struct lsquic_packet_in));
87    mm->malo.packet_out = lsquic_malo_create(sizeof(struct lsquic_packet_out));
88    mm->malo.dcid_elem = lsquic_malo_create(sizeof(struct dcid_elem));
89    mm->malo.stream_hq_frame
90                        = lsquic_malo_create(sizeof(struct stream_hq_frame));
91    mm->ack_str = malloc(MAX_ACKI_STR_SZ);
92#if LSQUIC_USE_POOLS
93    TAILQ_INIT(&mm->free_packets_in);
94    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
95        SLIST_INIT(&mm->packet_out_bufs[i]);
96    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
97        SLIST_INIT(&mm->packet_in_bufs[i]);
98    SLIST_INIT(&mm->four_k_pages);
99    SLIST_INIT(&mm->sixteen_k_pages);
100#endif
101    if (mm->acki && mm->malo.stream_frame && mm->malo.stream_rec_arr
102        && mm->malo.mini_conn && mm->malo.mini_conn_ietf && mm->malo.packet_in
103        && mm->malo.packet_out && mm->malo.dcid_elem
104        && mm->malo.stream_hq_frame && mm->ack_str)
105    {
106        return 0;
107    }
108    else
109        return -1;
110}
111
112
113void
114lsquic_mm_cleanup (struct lsquic_mm *mm)
115{
116#if LSQUIC_USE_POOLS
117    int i;
118    struct packet_out_buf *pob;
119    struct packet_in_buf *pib;
120    struct four_k_page *fkp;
121    struct sixteen_k_page *skp;
122#endif
123
124    free(mm->acki);
125    lsquic_malo_destroy(mm->malo.stream_hq_frame);
126    lsquic_malo_destroy(mm->malo.dcid_elem);
127    lsquic_malo_destroy(mm->malo.packet_in);
128    lsquic_malo_destroy(mm->malo.packet_out);
129    lsquic_malo_destroy(mm->malo.stream_frame);
130    lsquic_malo_destroy(mm->malo.stream_rec_arr);
131    lsquic_malo_destroy(mm->malo.mini_conn);
132    lsquic_malo_destroy(mm->malo.mini_conn_ietf);
133    free(mm->ack_str);
134
135#if LSQUIC_USE_POOLS
136    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
137        while ((pob = SLIST_FIRST(&mm->packet_out_bufs[i])))
138        {
139            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[i], next_pob);
140            free(pob);
141        }
142
143    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
144        while ((pib = SLIST_FIRST(&mm->packet_in_bufs[i])))
145        {
146            SLIST_REMOVE_HEAD(&mm->packet_in_bufs[i], next_pib);
147            free(pib);
148        }
149
150    while ((fkp = SLIST_FIRST(&mm->four_k_pages)))
151    {
152        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
153        free(fkp);
154    }
155
156    while ((skp = SLIST_FIRST(&mm->sixteen_k_pages)))
157    {
158        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
159        free(skp);
160    }
161#endif
162}
163
164
165#if LSQUIC_USE_POOLS
/* Incoming-packet buffer size classes, ordered smallest to largest: */
enum {
    PACKET_IN_PAYLOAD_0 = 1370,     /* common QUIC payload size upperbound */
    PACKET_IN_PAYLOAD_1 = 4096,     /* payload size middleground guess */
    PACKET_IN_PAYLOAD_2 = 0xffff,   /* UDP payload size upperbound */
};


/* Allocation size of each size class; indexed by packet_in_index() */
static const unsigned packet_in_sizes[] = {
    PACKET_IN_PAYLOAD_0,
    PACKET_IN_PAYLOAD_1,
    PACKET_IN_PAYLOAD_2,
};


/* Map a requested buffer size to the smallest size class that holds it */
static unsigned
packet_in_index (unsigned size)
{
    if (size <= PACKET_IN_PAYLOAD_0)
        return 0;
    if (size <= PACKET_IN_PAYLOAD_1)
        return 1;
    return 2;
}
187#endif
188
189
190void
191lsquic_mm_put_packet_in (struct lsquic_mm *mm,
192                                        struct lsquic_packet_in *packet_in)
193{
194#if LSQUIC_USE_POOLS
195    unsigned idx;
196    struct packet_in_buf *pib;
197
198    assert(0 == packet_in->pi_refcnt);
199    if (packet_in->pi_flags & PI_OWN_DATA)
200    {
201        pib = (struct packet_in_buf *) packet_in->pi_data;
202        idx = packet_in_index(packet_in->pi_data_sz);
203        SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
204    }
205    TAILQ_INSERT_HEAD(&mm->free_packets_in, packet_in, pi_next);
206#else
207    if (packet_in->pi_flags & PI_OWN_DATA)
208        free(packet_in->pi_data);
209    lsquic_malo_put(packet_in);
210#endif
211}
212
213
214struct lsquic_packet_in *
215lsquic_mm_get_packet_in (struct lsquic_mm *mm)
216{
217    struct lsquic_packet_in *packet_in;
218
219    fiu_do_on("mm/packet_in", FAIL_NOMEM);
220
221#if LSQUIC_USE_POOLS
222    packet_in = TAILQ_FIRST(&mm->free_packets_in);
223    if (packet_in)
224    {
225        assert(0 == packet_in->pi_refcnt);
226        TAILQ_REMOVE(&mm->free_packets_in, packet_in, pi_next);
227    }
228    else
229#endif
230        packet_in = lsquic_malo_get(mm->malo.packet_in);
231
232    if (packet_in)
233        memset(packet_in, 0, sizeof(*packet_in));
234
235    return packet_in;
236}
237
238
239#if LSQUIC_USE_POOLS
/* Based on commonly used MTUs, ordered from small to large: */
enum {
    /* 1280 is the IPv6 minimum MTU; subtract gQUIC packetization overhead
     * to get the usable payload:
     */
    PACKET_OUT_PAYLOAD_0 = 1280                    - GQUIC_MIN_PACKET_OVERHEAD,
    /* Maximum gQUIC packet sizes over IPv6 and IPv4, minus overhead: */
    PACKET_OUT_PAYLOAD_1 = GQUIC_MAX_IPv6_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    PACKET_OUT_PAYLOAD_2 = GQUIC_MAX_IPv4_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
    /* Larger fallback size classes: */
    PACKET_OUT_PAYLOAD_3 = 4096,
    PACKET_OUT_PAYLOAD_4 = 0xffff,
};


/* Allocation size of each size class; indexed by packet_out_index() */
static const unsigned packet_out_sizes[] = {
    PACKET_OUT_PAYLOAD_0,
    PACKET_OUT_PAYLOAD_1,
    PACKET_OUT_PAYLOAD_2,
    PACKET_OUT_PAYLOAD_3,
    PACKET_OUT_PAYLOAD_4,
};
257
258
259static unsigned
260packet_out_index (unsigned size)
261{
262    unsigned idx = (size > PACKET_OUT_PAYLOAD_0)
263                 + (size > PACKET_OUT_PAYLOAD_1)
264                 + (size > PACKET_OUT_PAYLOAD_2)
265                 + (size > PACKET_OUT_PAYLOAD_3);
266    return idx;
267}
268#endif
269
270#if LSQUIC_USE_POOLS
271#define POOL_SAMPLE_PERIOD 1024
272
/* Close out a sample period: fold the period's peak number of objects in
 * use (ps_max) into smoothed average (ps_max_avg) and variance (ps_max_var)
 * estimates, then start a new period.  The shift-based smoothing is the
 * same style as RTT estimation: avg += (max - avg)/8, var approaches
 * |avg - max| with gain 1/4.  Called every POOL_SAMPLE_PERIOD alloc/free
 * calls.
 */
static void
poolst_sample_max (struct pool_stats *poolst)
{
#define ALPHA_SHIFT 3
#define BETA_SHIFT  2
    unsigned diff;

    if (poolst->ps_max_avg)
    {
        /* Order matters: the variance update must use the average from
         * the previous sample, before ps_max_avg is updated below.
         */
        poolst->ps_max_var -= poolst->ps_max_var >> BETA_SHIFT;
        if (poolst->ps_max_avg > poolst->ps_max)
            diff = poolst->ps_max_avg - poolst->ps_max;
        else
            diff = poolst->ps_max - poolst->ps_max_avg;
        poolst->ps_max_var += diff >> BETA_SHIFT;
        poolst->ps_max_avg -= poolst->ps_max_avg >> ALPHA_SHIFT;
        poolst->ps_max_avg += poolst->ps_max >> ALPHA_SHIFT;
    }
    else
    {
        /* First measurement */
        poolst->ps_max_avg  = poolst->ps_max;
        poolst->ps_max_var  = poolst->ps_max / 2;
    }

    /* Reset for the next period.  ps_calls == 0 is also how
     * poolst_has_new_sample() detects that a sample was just taken.
     */
    poolst->ps_calls = 0;
    poolst->ps_max = poolst->ps_objs_out;
#if LSQUIC_LOG_POOL_STATS
    LSQ_DEBUG("new sample: max avg: %u; var: %u", poolst->ps_max_avg,
                                                        poolst->ps_max_var);
#endif
}
305
306
307static void
308poolst_allocated (struct pool_stats *poolst, unsigned new)
309{
310    poolst->ps_objs_out += 1;
311    poolst->ps_objs_all += new;
312    if (poolst->ps_objs_out > poolst->ps_max)
313        poolst->ps_max = poolst->ps_objs_out;
314    ++poolst->ps_calls;
315    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
316        poolst_sample_max(poolst);
317}
318
319
320static void
321poolst_freed (struct pool_stats *poolst)
322{
323    --poolst->ps_objs_out;
324    ++poolst->ps_calls;
325    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
326        poolst_sample_max(poolst);
327}
328
329
330static int
331poolst_has_new_sample (const struct pool_stats *poolst)
332{
333    return poolst->ps_calls == 0;
334}
335
336
337/* If average maximum falls under 1/4 of all objects allocated, release
338 * half of the objects allocated.
339 */
340static void
341maybe_shrink_packet_out_bufs (struct lsquic_mm *mm, unsigned idx)
342{
343    struct pool_stats *poolst;
344    struct packet_out_buf *pob;
345    unsigned n_to_leave;
346
347    poolst = &mm->packet_out_bstats[idx];
348    if (poolst->ps_max_avg * 4 < poolst->ps_objs_all)
349    {
350        n_to_leave = poolst->ps_objs_all / 2;
351        while (poolst->ps_objs_all > n_to_leave
352                        && (pob = SLIST_FIRST(&mm->packet_out_bufs[idx])))
353        {
354            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
355            free(pob);
356            --poolst->ps_objs_all;
357        }
358#if LSQUIC_LOG_POOL_STATS
359        LSQ_DEBUG("pool #%u; max avg %u; shrank from %u to %u objs",
360                idx, poolst->ps_max_avg, n_to_leave * 2, poolst->ps_objs_all);
361#endif
362    }
363#if LSQUIC_LOG_POOL_STATS
364    else
365        LSQ_DEBUG("pool #%u; max avg %u; objs: %u; won't shrink",
366                                idx, poolst->ps_max_avg, poolst->ps_objs_all);
367#endif
368}
369#endif
370
371
372void
373lsquic_mm_put_packet_out (struct lsquic_mm *mm,
374                          struct lsquic_packet_out *packet_out)
375{
376#if LSQUIC_USE_POOLS
377    struct packet_out_buf *pob;
378    unsigned idx;
379
380    assert(packet_out->po_data);
381    pob = (struct packet_out_buf *) packet_out->po_data;
382    idx = packet_out_index(packet_out->po_n_alloc);
383    SLIST_INSERT_HEAD(&mm->packet_out_bufs[idx], pob, next_pob);
384    poolst_freed(&mm->packet_out_bstats[idx]);
385    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
386        maybe_shrink_packet_out_bufs(mm, idx);
387    if (packet_out->po_bwp_state)
388        lsquic_malo_put(packet_out->po_bwp_state);
389#else
390    free(packet_out->po_data);
391#endif
392    lsquic_malo_put(packet_out);
393}
394
395
/* Allocate a packet_out object together with a data buffer of at least
 * `size' bytes.  The object comes from `malo' if given, otherwise from
 * the manager's own packet_out allocator; the buffer comes from the
 * size-bucketed pool when pools are enabled.  Returns NULL if either
 * allocation fails (the object is released again in that case).
 */
struct lsquic_packet_out *
lsquic_mm_get_packet_out (struct lsquic_mm *mm, struct malo *malo,
                          unsigned short size)
{
    struct lsquic_packet_out *packet_out;
    struct packet_out_buf *pob;
#if LSQUIC_USE_POOLS
    unsigned idx;
#endif

    /* Fault-injection hook used by tests to simulate allocation failure */
    fiu_do_on("mm/packet_out", FAIL_NOMEM);

    packet_out = lsquic_malo_get(malo ? malo : mm->malo.packet_out);
    if (!packet_out)
        return NULL;

#if LSQUIC_USE_POOLS
    idx = packet_out_index(size);
    pob = SLIST_FIRST(&mm->packet_out_bufs[idx]);
    if (pob)
    {
        /* Reuse a cached buffer; second argument 0 means the pool's
         * total object count does not grow:
         */
        SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
        poolst_allocated(&mm->packet_out_bstats[idx], 0);
    }
    else
    {
        /* Pool bucket empty: allocate the full bucket size so the buffer
         * can later serve any request in this size class.
         */
        pob = malloc(packet_out_sizes[idx]);
        if (!pob)
        {
            lsquic_malo_put(packet_out);
            return NULL;
        }
        poolst_allocated(&mm->packet_out_bstats[idx], 1);
    }
    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
        maybe_shrink_packet_out_bufs(mm, idx);
#else
    pob = malloc(size);
    if (!pob)
    {
        lsquic_malo_put(packet_out);
        return NULL;
    }
#endif

    memset(packet_out, 0, sizeof(*packet_out));
    packet_out->po_n_alloc = size;
    packet_out->po_data = (unsigned char *) pob;

    return packet_out;
}
447
448
/* Obtain a buffer large enough to hold `size' bytes of incoming packet
 * data -- from the matching pool bucket when one is cached, otherwise
 * freshly allocated at the bucket's full size.  Returns NULL on
 * allocation failure.
 */
void *
lsquic_mm_get_packet_in_buf (struct lsquic_mm *mm, size_t size)
{
    struct packet_in_buf *pib;
#if LSQUIC_USE_POOLS
    unsigned idx;

    idx = packet_in_index(size);
    pib = SLIST_FIRST(&mm->packet_in_bufs[idx]);
    /* Fault-injection hook used by tests to simulate allocation failure */
    fiu_do_on("mm/packet_in_buf", FAIL_NOMEM);
    if (NULL == pib)
        pib = malloc(packet_in_sizes[idx]);
    else
        SLIST_REMOVE_HEAD(&mm->packet_in_bufs[idx], next_pib);
#else
    pib = malloc(size);
#endif
    return pib;
}
468
469
/* Return a packet-data buffer obtained via lsquic_mm_get_packet_in_buf().
 * `size' must be the size the buffer was requested with, so that it goes
 * back into the same pool bucket.
 */
void
lsquic_mm_put_packet_in_buf (struct lsquic_mm *mm, void *mem, size_t size)
{
#if LSQUIC_USE_POOLS
    struct packet_in_buf *pib = mem;
    unsigned idx = packet_in_index(size);

    SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
#else
    free(mem);
#endif
}
484
485
/* Obtain a 4 KB (0x1000-byte) page, from the pool if one is cached.
 * Returns NULL on allocation failure.
 */
void *
lsquic_mm_get_4k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *page;

    page = SLIST_FIRST(&mm->four_k_pages);
    /* Fault-injection hook used by tests to simulate allocation failure */
    fiu_do_on("mm/4k", FAIL_NOMEM);
    if (NULL == page)
        page = malloc(0x1000);
    else
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
    return page;
#else
    return malloc(0x1000);
#endif
}
501
502
/* Return a 4 KB page obtained via lsquic_mm_get_4k() to the pool */
void
lsquic_mm_put_4k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *page = mem;

    SLIST_INSERT_HEAD(&mm->four_k_pages, page, next_fkp);
#else
    free(mem);
#endif
}
513
514
/* Obtain a 16 KB page, from the pool if one is cached.  Returns NULL on
 * allocation failure.
 */
void *
lsquic_mm_get_16k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *page;

    page = SLIST_FIRST(&mm->sixteen_k_pages);
    /* Fault-injection hook used by tests to simulate allocation failure */
    fiu_do_on("mm/16k", FAIL_NOMEM);
    if (NULL == page)
        page = malloc(16 * 1024);
    else
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
    return page;
#else
    return malloc(16 * 1024);
#endif
}
530
531
/* Return a 16 KB page obtained via lsquic_mm_get_16k() to the pool */
void
lsquic_mm_put_16k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *page = mem;

    SLIST_INSERT_HEAD(&mm->sixteen_k_pages, page, next_skp);
#else
    free(mem);
#endif
}
542
543
544size_t
545lsquic_mm_mem_used (const struct lsquic_mm *mm)
546{
547#if LSQUIC_USE_POOLS
548    const struct packet_out_buf *pob;
549    const struct packet_in_buf *pib;
550    const struct four_k_page *fkp;
551    const struct sixteen_k_page *skp;
552    unsigned i;
553    size_t size;
554
555    size = sizeof(*mm);
556    size += sizeof(*mm->acki);
557    size += lsquic_malo_mem_used(mm->malo.stream_frame);
558    size += lsquic_malo_mem_used(mm->malo.stream_rec_arr);
559    size += lsquic_malo_mem_used(mm->malo.mini_conn);
560    size += lsquic_malo_mem_used(mm->malo.mini_conn_ietf);
561    size += lsquic_malo_mem_used(mm->malo.packet_in);
562    size += lsquic_malo_mem_used(mm->malo.packet_out);
563
564    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
565        SLIST_FOREACH(pob, &mm->packet_out_bufs[i], next_pob)
566            size += packet_out_sizes[i];
567
568    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
569        SLIST_FOREACH(pib, &mm->packet_in_bufs[i], next_pib)
570            size += packet_in_sizes[i];
571
572    SLIST_FOREACH(fkp, &mm->four_k_pages, next_fkp)
573        size += 0x1000;
574
575    SLIST_FOREACH(skp, &mm->sixteen_k_pages, next_skp)
576        size += 0x4000;
577
578    return size;
579#else
580    return sizeof(*mm);
581#endif
582}
583