lsquic_mm.c revision 5392f7a3
1/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
2/*
3 * lsquic_mm.c -- Memory manager.
4 */
5
6#include <assert.h>
7#include <errno.h>
8#include <stddef.h>
9#include <stdlib.h>
10#include <string.h>
11#include <sys/queue.h>
12
13#include "fiu-local.h"
14
15#include "lsquic.h"
16#include "lsquic_int_types.h"
17#include "lsquic_sizes.h"
18#include "lsquic_malo.h"
19#include "lsquic_hash.h"
20#include "lsquic_conn.h"
21#include "lsquic_rtt.h"
22#include "lsquic_packet_common.h"
23#include "lsquic_mini_conn.h"
24#include "lsquic_enc_sess.h"
25#include "lsquic_mini_conn_ietf.h"
26#include "lsquic_packet_gquic.h"
27#include "lsquic_packet_in.h"
28#include "lsquic_packet_out.h"
29#include "lsquic_parse.h"
30#include "lsquic_mm.h"
31#include "lsquic_engine_public.h"
32#include "lsquic_full_conn.h"
33#include "lsquic_varint.h"
34#include "lsquic_hq.h"
35#include "lsquic_sfcw.h"
36#include "lsquic_stream.h"
37
/* Compile-time switch: set LSQUIC_LOG_POOL_STATS to 1 to enable debug
 * logging of pool usage statistics (see poolst_* functions below).
 */
#ifndef LSQUIC_LOG_POOL_STATS
#define LSQUIC_LOG_POOL_STATS 0
#endif

#if LSQUIC_LOG_POOL_STATS
#include "lsquic_logger.h"
#endif

/* When LSQUIC_USE_POOLS is nonzero (the default), freed packets, buffers,
 * and pages are cached on per-size free lists for reuse; when zero, the
 * code falls back to plain malloc()/free().
 */
#ifndef LSQUIC_USE_POOLS
#define LSQUIC_USE_POOLS 1
#endif

/* Fault-injection helper (libfiu): simulate an allocation failure by
 * setting errno and returning NULL from the enclosing function.
 */
#define FAIL_NOMEM do { errno = ENOMEM; return NULL; } while (0)
51
52
/* Free-list link overlays: when a buffer or page is returned to a pool,
 * a singly-linked-list entry is overlaid on its first bytes.  Each of
 * the following structs exists only to provide that link member.
 */
struct packet_in_buf
{
    SLIST_ENTRY(packet_in_buf)  next_pib;
};

struct packet_out_buf
{
    SLIST_ENTRY(packet_out_buf) next_pob;
};

struct four_k_page
{
    SLIST_ENTRY(four_k_page)  next_fkp;
};

struct sixteen_k_page
{
    SLIST_ENTRY(sixteen_k_page)  next_skp;
};
72
73
74int
75lsquic_mm_init (struct lsquic_mm *mm)
76{
77#if LSQUIC_USE_POOLS
78    int i;
79#endif
80
81    mm->acki = malloc(sizeof(*mm->acki));
82    mm->malo.stream_frame = lsquic_malo_create(sizeof(struct stream_frame));
83    mm->malo.stream_rec_arr = lsquic_malo_create(sizeof(struct stream_rec_arr));
84    mm->malo.mini_conn = lsquic_malo_create(sizeof(struct mini_conn));
85    mm->malo.mini_conn_ietf = lsquic_malo_create(sizeof(struct ietf_mini_conn));
86    mm->malo.packet_in = lsquic_malo_create(sizeof(struct lsquic_packet_in));
87    mm->malo.packet_out = lsquic_malo_create(sizeof(struct lsquic_packet_out));
88    mm->malo.dcid_elem = lsquic_malo_create(sizeof(struct dcid_elem));
89    mm->malo.stream_hq_frame
90                        = lsquic_malo_create(sizeof(struct stream_hq_frame));
91#if LSQUIC_USE_POOLS
92    TAILQ_INIT(&mm->free_packets_in);
93    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
94        SLIST_INIT(&mm->packet_out_bufs[i]);
95    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
96        SLIST_INIT(&mm->packet_in_bufs[i]);
97    SLIST_INIT(&mm->four_k_pages);
98    SLIST_INIT(&mm->sixteen_k_pages);
99#endif
100    if (mm->acki && mm->malo.stream_frame && mm->malo.stream_rec_arr
101        && mm->malo.mini_conn && mm->malo.mini_conn_ietf && mm->malo.packet_in
102        && mm->malo.stream_hq_frame)
103    {
104        return 0;
105    }
106    else
107        return -1;
108}
109
110
111void
112lsquic_mm_cleanup (struct lsquic_mm *mm)
113{
114#if LSQUIC_USE_POOLS
115    int i;
116    struct packet_out_buf *pob;
117    struct packet_in_buf *pib;
118    struct four_k_page *fkp;
119    struct sixteen_k_page *skp;
120#endif
121
122    free(mm->acki);
123    lsquic_malo_destroy(mm->malo.stream_hq_frame);
124    lsquic_malo_destroy(mm->malo.dcid_elem);
125    lsquic_malo_destroy(mm->malo.packet_in);
126    lsquic_malo_destroy(mm->malo.packet_out);
127    lsquic_malo_destroy(mm->malo.stream_frame);
128    lsquic_malo_destroy(mm->malo.stream_rec_arr);
129    lsquic_malo_destroy(mm->malo.mini_conn);
130    lsquic_malo_destroy(mm->malo.mini_conn_ietf);
131
132#if LSQUIC_USE_POOLS
133    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
134        while ((pob = SLIST_FIRST(&mm->packet_out_bufs[i])))
135        {
136            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[i], next_pob);
137            free(pob);
138        }
139
140    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
141        while ((pib = SLIST_FIRST(&mm->packet_in_bufs[i])))
142        {
143            SLIST_REMOVE_HEAD(&mm->packet_in_bufs[i], next_pib);
144            free(pib);
145        }
146
147    while ((fkp = SLIST_FIRST(&mm->four_k_pages)))
148    {
149        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
150        free(fkp);
151    }
152
153    while ((skp = SLIST_FIRST(&mm->sixteen_k_pages)))
154    {
155        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
156        free(skp);
157    }
158#endif
159}
160
161
162#if LSQUIC_USE_POOLS
/* Incoming-packet buffer size classes, ordered from small to large so
 * that the first class that fits can be selected by a linear scan.
 */
enum {
    PACKET_IN_PAYLOAD_0 = 1370,     /* common QUIC payload size upperbound */
    PACKET_IN_PAYLOAD_1 = 4096,     /* payload size middleground guess */
    PACKET_IN_PAYLOAD_2 = 0xffff,   /* UDP payload size upperbound */
};


static const unsigned packet_in_sizes[] = {
    PACKET_IN_PAYLOAD_0,
    PACKET_IN_PAYLOAD_1,
    PACKET_IN_PAYLOAD_2,
};

#define N_PACKET_IN_SIZES (sizeof(packet_in_sizes) \
                                            / sizeof(packet_in_sizes[0]))


/* Return the index of the smallest size class that holds `size' bytes.
 * Oversized requests map to the last (largest) class.
 */
static unsigned
packet_in_index (unsigned size)
{
    unsigned i;

    for (i = 0; i < N_PACKET_IN_SIZES - 1; ++i)
        if (size <= packet_in_sizes[i])
            break;
    return i;
}
184#endif
185
186
187void
188lsquic_mm_put_packet_in (struct lsquic_mm *mm,
189                                        struct lsquic_packet_in *packet_in)
190{
191#if LSQUIC_USE_POOLS
192    unsigned idx;
193    struct packet_in_buf *pib;
194
195    assert(0 == packet_in->pi_refcnt);
196    if (packet_in->pi_flags & PI_OWN_DATA)
197    {
198        pib = (struct packet_in_buf *) packet_in->pi_data;
199        idx = packet_in_index(packet_in->pi_data_sz);
200        SLIST_INSERT_HEAD(&mm->packet_in_bufs[idx], pib, next_pib);
201    }
202    TAILQ_INSERT_HEAD(&mm->free_packets_in, packet_in, pi_next);
203#else
204    if (packet_in->pi_flags & PI_OWN_DATA)
205        free(packet_in->pi_data);
206    lsquic_malo_put(packet_in);
207#endif
208}
209
210
211struct lsquic_packet_in *
212lsquic_mm_get_packet_in (struct lsquic_mm *mm)
213{
214    struct lsquic_packet_in *packet_in;
215
216    fiu_do_on("mm/packet_in", FAIL_NOMEM);
217
218#if LSQUIC_USE_POOLS
219    packet_in = TAILQ_FIRST(&mm->free_packets_in);
220    if (packet_in)
221    {
222        assert(0 == packet_in->pi_refcnt);
223        TAILQ_REMOVE(&mm->free_packets_in, packet_in, pi_next);
224    }
225    else
226#endif
227        packet_in = lsquic_malo_get(mm->malo.packet_in);
228
229    if (packet_in)
230        memset(packet_in, 0, sizeof(*packet_in));
231
232    return packet_in;
233}
234
235
236#if LSQUIC_USE_POOLS
237/* Based on commonly used MTUs, ordered from small to large: */
238enum {
239    PACKET_OUT_PAYLOAD_0 = 1280                    - GQUIC_MIN_PACKET_OVERHEAD,
240    PACKET_OUT_PAYLOAD_1 = GQUIC_MAX_IPv6_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
241    PACKET_OUT_PAYLOAD_2 = GQUIC_MAX_IPv4_PACKET_SZ - GQUIC_MIN_PACKET_OVERHEAD,
242    PACKET_OUT_PAYLOAD_3 = 4096,
243    PACKET_OUT_PAYLOAD_4 = 0xffff,
244};
245
246
247static const unsigned packet_out_sizes[] = {
248    PACKET_OUT_PAYLOAD_0,
249    PACKET_OUT_PAYLOAD_1,
250    PACKET_OUT_PAYLOAD_2,
251    PACKET_OUT_PAYLOAD_3,
252    PACKET_OUT_PAYLOAD_4,
253};
254
255
256static unsigned
257packet_out_index (unsigned size)
258{
259    unsigned idx = (size > PACKET_OUT_PAYLOAD_0)
260                 + (size > PACKET_OUT_PAYLOAD_1)
261                 + (size > PACKET_OUT_PAYLOAD_2)
262                 + (size > PACKET_OUT_PAYLOAD_3);
263    return idx;
264}
265#endif
266
#if LSQUIC_USE_POOLS
/* Number of poolst_allocated()/poolst_freed() calls per sampling period. */
#define POOL_SAMPLE_PERIOD 1024

/* Fold this period's observed maximum of outstanding objects (ps_max)
 * into the smoothed average (ps_max_avg) and mean deviation (ps_max_var),
 * then begin a new sampling period.  The smoothing uses shift-based
 * exponential weighting (gains 1/8 and 1/4) -- the same style of
 * estimator commonly used for RTT smoothing.
 */
static void
poolst_sample_max (struct pool_stats *poolst)
{
#define ALPHA_SHIFT 3
#define BETA_SHIFT  2
    unsigned diff;

    if (poolst->ps_max_avg)
    {
        /* Update deviation first, from the distance between the old
         * average and the new sample, then update the average itself:
         */
        poolst->ps_max_var -= poolst->ps_max_var >> BETA_SHIFT;
        if (poolst->ps_max_avg > poolst->ps_max)
            diff = poolst->ps_max_avg - poolst->ps_max;
        else
            diff = poolst->ps_max - poolst->ps_max_avg;
        poolst->ps_max_var += diff >> BETA_SHIFT;
        poolst->ps_max_avg -= poolst->ps_max_avg >> ALPHA_SHIFT;
        poolst->ps_max_avg += poolst->ps_max >> ALPHA_SHIFT;
    }
    else
    {
        /* First measurement */
        poolst->ps_max_avg  = poolst->ps_max;
        poolst->ps_max_var  = poolst->ps_max / 2;
    }

    /* Reset the period; the current count of outstanding objects is the
     * floor for the next period's maximum.
     */
    poolst->ps_calls = 0;
    poolst->ps_max = poolst->ps_objs_out;
#if LSQUIC_LOG_POOL_STATS
    LSQ_DEBUG("new sample: max avg: %u; var: %u", poolst->ps_max_avg,
                                                        poolst->ps_max_var);
#endif
}
302
303
304static void
305poolst_allocated (struct pool_stats *poolst, unsigned new)
306{
307    poolst->ps_objs_out += 1;
308    poolst->ps_objs_all += new;
309    if (poolst->ps_objs_out > poolst->ps_max)
310        poolst->ps_max = poolst->ps_objs_out;
311    ++poolst->ps_calls;
312    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
313        poolst_sample_max(poolst);
314}
315
316
317static void
318poolst_freed (struct pool_stats *poolst)
319{
320    --poolst->ps_objs_out;
321    ++poolst->ps_calls;
322    if (0 == poolst->ps_calls % POOL_SAMPLE_PERIOD)
323        poolst_sample_max(poolst);
324}
325
326
327static int
328poolst_has_new_sample (const struct pool_stats *poolst)
329{
330    return poolst->ps_calls == 0;
331}
332
333
/* If average maximum falls under 1/4 of all objects allocated, release
 * half of the objects allocated.  Called after each new statistics
 * sample for bucket `idx'.
 */
static void
maybe_shrink_packet_out_bufs (struct lsquic_mm *mm, unsigned idx)
{
    struct pool_stats *poolst;
    struct packet_out_buf *pob;
    unsigned n_to_leave;

    poolst = &mm->packet_out_bstats[idx];
    if (poolst->ps_max_avg * 4 < poolst->ps_objs_all)
    {
        n_to_leave = poolst->ps_objs_all / 2;
        /* Free cached buffers until only half the total remain; the loop
         * stops early if the free list runs dry (remaining objects are
         * checked out by callers and cannot be freed here).
         */
        while (poolst->ps_objs_all > n_to_leave
                        && (pob = SLIST_FIRST(&mm->packet_out_bufs[idx])))
        {
            SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
            free(pob);
            --poolst->ps_objs_all;
        }
#if LSQUIC_LOG_POOL_STATS
        LSQ_DEBUG("pool #%u; max avg %u; shrank from %u to %u objs",
                idx, poolst->ps_max_avg, n_to_leave * 2, poolst->ps_objs_all);
#endif
    }
#if LSQUIC_LOG_POOL_STATS
    else
        LSQ_DEBUG("pool #%u; max avg %u; objs: %u; won't shrink",
                                idx, poolst->ps_max_avg, poolst->ps_objs_all);
#endif
}
366#endif
367
368
369void
370lsquic_mm_put_packet_out (struct lsquic_mm *mm,
371                          struct lsquic_packet_out *packet_out)
372{
373#if LSQUIC_USE_POOLS
374    struct packet_out_buf *pob;
375    unsigned idx;
376
377    assert(packet_out->po_data);
378    pob = (struct packet_out_buf *) packet_out->po_data;
379    idx = packet_out_index(packet_out->po_n_alloc);
380    SLIST_INSERT_HEAD(&mm->packet_out_bufs[idx], pob, next_pob);
381    poolst_freed(&mm->packet_out_bstats[idx]);
382    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
383        maybe_shrink_packet_out_bufs(mm, idx);
384    if (packet_out->po_bwp_state)
385        lsquic_malo_put(packet_out->po_bwp_state);
386#else
387    free(packet_out->po_data);
388#endif
389    lsquic_malo_put(packet_out);
390}
391
392
393struct lsquic_packet_out *
394lsquic_mm_get_packet_out (struct lsquic_mm *mm, struct malo *malo,
395                          unsigned short size)
396{
397    struct lsquic_packet_out *packet_out;
398    struct packet_out_buf *pob;
399#if LSQUIC_USE_POOLS
400    unsigned idx;
401#endif
402
403    fiu_do_on("mm/packet_out", FAIL_NOMEM);
404
405    packet_out = lsquic_malo_get(malo ? malo : mm->malo.packet_out);
406    if (!packet_out)
407        return NULL;
408
409#if LSQUIC_USE_POOLS
410    idx = packet_out_index(size);
411    pob = SLIST_FIRST(&mm->packet_out_bufs[idx]);
412    if (pob)
413    {
414        SLIST_REMOVE_HEAD(&mm->packet_out_bufs[idx], next_pob);
415        poolst_allocated(&mm->packet_out_bstats[idx], 0);
416    }
417    else
418    {
419        pob = malloc(packet_out_sizes[idx]);
420        if (!pob)
421        {
422            lsquic_malo_put(packet_out);
423            return NULL;
424        }
425        poolst_allocated(&mm->packet_out_bstats[idx], 1);
426    }
427    if (poolst_has_new_sample(&mm->packet_out_bstats[idx]))
428        maybe_shrink_packet_out_bufs(mm, idx);
429#else
430    pob = malloc(size);
431    if (!pob)
432    {
433        lsquic_malo_put(packet_out);
434        return NULL;
435    }
436#endif
437
438    memset(packet_out, 0, sizeof(*packet_out));
439    packet_out->po_n_alloc = size;
440    packet_out->po_data = (unsigned char *) pob;
441
442    return packet_out;
443}
444
445
/* Get a buffer large enough for a `size'-byte incoming packet: reuse
 * one from the matching pool bucket when available, otherwise allocate
 * the bucket's full size.  Returns NULL on failure.
 */
void *
lsquic_mm_get_packet_in_buf (struct lsquic_mm *mm, size_t size)
{
    struct packet_in_buf *buf;
#if LSQUIC_USE_POOLS
    unsigned bucket;

    bucket = packet_in_index(size);
    buf = SLIST_FIRST(&mm->packet_in_bufs[bucket]);
    fiu_do_on("mm/packet_in_buf", FAIL_NOMEM);
    if (!buf)
        buf = malloc(packet_in_sizes[bucket]);
    else
        SLIST_REMOVE_HEAD(&mm->packet_in_bufs[bucket], next_pib);
#else
    buf = malloc(size);
#endif
    return buf;
}
465
466
/* Return a packet-in data buffer of original allocation size `size' to
 * the pool (or free it when pooling is disabled).
 */
void
lsquic_mm_put_packet_in_buf (struct lsquic_mm *mm, void *mem, size_t size)
{
#if LSQUIC_USE_POOLS
    struct packet_in_buf *const buf = mem;

    /* Overlay the free-list link on the buffer and stash it in the
     * bucket corresponding to its size:
     */
    SLIST_INSERT_HEAD(&mm->packet_in_bufs[packet_in_index(size)], buf,
                                                                next_pib);
#else
    free(mem);
#endif
}
481
482
/* Get a 4 KB page, reusing one from the pool when available.  Returns
 * NULL on allocation failure (or under fault injection).
 */
void *
lsquic_mm_get_4k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *page = SLIST_FIRST(&mm->four_k_pages);
    fiu_do_on("mm/4k", FAIL_NOMEM);
    if (!page)
        page = malloc(0x1000);
    else
        SLIST_REMOVE_HEAD(&mm->four_k_pages, next_fkp);
    return page;
#else
    return malloc(0x1000);
#endif
}
498
499
/* Return a 4 KB page to the pool (or free it when pooling is disabled). */
void
lsquic_mm_put_4k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct four_k_page *const page = mem;

    SLIST_INSERT_HEAD(&mm->four_k_pages, page, next_fkp);
#else
    free(mem);
#endif
}
510
511
/* Get a 16 KB page, reusing one from the pool when available.  Returns
 * NULL on allocation failure (or under fault injection).
 */
void *
lsquic_mm_get_16k (struct lsquic_mm *mm)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *page = SLIST_FIRST(&mm->sixteen_k_pages);
    fiu_do_on("mm/16k", FAIL_NOMEM);
    if (!page)
        page = malloc(16 * 1024);
    else
        SLIST_REMOVE_HEAD(&mm->sixteen_k_pages, next_skp);
    return page;
#else
    return malloc(16 * 1024);
#endif
}
527
528
/* Return a 16 KB page to the pool (or free it when pooling is disabled). */
void
lsquic_mm_put_16k (struct lsquic_mm *mm, void *mem)
{
#if LSQUIC_USE_POOLS
    struct sixteen_k_page *const page = mem;

    SLIST_INSERT_HEAD(&mm->sixteen_k_pages, page, next_skp);
#else
    free(mem);
#endif
}
539
540
541size_t
542lsquic_mm_mem_used (const struct lsquic_mm *mm)
543{
544#if LSQUIC_USE_POOLS
545    const struct packet_out_buf *pob;
546    const struct packet_in_buf *pib;
547    const struct four_k_page *fkp;
548    const struct sixteen_k_page *skp;
549    unsigned i;
550    size_t size;
551
552    size = sizeof(*mm);
553    size += sizeof(*mm->acki);
554    size += lsquic_malo_mem_used(mm->malo.stream_frame);
555    size += lsquic_malo_mem_used(mm->malo.stream_rec_arr);
556    size += lsquic_malo_mem_used(mm->malo.mini_conn);
557    size += lsquic_malo_mem_used(mm->malo.mini_conn_ietf);
558    size += lsquic_malo_mem_used(mm->malo.packet_in);
559    size += lsquic_malo_mem_used(mm->malo.packet_out);
560
561    for (i = 0; i < MM_N_OUT_BUCKETS; ++i)
562        SLIST_FOREACH(pob, &mm->packet_out_bufs[i], next_pob)
563            size += packet_out_sizes[i];
564
565    for (i = 0; i < MM_N_IN_BUCKETS; ++i)
566        SLIST_FOREACH(pib, &mm->packet_in_bufs[i], next_pib)
567            size += packet_in_sizes[i];
568
569    SLIST_FOREACH(fkp, &mm->four_k_pages, next_fkp)
570        size += 0x1000;
571
572    SLIST_FOREACH(skp, &mm->sixteen_k_pages, next_skp)
573        size += 0x4000;
574
575    return size;
576#else
577    return sizeof(*mm);
578#endif
579}
580