lsquic_send_ctl.c revision 33291708
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}

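/* Worked example: with srtt = 100,000 usec and rttvar = 5,000 usec, the
 * formula above yields 100,000 + 4 * 5,000 = 120,000 usec.  That is below
 * MIN_RTO_DELAY, so the delay is clamped up to 1,000,000 usec.  Before the
 * first RTT sample arrives, srtt is zero and DEFAULT_RETX_DELAY (500,000
 * usec) is used instead.
 */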

static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID,
                                    enpub->enp_settings.es_clock_granularity);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}

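/* A caller might initialize the controller as follows; the variable names
 * and the packet size are hypothetical, chosen only to illustrate the
 * signature above:
 *
 *     struct lsquic_send_ctl send_ctl;
 *     lsquic_send_ctl_init(&send_ctl, alset, enpub, ver_neg, conn_pub,
 *                          1370);
 *
 * Whether packets are paced is controlled engine-wide by the
 * es_pace_packets setting checked above.
 */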

static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}

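/* Worked example: after three consecutive RTOs (sc_n_consec_rtos == 3) and
 * a base delay of 1,000,000 usec, the backed-off delay is
 * 1,000,000 << 3 = 8,000,000 usec.  The exponent saturates at
 * MAX_RTO_BACKOFFS (10), so the delay never grows by more than a factor of
 * 1024 -- and set_retx_alarm() further caps it at MAX_RTO_DELAY.
 */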

static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}

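/* Worked example: with srtt = 30,000 usec and more than one packet in
 * flight, the delay is max(10,000, 2 * 30,000) = 60,000 usec.  With a
 * single packet in flight, it is max(30,000 + 15,000 + MIN_RTO_DELAY,
 * 60,000) = 1,045,000 usec.
 */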

static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
#ifdef WIN32
    default:
        delay = 0;
#endif
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}

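/* Worked example of the handshake branch above: with srtt = 80,000 usec
 * and one handshake retransmission already counted (sc_n_hsk == 1), the
 * delay is max(1.5 * 80,000, 10,000) << 1 = 240,000 usec.  With no RTT
 * sample yet, the 150,000-usec default is shifted instead.
 */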

static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}

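/* Worked example: with cwnd = 40,000 bytes and srtt = 100,000 usec, the
 * estimated bandwidth is 40,000 * 1,000,000 / 100,000 = 400,000 bytes per
 * second.  In slow start the pacing rate is doubled to 800,000 bytes per
 * second, so with a packet size of, say, 1,370 bytes the next packet is
 * scheduled 1,370 * 1,000,000 / 800,000 ~= 1,712 usec later.
 */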

static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}


static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}

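/* Example: if the largest acked packet was sent at t = 1,000,000 and the
 * ACK arrives at t = 1,120,000, the measured RTT is 120,000 usec.  The
 * sample is used only if this packet number is larger than any sampled
 * before and the peer's reported ACK delay (lack_delta) is smaller than
 * the measurement, which guards against a negative adjusted RTT.
 */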

static void
send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
        ctl->sc_conn_pub->lconn->cn_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                                        ctl->sc_conn_pub->lconn->cn_peer_ctx);
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);
    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
    if (packet_out->po_flags & PO_ENCRYPTED)
        send_ctl_release_enc_data(ctl, packet_out);
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                                lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}

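/* Worked example of the FACK check above: with N_NACKS_BEFORE_RETX == 3
 * and largest acked packet number 10, any unacked packet numbered below 7
 * (e.g. packet 6, since 6 + 3 < 10) is declared lost immediately, without
 * waiting for a timer.
 */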

int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size /* This
                    is the "maximum burst" parameter */
                    < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule ACK
         * and STOP_WAITING frame to chop the range if we get two of these in
         * a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}

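/* Note on the range walk above: acki->ranges[] is ordered from highest to
 * lowest, so the loop starts at the last (lowest) range and moves the
 * `range' pointer backward -- toward higher ranges -- as it steps through
 * the unacked list in increasing packet-number order.  For example, given
 * ACK ranges [1-3] and [7-9], packets 4 through 6 fall between the ranges
 * and stay on the unacked list for the loss-detection pass.
 */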

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *lost_packet;

  get_next_lost:
    lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
            if (lost_packet->po_regen_sz >= lost_packet->po_data_sz)
            {
                LSQ_DEBUG("Dropping packet %"PRIu64" from lost queue",
                    lost_packet->po_packno);
                TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
                send_ctl_destroy_packet(ctl, lost_packet);
                goto get_next_lost;
            }
        }

        if (!lsquic_send_ctl_can_send(ctl))
            return NULL;

        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
    }

    return lost_packet;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    unsigned n;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= packet_out_total_sz(packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    if (ctl->sc_flags & SC_PACE)
        pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         + ctl->sc_bytes_out;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}


#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
#ifdef WIN32
    default:
        n_resubmitted = 0;
#endif
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
 * to check whether the first scheduled packet cannot be sent.
 */
int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out
                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    return ctl->sc_n_consec_rtos
        && 0 == ctl->sc_next_limit
        && packet_out
        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    int dec_limit;

  get_packet:
    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            dec_limit = 1;
        else
            return NULL;
    }
    else
        dec_limit = 0;

    send_ctl_sched_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_REPACKNO)
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            update_for_resending(ctl, packet_out);
            packet_out->po_flags &= ~PO_REPACKNO;
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            goto get_packet;
        }
    }

    ctl->sc_bytes_out += packet_out_total_sz(packet_out);
    if (dec_limit)
    {
        --ctl->sc_next_limit;
        packet_out->po_flags |= PO_LIMITED;
    }
    else
        packet_out->po_flags &= ~PO_LIMITED;
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    if (packet_out->po_flags & PO_LIMITED)
        ++ctl->sc_next_limit;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        send_ctl_destroy_packet(ctl, packet_out);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        if (is_err)
            *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else if (is_err)
        *is_err = 1;
    return packet_out;
}


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{

    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        if (packet_out->po_flags & PO_SCHED)
            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while ((packet_out = send_ctl_next_lost(ctl)))
    {
        assert(packet_out->po_regen_sz < packet_out->po_data_sz);
        ++n;
#if LSQUIC_CONN_STATS
        ++ctl->sc_conn_pub->conn_stats->out.retx_packets;
#endif
        update_for_resending(ctl, packet_out);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n, adj;
    int dropped;

    dropped = 0;
#ifdef WIN32
    next = NULL;
#endif
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                send_ctl_destroy_packet(ctl, packet_out);
                ++dropped;
            }
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
            {
                lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
                if (0 == packet_out->po_frame_types)
                {
                    LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                        "frames for stream %"PRIu32, n, stream_id);
                    TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                                 packet_out, po_next);
                    --ctl->sc_buffered_packets[n].bpq_count;
                    send_ctl_destroy_packet(ctl, packet_out);
                    LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                              n, ctl->sc_buffered_packets[n].bpq_count);
                }
            }
        }
    }
}


/* Check whether any packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched() -- that is, whether there are any
 * delayed data packets.
 */
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


/* Call sites pass the queue first and the log prefix second; the parameter
 * names reflect that order.
 */
#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(q, p)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
    int dropped;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    dropped = 0;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            ++dropped;
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default: /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
        {
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size
                  - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}

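/* Note: the subtraction above must compute "window headroom minus packets
 * outstanding", in that order; `count' is unsigned, so subtracting the
 * larger value from the smaller would wrap around.  Worked example: with
 * cwnd = 40,000 bytes, sc_pack_size = 1,370 bytes, and 4 packets scheduled
 * or unacked, the headroom is 40,000 / 1,370 - 4 = 25 packets, and the
 * buffered queue may exceed MAX_BPQ_COUNT whenever that headroom is
 * larger.
 */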
1649
1650static void
1651send_ctl_move_ack (struct lsquic_send_ctl *ctl, struct lsquic_packet_out *dst,
1652                    struct lsquic_packet_out *src)
1653{
1654    assert(dst->po_data_sz == 0);
1655
1656    if (lsquic_packet_out_avail(dst) >= src->po_regen_sz)
1657    {
1658        memcpy(dst->po_data, src->po_data, src->po_regen_sz);
1659        dst->po_data_sz = src->po_regen_sz;
1660        dst->po_regen_sz = src->po_regen_sz;
1661        dst->po_frame_types |= (QFRAME_REGEN_MASK & src->po_frame_types);
1662        src->po_frame_types &= ~QFRAME_REGEN_MASK;
1663        lsquic_packet_out_chop_regen(src);
1664    }
1665}


static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                                        const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;
    enum { AA_STEAL, AA_GENERATE, AA_NONE, } ack_action;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    if (packet_q->bpq_count == 0)
    {
        /* If ACK was written to the low-priority queue first, steal it */
        if (packet_q == &ctl->sc_buffered_packets[BPT_HIGHEST_PRIO]
            && !TAILQ_EMPTY(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
            && (TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
                                        ->po_frame_types & QUIC_FTBIT_ACK))
        {
            LSQ_DEBUG("steal ACK frame from low-priority buffered queue");
            ack_action = AA_STEAL;
            bits = PACKNO_LEN_6;
        }
        /* If ACK can be generated, write it to the first buffered packet. */
        else if (lconn->cn_if->ci_can_write_ack(lconn))
        {
            LSQ_DEBUG("generate ACK frame for first buffered packet in "
                                                    "queue #%u", packet_type);
            ack_action = AA_GENERATE;
            /* Packet length is set to the largest possible size to guarantee
             * that the buffered packet with the ACK will not need to be
             * split.
             */
            bits = PACKNO_LEN_6;
        }
        else
            goto no_ack_action;
    }
    else
    {
  no_ack_action:
        ack_action = AA_NONE;
        bits = lsquic_send_ctl_guess_packno_bits(ctl);
    }

    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    switch (ack_action)
    {
    case AA_STEAL:
        send_ctl_move_ack(ctl, packet_out,
            TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets));
        break;
    case AA_GENERATE:
        lconn->cn_if->ci_write_ack(lconn, packet_out);
        break;
    case AA_NONE:
        break;
    }

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}
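
/* Summary of the ACK decision above (editorial sketch):
 *
 *     high-prio queue empty, ACK at head of OTHER_PRIO queue -> AA_STEAL
 *     high-prio queue empty, connection can generate an ACK  -> AA_GENERATE
 *     queue non-empty, or no ACK available                   -> AA_NONE
 *
 * In the first two cases the packet number length is pinned to
 * PACKNO_LEN_6, the largest encoding, so that assigning the real packet
 * number later can never force the ACK-bearing packet to be split.
 */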


lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_get_writeable_packet(ctl, need_at_least, NULL);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


int
lsquic_send_ctl_buffered_and_same_prio_as_headers (struct lsquic_send_ctl *ctl,
                                            const struct lsquic_stream *stream)
{
    return !lsquic_send_ctl_schedule_stream_packets_immediately(ctl)
        && BPT_HIGHEST_PRIO == send_ctl_lookup_bpt(ctl, stream);
}

#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
}
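
/* Hypothetical example: with sc_cur_packno == 99 (so the next packet is
 * 100), smallest unacked == 90, and a window of 40 packets, the chosen
 * encoding must let the peer disambiguate packet numbers across the
 * unacked range plus what may still be sent within the window;
 * calc_packno_bits() picks the smallest available length whose range
 * safely covers that span.
 */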


enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        lsquic_packet_out_set_packno_bits(packet_out, bits);
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        send_ctl_destroy_packet(ctl, new_packet_out);
        return -1;
    }
}
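
/* Note on the failure handling above (editorial sketch): the head packet
 * of the queue is split in place, so on success the tail half
 * (new_packet_out) is linked in right after the original and bpq_count
 * grows by one; on any failure it is the freshly allocated
 * new_packet_out that is destroyed, while the original buffered packet
 * stays queued untouched.
 */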


int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                            enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        packet_out->po_packno = send_ctl_next_packno(ctl);
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u.  "
            "It becomes packet %"PRIu64, packet_type, packet_q->bpq_count,
            packet_out->po_packno);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}
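
/* Search-order rationale (editorial note): the buffered queue is walked
 * in reverse because the stream's last frame, if present, is in the most
 * recently appended packet.  Among scheduled packets only unsent ones
 * (po_sent == 0) qualify: a FIN cannot be patched into bytes that may
 * already be on the wire.  Returns 0 if a STREAM frame was converted,
 * -1 if no suitable frame was found.
 */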


size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}

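
/* Usage sketch (hypothetical caller and field names): a per-engine
 * memory report might sum this across connections, e.g.:
 *
 *     size_t total = 0;
 *     TAILQ_FOREACH(conn, &conns, cn_next)     // hypothetical conn list
 *         total += lsquic_send_ctl_mem_used(send_ctl_of(conn));
 *
 * Copying the five tailq heads by value above is safe: TAILQ_FOREACH
 * only reads the tqh_first pointer stored in the copied head and then
 * follows per-element links.
 */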