lsquic_send_ctl.c revision 6aba801d
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

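/* There are four retransmission modes, tried in the order in which they
 * are listed below (see get_retx_mode()): handshake retransmission takes
 * precedence until the handshake is done, then loss retransmission when a
 * loss timer is pending, then up to two tail loss probes, and finally the
 * full retransmission timeout.
 */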
enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}


static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}

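/* The retransmission delay is the standard srtt + 4 * rttvar (cf. RFC
 * 6298), clamped from below by MIN_RTO_DELAY: for instance, srtt of 100
 * ms with rttvar of 10 ms yields 140 ms, which is then raised to the
 * one-second minimum.  Before the first RTT sample is available,
 * DEFAULT_RETX_DELAY is used instead.
 */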
static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}

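/* This alarm callback fires when the retransmission timer expires.  What
 * happens next depends on the retransmission mode: handshake
 * retransmission deliberately does not register a congestion event, while
 * a full RTO notifies CUBIC via lsquic_cubic_timeout().  If
 * retransmittable packets remain unacked afterwards, the alarm is armed
 * again.
 */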
static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID,
                                    enpub->enp_settings.es_clock_granularity);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}

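/* The RTO delay doubles with each consecutive RTO, the exponent being
 * capped at MAX_RTO_BACKOFFS.  With a one-second base delay and three
 * consecutive RTOs, for example, the next delay is eight seconds.  The
 * caller (set_retx_alarm()) additionally caps the result at
 * MAX_RTO_DELAY.
 */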
static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}

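/* Tail loss probe delay: with more than one packet in flight, this is
 * max(10 ms, 2 * srtt); with a single packet in flight, it is
 * max(1.5 * srtt + MIN_RTO_DELAY, 2 * srtt).  This mirrors the TLP logic
 * of the reference implementation.
 */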
281calculate_tlp_delay (lsquic_send_ctl_t *ctl)
282{
283    lsquic_time_t srtt, delay;
284
285    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
286    if (ctl->sc_n_in_flight_all > 1)
287    {
288        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
289        if (delay < 2 * srtt)
290            delay = 2 * srtt;
291    }
292    else
293    {
294        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
295        if (delay < 2 * srtt)
296            delay = 2 * srtt;
297    }
298
299    return delay;
300}
301
302
303static void
304set_retx_alarm (lsquic_send_ctl_t *ctl)
305{
306    enum retx_mode rm;
307    lsquic_time_t delay, now;
308
309    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));
310
311    now = lsquic_time_now();
312
313    rm = get_retx_mode(ctl);
314    switch (rm)
315    {
316    case RETX_MODE_HANDSHAKE:
317    /* [draft-iyengar-quic-loss-recovery-01]:
318     *
319     *  if (handshake packets are outstanding):
320     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
321     *      handshake_count++;
322     */
323        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
324        if (delay)
325        {
326            delay += delay / 2;
327            if (10000 > delay)
328                delay = 10000;
329        }
330        else
331            delay = 150000;
332        delay <<= ctl->sc_n_hsk;
333        ++ctl->sc_n_hsk;
334        break;
335    case RETX_MODE_LOSS:
336        delay = ctl->sc_loss_to;
337        break;
338    case RETX_MODE_TLP:
339        delay = calculate_tlp_delay(ctl);
340        break;
341    case RETX_MODE_RTO:
342        /* Base RTO on the first unacked packet, following reference
343         * implementation.
344         */
345        delay = calculate_packet_rto(ctl);
346        break;
347#ifdef WIN32
348    default:
349        delay = 0;
350#endif
351    }
352
353    if (delay > MAX_RTO_DELAY)
354        delay = MAX_RTO_DELAY;
355
356    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
357        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
358    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
359}
360
361
362static int
363send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
364{
365    return ctl->sc_largest_acked_packno
366        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
367}
368
369
370static int
371send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
372{
373    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
374}
375
376
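/* Pacer callback: estimate the time needed to transfer one full-size
 * packet.  Bandwidth is approximated as cwnd/srtt; the pacing rate is
 * twice that in slow start, equal to it in recovery, and 1.25 times it
 * otherwise.  For example, cwnd of 32 KB and srtt of 50 ms give about
 * 655 KB/s; outside slow start and recovery the rate is about 819 KB/s,
 * so a hypothetical 1370-byte packet would be paced roughly 1.7 ms apart.
 */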
static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}

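/* Take an RTT sample based on the largest newly acked packet.  The sample
 * is skipped if a later packet has already been sampled, or if the
 * peer-reported ACK delay (lack_delta) is not smaller than the measured
 * RTT, which would leave no valid delay-adjusted sample.
 */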
static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt, lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}


static void
send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
        ctl->sc_conn_pub->lconn->cn_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                                        ctl->sc_conn_pub->lconn->cn_peer_ctx);
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);
    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
    if (packet_out->po_flags & PO_ENCRYPTED)
        send_ctl_release_enc_data(ctl, packet_out);
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                                lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}

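/* Three loss-detection heuristics are applied to each unacked packet
 * whose number does not exceed the largest acked:
 *
 *  1. FACK: the packet trails the largest acked by more than
 *     N_NACKS_BEFORE_RETX packet numbers.
 *  2. Early retransmit: the largest retransmittable packet sent has
 *     already been acked; the packet is marked lost and a loss timer of
 *     srtt/4 is set (sc_loss_to).
 *  3. Sent time: the packet was sent more than srtt before the packet
 *     carrying the largest ACK.
 *
 * Only a loss newer than sc_largest_sent_at_cutback triggers a CUBIC loss
 * event; older losses belong to the same loss episode, following NewReno
 * logic (RFC 6582).
 */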
static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}

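/* Process an incoming ACK frame.  The frame is validated first: acking a
 * packet number that was never sent is an error.  The unacked queue is
 * then walked in order of ascending packet number while the ACK ranges
 * are walked from the top down -- cheaper than a binary search as long as
 * the number of ranges is not much larger than the number of unacked
 * packets.  Each newly acked packet updates CUBIC and the ack-of-ack
 * bookkeeping; the RTT sample is taken from the largest acked packet.
 * Loss detection runs afterwards.
 */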
int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl) + 3 * ctl->sc_pack_size /* This
                    is the "maximum burst" parameter */
                    < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule ACK
         * and STOP_WAITING frame to chop the range if we get two of these in
         * a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}

lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *lost_packet;

  get_next_lost:
    lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
            if (lost_packet->po_regen_sz >= lost_packet->po_data_sz)
            {
                LSQ_DEBUG("Dropping packet %"PRIu64" from lost queue",
                    lost_packet->po_packno);
                TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
                send_ctl_destroy_packet(ctl, lost_packet);
                goto get_next_lost;
            }
        }

        if (!lsquic_send_ctl_can_send(ctl))
            return NULL;

        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
    }

    return lost_packet;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    unsigned n;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= packet_out_total_sz(packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         + ctl->sc_bytes_out;
}


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}

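/* Sending is allowed as long as the total number of bytes out --
 * scheduled, unacked, and already passed to the network -- stays below
 * the CUBIC congestion window.  When pacing is on, the pacer must also
 * agree; if it does not, the connection is placed on the engine's
 * advisory tick queue so that it is woken up when the pacer permits the
 * next send.
 */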
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
#ifdef WIN32
    default:
        n_resubmitted = 0;
#endif
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}

void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now() < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif

void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
 * to check whether the first scheduled packet cannot be sent.
 */
int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out
                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    return ctl->sc_n_consec_rtos
        && 0 == ctl->sc_next_limit
        && packet_out
        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    int dec_limit;

  get_packet:
    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            dec_limit = 1;
        else
            return NULL;
    }
    else
        dec_limit = 0;

    send_ctl_sched_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_REPACKNO)
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            update_for_resending(ctl, packet_out);
            packet_out->po_flags &= ~PO_REPACKNO;
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            goto get_packet;
        }
    }

    ctl->sc_bytes_out += packet_out_total_sz(packet_out);
    if (dec_limit)
    {
        --ctl->sc_next_limit;
        packet_out->po_flags |= PO_LIMITED;
    }
    else
        packet_out->po_flags &= ~PO_LIMITED;
    return packet_out;
}

void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    if (packet_out->po_flags & PO_LIMITED)
        ++ctl->sc_next_limit;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        send_ctl_destroy_packet(ctl, packet_out);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        if (is_err)
            *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else if (is_err)
        *is_err = 1;
    return packet_out;
}

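/* Prepare a packet for retransmission: assign it the next packet number,
 * clear the sent-size flag, and drop the "regenerated" frame types (such
 * as ACK and STOP_WAITING), which are not retransmitted verbatim but
 * generated anew when needed.  These frames occupy the first po_regen_sz
 * bytes of the packet, which is why they can be chopped off the front.
 */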
static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{

    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        if (packet_out->po_flags & PO_SCHED)
            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while ((packet_out = send_ctl_next_lost(ctl)))
    {
        assert(packet_out->po_regen_sz < packet_out->po_data_sz);
        ++n;
#if LSQUIC_CONN_STATS
        ++ctl->sc_conn_pub->conn_stats->out.retx_packets;
#endif
        update_for_resending(ctl, packet_out);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}

/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n, adj;
    int dropped;

    dropped = 0;
#ifdef WIN32
    next = NULL;
#endif
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                send_ctl_destroy_packet(ctl, packet_out);
                ++dropped;
            }
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            assert(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM));
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                    "frames for stream %"PRIu32, n, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                send_ctl_destroy_packet(ctl, packet_out);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}

/* Count how many packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  This is the number of delayed data
 * packets.
 */
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}


#define LOG_PACKET_Q(prefix, queue) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, queue, prefix);                          \
} while (0)
#else
#define LOG_PACKET_Q(p, q)
#endif

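/* Squeeze the scheduled queue: packets that carry nothing besides
 * regenerated frames are dropped, and the survivors are marked with
 * PO_REPACKNO so that the packet number sequence has no gaps.
 */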
int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
    int dropped;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    dropped = 0;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            ++dropped;
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "scheduled packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}

void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 1 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}

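/* Maximum number of packets allowed on a buffered queue.  For the
 * highest-priority queue, the limit can grow with the available
 * congestion window: if fewer packets than cwnd/packet-size are scheduled
 * or in retransmittable flight, the remainder may be buffered, with
 * MAX_BPQ_COUNT as the floor.
 */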
static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default: /* clang does not complain about absence of `default'... */
        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
        {
            count = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size
                                                                    - count;
            if (count > MAX_BPQ_COUNT)
                return count;
        }
        return MAX_BPQ_COUNT;
    }
}

static void
send_ctl_move_ack (struct lsquic_send_ctl *ctl, struct lsquic_packet_out *dst,
                    struct lsquic_packet_out *src)
{
    assert(dst->po_data_sz == 0);

    if (lsquic_packet_out_avail(dst) >= src->po_regen_sz)
    {
        memcpy(dst->po_data, src->po_data, src->po_regen_sz);
        dst->po_data_sz = src->po_regen_sz;
        dst->po_regen_sz = src->po_regen_sz;
        dst->po_frame_types |= (QFRAME_REGEN_MASK & src->po_frame_types);
        src->po_frame_types &= ~QFRAME_REGEN_MASK;
        lsquic_packet_out_chop_regen(src);
    }
}

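/* Fetch -- or allocate -- a buffered packet with at least `need_at_least'
 * bytes of payload room.  When the first packet of a queue is allocated,
 * an ACK frame is placed into it, either stolen from the low-priority
 * queue or freshly generated.  In that case the packet number length is
 * set to the maximum (PACKNO_LEN_6), so that renumbering at scheduling
 * time can never force the packet to be split.
 */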
static lsquic_packet_out_t *
send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
                enum buf_packet_type packet_type, unsigned need_at_least,
                                        const struct lsquic_stream *stream)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;
    enum { AA_STEAL, AA_GENERATE, AA_NONE, } ack_action;

    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
        return NULL;

    if (packet_q->bpq_count == 0)
    {
        /* If ACK was written to the low-priority queue first, steal it */
        if (packet_q == &ctl->sc_buffered_packets[BPT_HIGHEST_PRIO]
            && !TAILQ_EMPTY(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
            && (TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
                                        ->po_frame_types & QUIC_FTBIT_ACK))
        {
            LSQ_DEBUG("steal ACK frame from low-priority buffered queue");
            ack_action = AA_STEAL;
            bits = PACKNO_LEN_6;
        }
        /* If ACK can be generated, write it to the first buffered packet. */
        else if (lconn->cn_if->ci_can_write_ack(lconn))
        {
            LSQ_DEBUG("generate ACK frame for first buffered packet in "
                                                    "queue #%u", packet_type);
            ack_action = AA_GENERATE;
            /* The packet number length is set to the largest possible
             * size to guarantee that the buffered packet with the ACK
             * will not need to be split.
             */
            bits = PACKNO_LEN_6;
        }
        else
            goto no_ack_action;
    }
    else
    {
  no_ack_action:
        ack_action = AA_NONE;
        bits = lsquic_send_ctl_guess_packno_bits(ctl);
    }

    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    switch (ack_action)
    {
    case AA_STEAL:
        send_ctl_move_ack(ctl, packet_out,
            TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets));
        break;
    case AA_GENERATE:
        lconn->cn_if->ci_write_ack(lconn, packet_out);
        break;
    case AA_NONE:
        break;
    }

    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
    ++packet_q->bpq_count;
    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
              packet_type, packet_q->bpq_count);
    return packet_out;
}


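/* Entry point used by stream code to obtain an outgoing packet to write
 * STREAM frames into.  An illustrative sketch of a caller, with
 * hypothetical names (actual frame generation lives in the stream
 * module):
 *
 *     lsquic_packet_out_t *packet_out;
 *
 *     packet_out = lsquic_send_ctl_get_packet_for_stream(ctl,
 *                                         frame_hdr_sz + 1, stream);
 *     if (!packet_out)
 *         return 0;              (no room now: stop writing)
 *     ... generate STREAM frame into packet_out ...
 *
 * Depending on whether stream packets are scheduled immediately, the
 * packet comes from the scheduled queue or from one of the buffered
 * queues.
 */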
lsquic_packet_out_t *
lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
                unsigned need_at_least, const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;

    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_get_writeable_packet(ctl, need_at_least, NULL);
    else
    {
        packet_type = send_ctl_lookup_bpt(ctl, stream);
        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
                                            stream);
    }
}


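/* Return true if stream packets are being buffered and `stream' maps to
 * the same buffered queue (the highest-priority one) that HEADERS stream
 * data goes to.
 */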
int
lsquic_send_ctl_buffered_and_same_prio_as_headers (struct lsquic_send_ctl *ctl,
                                            const struct lsquic_stream *stream)
{
    return !lsquic_send_ctl_schedule_stream_packets_immediately(ctl)
        && BPT_HIGHEST_PRIO == send_ctl_lookup_bpt(ctl, stream);
}


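/* Calculate the exact packet number encoding based on the distance between
 * the next packet number and the smallest unacked one, allowing for a
 * congestion window's worth of packets in flight.  The weak linkage in
 * debug builds presumably lets unit tests substitute their own version.
 */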
#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
{
    lsquic_packno_t smallest_unacked;
    unsigned n_in_flight;

    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
                                                            n_in_flight);
}


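/* When stream packets are scheduled immediately, their packet numbers are
 * about to be assigned and the encoding can be calculated exactly; for
 * buffered packets the encoding can only be guessed, because packet
 * numbers are assigned later, in lsquic_send_ctl_schedule_buffered().
 */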
enum lsquic_packno_bits
lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
{
    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
        return lsquic_send_ctl_calc_packno_bits(ctl);
    else
        return lsquic_send_ctl_guess_packno_bits(ctl);
}


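/* A buffered packet is created with a guessed packet number encoding.  If
 * the encoding calculated at scheduling time is larger, the header grows
 * and the payload may no longer fit.  This function moves `excess_bytes'
 * worth of data out of `packet_out' into a new packet, which is inserted
 * into the buffered queue right after it.
 */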
static int
split_buffered_packet (lsquic_send_ctl_t *ctl,
        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
        enum lsquic_packno_bits bits, unsigned excess_bytes)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *new_packet_out;

    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);

    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
    if (!new_packet_out)
        return -1;

    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
    {
        lsquic_packet_out_set_packno_bits(packet_out, bits);
        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
                           po_next);
        ++packet_q->bpq_count;
        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
                  packet_type, packet_q->bpq_count);
        return 0;
    }
    else
    {
        /* The split failed: free the unused new packet.  The original
         * packet stays on the buffered queue.
         */
        send_ctl_destroy_packet(ctl, new_packet_out);
        return -1;
    }
}


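/* Move packets from the buffered queue onto the scheduled queue for as
 * long as the congestion window allows.  Each packet is assigned its real
 * packet number here; a packet without enough room for the larger packet
 * number encoding is split first.
 */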
int
lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
                                            enum buf_packet_type packet_type)
{
    struct buf_packet_q *const packet_q =
                                    &ctl->sc_buffered_packets[packet_type];
    lsquic_packet_out_t *packet_out;
    unsigned used, excess;

    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
    const unsigned need = packno_bits2len(bits);

    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
                                            lsquic_send_ctl_can_send(ctl))
    {
        if (bits != lsquic_packet_out_packno_bits(packet_out))
        {
            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
            if (need > used
                && need - used > lsquic_packet_out_avail(packet_out))
            {
                excess = need - used - lsquic_packet_out_avail(packet_out);
                if (0 != split_buffered_packet(ctl, packet_type,
                                               packet_out, bits, excess))
                {
                    return -1;
                }
            }
        }
        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
        --packet_q->bpq_count;
        packet_out->po_packno = send_ctl_next_packno(ctl);
        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u.  "
            "It becomes packet %"PRIu64, packet_type, packet_q->bpq_count,
            packet_out->po_packno);
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    }

    return 0;
}


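/* Try to set the FIN bit on an existing unsent STREAM frame belonging to
 * `stream', saving the few bytes an empty FIN-only frame would cost.  The
 * buffered packets are searched newest first, then the unsent scheduled
 * packets.  Returns 0 on success and -1 if no suitable frame is found.
 */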
int
lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
                             const struct lsquic_stream *stream)
{
    enum buf_packet_type packet_type;
    struct buf_packet_q *packet_q;
    lsquic_packet_out_t *packet_out;
    const struct parse_funcs *pf;

    pf = ctl->sc_conn_pub->lconn->cn_pf;
    packet_type = send_ctl_lookup_bpt(ctl, stream);
    packet_q = &ctl->sc_buffered_packets[packet_type];

    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
                          lsquic_packets_tailq, po_next)
        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
            return 0;

    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (0 == packet_out->po_sent
            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
        {
            return 0;
        }

    return -1;
}


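/* Account for the memory used by the send controller: the structure
 * itself plus every packet on the scheduled, unacked, lost, and buffered
 * queues.
 */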
size_t
lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n;
    size_t size;
    const struct lsquic_packets_tailq queues[] = {
        ctl->sc_scheduled_packets,
        ctl->sc_unacked_packets,
        ctl->sc_lost_packets,
        ctl->sc_buffered_packets[0].bpq_packets,
        ctl->sc_buffered_packets[1].bpq_packets,
    };

    size = sizeof(*ctl);

    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
        TAILQ_FOREACH(packet_out, &queues[n], po_next)
            size += lsquic_packet_out_mem_used(packet_out);

    return size;
}