lsquic_send_ctl.c revision 4d83f5bd
/* Copyright (c) 2017 - 2019 LiteSpeed Technologies Inc.  See LICENSE. */
/*
 * lsquic_send_ctl.c -- Logic for sending and sent packets
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include "lsquic_types.h"
#include "lsquic_int_types.h"
#include "lsquic.h"
#include "lsquic_mm.h"
#include "lsquic_engine_public.h"
#include "lsquic_alarmset.h"
#include "lsquic_packet_common.h"
#include "lsquic_parse.h"
#include "lsquic_packet_out.h"
#include "lsquic_senhist.h"
#include "lsquic_rtt.h"
#include "lsquic_cubic.h"
#include "lsquic_pacer.h"
#include "lsquic_send_ctl.h"
#include "lsquic_util.h"
#include "lsquic_sfcw.h"
#include "lsquic_stream.h"
#include "lsquic_ver_neg.h"
#include "lsquic_ev_log.h"
#include "lsquic_conn.h"
#include "lsquic_conn_flow.h"
#include "lsquic_conn_public.h"
#include "lsquic_hash.h"

#define LSQUIC_LOGGER_MODULE LSQLM_SENDCTL
#define LSQUIC_LOG_CONN_ID ctl->sc_conn_pub->lconn->cn_cid
#include "lsquic_logger.h"

#define MAX_RESUBMITTED_ON_RTO  2
#define MAX_RTO_BACKOFFS        10
#define DEFAULT_RETX_DELAY      500000      /* Microseconds */
#define MAX_RTO_DELAY           60000000    /* Microseconds */
#define MIN_RTO_DELAY           1000000     /* Microseconds */
#define N_NACKS_BEFORE_RETX     3

#define packet_out_total_sz(p) \
                lsquic_packet_out_total_sz(ctl->sc_conn_pub->lconn, p)
#define packet_out_sent_sz(p) \
                lsquic_packet_out_sent_sz(ctl->sc_conn_pub->lconn, p)

enum retx_mode {
    RETX_MODE_HANDSHAKE,
    RETX_MODE_LOSS,
    RETX_MODE_TLP,
    RETX_MODE_RTO,
};


static const char *const retx2str[] = {
    [RETX_MODE_HANDSHAKE] = "RETX_MODE_HANDSHAKE",
    [RETX_MODE_LOSS]      = "RETX_MODE_LOSS",
    [RETX_MODE_TLP]       = "RETX_MODE_TLP",
    [RETX_MODE_RTO]       = "RETX_MODE_RTO",
};


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out);


enum expire_filter { EXFI_ALL, EXFI_HSK, EXFI_LAST, };


static void
send_ctl_expire (lsquic_send_ctl_t *, enum expire_filter);

static void
set_retx_alarm (lsquic_send_ctl_t *ctl);

static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time);

static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl);


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
int
lsquic_send_ctl_schedule_stream_packets_immediately (lsquic_send_ctl_t *ctl)
{
    return !(ctl->sc_flags & SC_BUFFER_STREAM);
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum lsquic_packno_bits
lsquic_send_ctl_guess_packno_bits (lsquic_send_ctl_t *ctl)
{
    return PACKNO_LEN_2;
}


int
lsquic_send_ctl_have_unacked_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_first_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static lsquic_packet_out_t *
send_ctl_last_unacked_retx_packet (const lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                            lsquic_packets_tailq, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out;
    return NULL;
}


static int
have_unacked_handshake_packets (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
        if (packet_out->po_flags & PO_HELLO)
            return 1;
    return 0;
}

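/* The retransmission mode is determined in priority order: handshake
 * retransmission while the handshake is incomplete, loss-based retransmission
 * when the early retransmit timer (sc_loss_to) is armed, then up to two tail
 * loss probes, and finally full RTO.
 */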
static enum retx_mode
get_retx_mode (lsquic_send_ctl_t *ctl)
{
    if (!(ctl->sc_conn_pub->lconn->cn_flags & LSCONN_HANDSHAKE_DONE)
                                    && have_unacked_handshake_packets(ctl))
        return RETX_MODE_HANDSHAKE;
    if (ctl->sc_loss_to)
        return RETX_MODE_LOSS;
    if (ctl->sc_n_tlp < 2)
        return RETX_MODE_TLP;
    return RETX_MODE_RTO;
}


static lsquic_time_t
get_retx_delay (const struct lsquic_rtt_stats *rtt_stats)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(rtt_stats);
    if (srtt)
    {
        delay = srtt + 4 * lsquic_rtt_stats_get_rttvar(rtt_stats);
        if (delay < MIN_RTO_DELAY)
            delay = MIN_RTO_DELAY;
    }
    else
        delay = DEFAULT_RETX_DELAY;

    return delay;
}
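/* For illustration: with srtt = 100,000 usec and rttvar = 10,000 usec, the
 * computed delay is 100,000 + 4 * 10,000 = 140,000 usec, which is then
 * clamped up to MIN_RTO_DELAY (one second).  Before the first RTT sample
 * arrives (srtt == 0), DEFAULT_RETX_DELAY of half a second is used.
 */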


static void
retx_alarm_rings (void *ctx, lsquic_time_t expiry, lsquic_time_t now)
{
    lsquic_send_ctl_t *ctl = ctx;
    lsquic_packet_out_t *packet_out;
    enum retx_mode rm;

    /* This is a callback -- before it is called, the alarm is unset */
    assert(!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));

    rm = get_retx_mode(ctl);
    LSQ_INFO("retx timeout, mode %s", retx2str[rm]);

    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
        send_ctl_expire(ctl, EXFI_HSK);
        /* Do not register cubic loss during handshake */
        break;
    case RETX_MODE_LOSS:
        send_ctl_detect_losses(ctl, lsquic_time_now());
        break;
    case RETX_MODE_TLP:
        ++ctl->sc_n_tlp;
        send_ctl_expire(ctl, EXFI_LAST);
        break;
    case RETX_MODE_RTO:
        ++ctl->sc_n_consec_rtos;
        ctl->sc_next_limit = 2;
        LSQ_DEBUG("packet RTO is %"PRIu64" usec", expiry);
        send_ctl_expire(ctl, EXFI_ALL);
        lsquic_cubic_timeout(&ctl->sc_cubic);
        break;
    }

    packet_out = send_ctl_first_unacked_retx_packet(ctl);
    if (packet_out)
        set_retx_alarm(ctl);
    lsquic_send_ctl_sanity_check(ctl);
}


void
lsquic_send_ctl_init (lsquic_send_ctl_t *ctl, struct lsquic_alarmset *alset,
          struct lsquic_engine_public *enpub, const struct ver_neg *ver_neg,
          struct lsquic_conn_public *conn_pub, unsigned short pack_size)
{
    unsigned i;
    memset(ctl, 0, sizeof(*ctl));
    TAILQ_INIT(&ctl->sc_scheduled_packets);
    TAILQ_INIT(&ctl->sc_unacked_packets);
    TAILQ_INIT(&ctl->sc_lost_packets);
    ctl->sc_enpub = enpub;
    ctl->sc_alset = alset;
    ctl->sc_ver_neg = ver_neg;
    ctl->sc_pack_size = pack_size;
    ctl->sc_conn_pub = conn_pub;
    if (enpub->enp_settings.es_pace_packets)
        ctl->sc_flags |= SC_PACE;
    lsquic_alarmset_init_alarm(alset, AL_RETX, retx_alarm_rings, ctl);
    lsquic_senhist_init(&ctl->sc_senhist);
    lsquic_cubic_init(&ctl->sc_cubic, LSQUIC_LOG_CONN_ID);
    if (ctl->sc_flags & SC_PACE)
        pacer_init(&ctl->sc_pacer, LSQUIC_LOG_CONN_ID, 100000);
    for (i = 0; i < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++i)
        TAILQ_INIT(&ctl->sc_buffered_packets[i].bpq_packets);
}


static lsquic_time_t
calculate_packet_rto (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t delay;

    delay = get_retx_delay(&ctl->sc_conn_pub->rtt_stats);

    unsigned exp = ctl->sc_n_consec_rtos;
    if (exp > MAX_RTO_BACKOFFS)
        exp = MAX_RTO_BACKOFFS;

    delay = delay * (1 << exp);

    return delay;
}
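/* For illustration: with a base retransmission delay of 500,000 usec, the
 * backoff above produces 500 ms, 1 s, 2 s, 4 s, and so on for consecutive
 * RTOs.  Because the exponent is capped at MAX_RTO_BACKOFFS (10), the
 * multiplier never exceeds 1 << 10; set_retx_alarm() additionally clamps
 * the final delay to MAX_RTO_DELAY (60 seconds).
 */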


static lsquic_time_t
calculate_tlp_delay (lsquic_send_ctl_t *ctl)
{
    lsquic_time_t srtt, delay;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (ctl->sc_n_in_flight_all > 1)
    {
        delay = 10000;  /* 10 ms is the minimum tail loss probe delay */
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }
    else
    {
        delay = srtt + srtt / 2 + MIN_RTO_DELAY;
        if (delay < 2 * srtt)
            delay = 2 * srtt;
    }

    return delay;
}
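/* For illustration: with several packets in flight and srtt = 30,000 usec,
 * the TLP delay is max(10,000, 2 * 30,000) = 60,000 usec.  With a single
 * packet in flight, the srtt + srtt/2 + MIN_RTO_DELAY term dominates:
 * 45,000 + 1,000,000 = 1,045,000 usec for the same srtt.
 */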


static void
set_retx_alarm (lsquic_send_ctl_t *ctl)
{
    enum retx_mode rm;
    lsquic_time_t delay, now;

    assert(!TAILQ_EMPTY(&ctl->sc_unacked_packets));

    now = lsquic_time_now();

    rm = get_retx_mode(ctl);
    switch (rm)
    {
    case RETX_MODE_HANDSHAKE:
    /* [draft-iyengar-quic-loss-recovery-01]:
     *
     *  if (handshake packets are outstanding):
     *      alarm_duration = max(1.5 * smoothed_rtt, 10ms) << handshake_count;
     *      handshake_count++;
     */
        delay = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
        if (delay)
        {
            delay += delay / 2;
            if (10000 > delay)
                delay = 10000;
        }
        else
            delay = 150000;
        delay <<= ctl->sc_n_hsk;
        ++ctl->sc_n_hsk;
        break;
    case RETX_MODE_LOSS:
        delay = ctl->sc_loss_to;
        break;
    case RETX_MODE_TLP:
        delay = calculate_tlp_delay(ctl);
        break;
    case RETX_MODE_RTO:
        /* Base RTO on the first unacked packet, following reference
         * implementation.
         */
        delay = calculate_packet_rto(ctl);
        break;
#ifdef WIN32
    default:
        delay = 0;
#endif
    }

    if (delay > MAX_RTO_DELAY)
        delay = MAX_RTO_DELAY;

    LSQ_DEBUG("set retx alarm to %"PRIu64", which is %"PRIu64
        " usec from now, mode %s", now + delay, delay, retx2str[rm]);
    lsquic_alarmset_set(ctl->sc_alset, AL_RETX, now + delay);
}
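/* For illustration of the handshake branch above: with srtt = 40,000 usec,
 * the base duration is max(1.5 * 40,000, 10,000) = 60,000 usec, doubled for
 * every handshake retransmission already performed (sc_n_hsk); without an
 * RTT sample, a fixed base of 150,000 usec is used instead.
 */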


static int
send_ctl_in_recovery (lsquic_send_ctl_t *ctl)
{
    return ctl->sc_largest_acked_packno
        && ctl->sc_largest_acked_packno <= ctl->sc_largest_sent_at_cutback;
}


static int
send_ctl_in_slow_start (lsquic_send_ctl_t *ctl)
{
    return lsquic_cubic_in_slow_start(&ctl->sc_cubic);
}


static lsquic_time_t
send_ctl_transfer_time (void *ctx)
{
    lsquic_send_ctl_t *const ctl = ctx;
    uint64_t bandwidth, pacing_rate;
    lsquic_time_t srtt, tx_time;
    unsigned long cwnd;

    srtt = lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats);
    if (srtt == 0)
        srtt = 50000;
    cwnd = lsquic_cubic_get_cwnd(&ctl->sc_cubic);
    bandwidth = cwnd * 1000000 / srtt;
    if (send_ctl_in_slow_start(ctl))
        pacing_rate = bandwidth * 2;
    else if (send_ctl_in_recovery(ctl))
        pacing_rate = bandwidth;
    else
        pacing_rate = bandwidth + bandwidth / 4;

    tx_time = (uint64_t) ctl->sc_pack_size * 1000000 / pacing_rate;
    LSQ_DEBUG("srtt: %"PRIu64"; ss: %d; rec: %d; cwnd: %lu; bandwidth: "
        "%"PRIu64"; tx_time: %"PRIu64, srtt, send_ctl_in_slow_start(ctl),
        send_ctl_in_recovery(ctl), cwnd, bandwidth, tx_time);
    return tx_time;
}
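/* Worked example of the pacing math above (illustrative numbers): with
 * cwnd = 40,000 bytes and srtt = 100,000 usec, bandwidth is
 * 40,000 * 1,000,000 / 100,000 = 400,000 bytes per second.  In slow start
 * the rate is doubled to 800,000; a 1,400-byte packet then costs
 * 1,400 * 1,000,000 / 800,000 = 1,750 usec of transfer time.
 */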


static void
send_ctl_unacked_append (struct lsquic_send_ctl *ctl,
                         struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_unacked_packets, packet_out, po_next);
    ctl->sc_bytes_unacked_all += packet_out_total_sz(packet_out);
    ctl->sc_n_in_flight_all  += 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx += packet_out_total_sz(packet_out);
        ++ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_unacked_remove (struct lsquic_send_ctl *ctl,
                     struct lsquic_packet_out *packet_out, unsigned packet_sz)
{
    TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
    assert(ctl->sc_bytes_unacked_all >= packet_sz);
    ctl->sc_bytes_unacked_all -= packet_sz;
    ctl->sc_n_in_flight_all  -= 1;
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        ctl->sc_bytes_unacked_retx -= packet_sz;
        --ctl->sc_n_in_flight_retx;
    }
}


static void
send_ctl_sched_Xpend_common (struct lsquic_send_ctl *ctl,
                      struct lsquic_packet_out *packet_out)
{
    packet_out->po_flags |= PO_SCHED;
    ++ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled += packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


static void
send_ctl_sched_append (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_TAIL(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_prepend (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, packet_out, po_next);
    send_ctl_sched_Xpend_common(ctl, packet_out);
}


static void
send_ctl_sched_remove (struct lsquic_send_ctl *ctl,
                       struct lsquic_packet_out *packet_out)
{
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, packet_out, po_next);
    packet_out->po_flags &= ~PO_SCHED;
    assert(ctl->sc_n_scheduled);
    --ctl->sc_n_scheduled;
    ctl->sc_bytes_scheduled -= packet_out_total_sz(packet_out);
    lsquic_send_ctl_sanity_check(ctl);
}


int
lsquic_send_ctl_sent_packet (lsquic_send_ctl_t *ctl,
                             struct lsquic_packet_out *packet_out, int account)
{
    char frames[lsquic_frame_types_str_sz];
    LSQ_DEBUG("packet %"PRIu64" has been sent (frame types: %s)",
        packet_out->po_packno, lsquic_frame_types_to_str(frames,
            sizeof(frames), packet_out->po_frame_types));
    if (account)
        ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    lsquic_senhist_add(&ctl->sc_senhist, packet_out->po_packno);
    send_ctl_unacked_append(ctl, packet_out);
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        if (!lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
            set_retx_alarm(ctl);
        if (ctl->sc_n_in_flight_retx == 1)
            ctl->sc_flags |= SC_WAS_QUIET;
    }
    /* TODO: Do we really want to use those for RTT info? Revisit this. */
    /* Hold on to packets that are not retransmittable because we need them
     * to sample RTT information.  They are released when ACK is received.
     */
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_total_sent;
#endif
    lsquic_send_ctl_sanity_check(ctl);
    return 0;
}


static void
take_rtt_sample (lsquic_send_ctl_t *ctl,
                 lsquic_time_t now, lsquic_time_t lack_delta)
{
    const lsquic_packno_t packno = ctl->sc_largest_acked_packno;
    const lsquic_time_t sent = ctl->sc_largest_acked_sent_time;
    const lsquic_time_t measured_rtt = now - sent;
    if (packno > ctl->sc_max_rtt_packno && lack_delta < measured_rtt)
    {
        ctl->sc_max_rtt_packno = packno;
        lsquic_rtt_stats_update(&ctl->sc_conn_pub->rtt_stats, measured_rtt,
                                                                lack_delta);
        LSQ_DEBUG("packno %"PRIu64"; rtt: %"PRIu64"; delta: %"PRIu64"; "
            "new srtt: %"PRIu64, packno, measured_rtt, lack_delta,
            lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats));
    }
}
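/* Note on the guard above: lack_delta is the delay the peer reports between
 * receiving the largest acked packet and sending the ACK.  A sample is taken
 * only for a new largest packet number and only when the reported delay is
 * smaller than the measured round trip, so that accounting for it cannot
 * drive the RTT estimate below zero.
 */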


static void
send_ctl_release_enc_data (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    ctl->sc_enpub->enp_pmi->pmi_release(ctl->sc_enpub->enp_pmi_ctx,
        ctl->sc_conn_pub->lconn->cn_peer_ctx, packet_out->po_enc_data,
        lsquic_packet_out_ipv6(packet_out));
    packet_out->po_flags &= ~PO_ENCRYPTED;
    packet_out->po_enc_data = NULL;
}


static void
send_ctl_destroy_packet (struct lsquic_send_ctl *ctl,
                                        struct lsquic_packet_out *packet_out)
{
    lsquic_packet_out_destroy(packet_out, ctl->sc_enpub,
                                        ctl->sc_conn_pub->lconn->cn_peer_ctx);
}


/* Returns true if packet was rescheduled, false otherwise.  In the latter
 * case, you should not dereference packet_out after the function returns.
 */
static int
send_ctl_handle_lost_packet (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    unsigned packet_sz;

    assert(ctl->sc_n_in_flight_all);
    packet_sz = packet_out_sent_sz(packet_out);
    send_ctl_unacked_remove(ctl, packet_out, packet_sz);
    if (packet_out->po_flags & PO_ENCRYPTED)
        send_ctl_release_enc_data(ctl, packet_out);
    if (packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))
    {
        ctl->sc_flags |= SC_LOST_ACK;
        LSQ_DEBUG("lost ACK in packet %"PRIu64, packet_out->po_packno);
    }
    if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
    {
        LSQ_DEBUG("lost retransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        TAILQ_INSERT_TAIL(&ctl->sc_lost_packets, packet_out, po_next);
        return 1;
    }
    else
    {
        LSQ_DEBUG("lost unretransmittable packet %"PRIu64,
                                                    packet_out->po_packno);
        send_ctl_destroy_packet(ctl, packet_out);
        return 0;
    }
}


static lsquic_packno_t
largest_retx_packet_number (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH_REVERSE(packet_out, &ctl->sc_unacked_packets,
                                                lsquic_packets_tailq, po_next)
    {
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return packet_out->po_packno;
    }
    return 0;
}


static void
send_ctl_detect_losses (lsquic_send_ctl_t *ctl, lsquic_time_t time)
{
    lsquic_packet_out_t *packet_out, *next;
    lsquic_packno_t largest_retx_packno, largest_lost_packno;

    largest_retx_packno = largest_retx_packet_number(ctl);
    largest_lost_packno = 0;
    ctl->sc_loss_to = 0;

    for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
            packet_out && packet_out->po_packno <= ctl->sc_largest_acked_packno;
                packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_packno + N_NACKS_BEFORE_RETX <
                                                ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by FACK detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (largest_retx_packno
            && (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            && largest_retx_packno <= ctl->sc_largest_acked_packno)
        {
            LSQ_DEBUG("loss by early retransmit detected, packet %"PRIu64,
                                                    packet_out->po_packno);
            largest_lost_packno = packet_out->po_packno;
            ctl->sc_loss_to =
                lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats) / 4;
            LSQ_DEBUG("set sc_loss_to to %"PRIu64", packet %"PRIu64,
                                    ctl->sc_loss_to, packet_out->po_packno);
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }

        if (ctl->sc_largest_acked_sent_time > packet_out->po_sent +
                    lsquic_rtt_stats_get_srtt(&ctl->sc_conn_pub->rtt_stats))
        {
            LSQ_DEBUG("loss by sent time detected: packet %"PRIu64,
                                                    packet_out->po_packno);
            if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
                largest_lost_packno = packet_out->po_packno;
            else { /* don't count it as a loss */; }
            (void) send_ctl_handle_lost_packet(ctl, packet_out);
            continue;
        }
    }

    if (largest_lost_packno > ctl->sc_largest_sent_at_cutback)
    {
        LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
        lsquic_cubic_loss(&ctl->sc_cubic);
        if (ctl->sc_flags & SC_PACE)
            pacer_loss_event(&ctl->sc_pacer);
        ctl->sc_largest_sent_at_cutback =
                                lsquic_senhist_largest(&ctl->sc_senhist);
    }
    else if (largest_lost_packno)
        /* Lost packets whose numbers are smaller than the largest packet
         * number sent at the time of the last loss event indicate the same
         * loss event.  This follows NewReno logic, see RFC 6582.
         */
        LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
            "%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
}
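/* Example of the FACK check above: with N_NACKS_BEFORE_RETX equal to 3 and
 * largest acked packet number 10, packets 1 through 6 satisfy
 * packno + 3 < 10 and are declared lost, while packets 7 through 10 are
 * given more time.
 */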


int
lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
                         const struct ack_info *acki,
                         lsquic_time_t ack_recv_time)
{
    struct lsquic_packets_tailq acked_acks =
                                    TAILQ_HEAD_INITIALIZER(acked_acks);
    const struct lsquic_packno_range *range =
                                    &acki->ranges[ acki->n_ranges - 1 ];
    lsquic_packet_out_t *packet_out, *next;
    lsquic_time_t now = 0;
    lsquic_packno_t smallest_unacked;
    lsquic_packno_t ack2ed[2];
    unsigned packet_sz;
    int app_limited;
    signed char do_rtt, skip_checks;

    packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets);
#if __GNUC__
    __builtin_prefetch(packet_out);
#endif

#if __GNUC__
#   define UNLIKELY(cond) __builtin_expect(cond, 0)
#else
#   define UNLIKELY(cond) cond
#endif

#if __GNUC__
    if (UNLIKELY(LSQ_LOG_ENABLED(LSQ_LOG_DEBUG)))
#endif
        LSQ_DEBUG("Got ACK frame, largest acked: %"PRIu64"; delta: %"PRIu64,
                            largest_acked(acki), acki->lack_delta);

    /* Validate ACK first: */
    if (UNLIKELY(largest_acked(acki)
                                > lsquic_senhist_largest(&ctl->sc_senhist)))
    {
        LSQ_INFO("at least one packet in ACK range [%"PRIu64" - %"PRIu64"] "
            "was never sent", acki->ranges[0].low, acki->ranges[0].high);
        return -1;
    }

    if (UNLIKELY(ctl->sc_flags & SC_WAS_QUIET))
    {
        ctl->sc_flags &= ~SC_WAS_QUIET;
        LSQ_DEBUG("ACK comes after a period of quiescence");
        if (!now)
            now = lsquic_time_now();
        lsquic_cubic_was_quiet(&ctl->sc_cubic, now);
    }

    if (UNLIKELY(!packet_out))
        goto no_unacked_packets;

    smallest_unacked = packet_out->po_packno;
    ack2ed[1] = 0;

    if (packet_out->po_packno > largest_acked(acki))
        goto detect_losses;

    do_rtt = 0, skip_checks = 0;
    app_limited = -1;
    do
    {
        next = TAILQ_NEXT(packet_out, po_next);
#if __GNUC__
        __builtin_prefetch(next);
#endif
        if (skip_checks)
            goto after_checks;
        /* This is faster than binary search in the normal case when the number
         * of ranges is not much larger than the number of unacked packets.
         */
        while (UNLIKELY(range->high < packet_out->po_packno))
            --range;
        if (range->low <= packet_out->po_packno)
        {
            skip_checks = range == acki->ranges;
            if (app_limited < 0)
                app_limited = send_ctl_retx_bytes_out(ctl)
                    + 3 * ctl->sc_pack_size /* the "maximum burst" parameter */
                    < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
            if (!now)
                now = lsquic_time_now();
  after_checks:
            packet_sz = packet_out_sent_sz(packet_out);
            ctl->sc_largest_acked_packno    = packet_out->po_packno;
            ctl->sc_largest_acked_sent_time = packet_out->po_sent;
            send_ctl_unacked_remove(ctl, packet_out, packet_sz);
            ack2ed[!!(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK))]
                = packet_out->po_ack2ed;
            do_rtt |= packet_out->po_packno == largest_acked(acki);
            lsquic_cubic_ack(&ctl->sc_cubic, now, now - packet_out->po_sent,
                             app_limited, packet_sz);
            lsquic_packet_out_ack_streams(packet_out);
            send_ctl_destroy_packet(ctl, packet_out);
        }
        packet_out = next;
    }
    while (packet_out && packet_out->po_packno <= largest_acked(acki));

    if (do_rtt)
    {
        take_rtt_sample(ctl, ack_recv_time, acki->lack_delta);
        ctl->sc_n_consec_rtos = 0;
        ctl->sc_n_hsk = 0;
        ctl->sc_n_tlp = 0;
    }

  detect_losses:
    send_ctl_detect_losses(ctl, ack_recv_time);
    if (send_ctl_first_unacked_retx_packet(ctl))
        set_retx_alarm(ctl);
    else
    {
        LSQ_DEBUG("No retransmittable packets: clear alarm");
        lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    }
    lsquic_send_ctl_sanity_check(ctl);

    if ((ctl->sc_flags & SC_NSTP) && ack2ed[1] > ctl->sc_largest_ack2ed)
        ctl->sc_largest_ack2ed = ack2ed[1];

    if (ctl->sc_n_in_flight_retx == 0)
        ctl->sc_flags |= SC_WAS_QUIET;

  update_n_stop_waiting:
    if (smallest_unacked > smallest_acked(acki))
        /* Peer is acking packets that have been acked already.  Schedule ACK
         * and STOP_WAITING frame to chop the range if we get two of these in
         * a row.
         */
        ++ctl->sc_n_stop_waiting;
    else
        ctl->sc_n_stop_waiting = 0;
    lsquic_send_ctl_sanity_check(ctl);
    return 0;

  no_unacked_packets:
    smallest_unacked = lsquic_senhist_largest(&ctl->sc_senhist) + 1;
    ctl->sc_flags |= SC_WAS_QUIET;
    goto update_n_stop_waiting;
}


lsquic_packno_t
lsquic_send_ctl_smallest_unacked (lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;

    /* Packets are always sent out in order (unless we are reordering them
     * on purpose).  Thus, the first packet on the unacked packets list has
     * the smallest packet number of all packets on that list.
     */
    if ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
        return packet_out->po_packno;
    else
        return lsquic_senhist_largest(&ctl->sc_senhist) + 1;
}


static struct lsquic_packet_out *
send_ctl_next_lost (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *lost_packet = TAILQ_FIRST(&ctl->sc_lost_packets);
    if (lost_packet)
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, lost_packet, po_next);
        if (lost_packet->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            lsquic_packet_out_elide_reset_stream_frames(lost_packet, 0);
        }
        return lost_packet;
    }
    else
        return NULL;
}


static lsquic_packno_t
send_ctl_next_packno (lsquic_send_ctl_t *ctl)
{
    return ++ctl->sc_cur_packno;
}


void
lsquic_send_ctl_cleanup (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out, *next;
    unsigned n;
    lsquic_senhist_cleanup(&ctl->sc_senhist);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    assert(0 == ctl->sc_bytes_scheduled);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_unacked_packets, packet_out, po_next);
        ctl->sc_bytes_unacked_all -= packet_out_total_sz(packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
        --ctl->sc_n_in_flight_all;
    }
    assert(0 == ctl->sc_n_in_flight_all);
    assert(0 == ctl->sc_bytes_unacked_all);
    while ((packet_out = TAILQ_FIRST(&ctl->sc_lost_packets)))
    {
        TAILQ_REMOVE(&ctl->sc_lost_packets, packet_out, po_next);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }
    pacer_cleanup(&ctl->sc_pacer);
#if LSQUIC_SEND_STATS
    LSQ_NOTICE("stats: n_total_sent: %u; n_resent: %u; n_delayed: %u",
        ctl->sc_stats.n_total_sent, ctl->sc_stats.n_resent,
        ctl->sc_stats.n_delayed);
#endif
}


static unsigned
send_ctl_retx_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_retx
         + ctl->sc_bytes_out;
}


static unsigned
send_ctl_all_bytes_out (const struct lsquic_send_ctl *ctl)
{
    return ctl->sc_bytes_scheduled
         + ctl->sc_bytes_unacked_all
         + ctl->sc_bytes_out;
}
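/* The two helpers above combine three buckets of outstanding bytes: bytes
 * still sitting on the scheduled queue, bytes in flight (unacked), and
 * sc_bytes_out, which covers packets handed to the send loop but not yet
 * reported back via lsquic_send_ctl_sent_packet() or
 * lsquic_send_ctl_delayed_one().
 */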


int
lsquic_send_ctl_pacer_blocked (struct lsquic_send_ctl *ctl)
{
    return (ctl->sc_flags & SC_PACE)
        && !pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all);
}


#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_can_send (lsquic_send_ctl_t *ctl)
{
    const unsigned n_out = send_ctl_all_bytes_out(ctl);
    LSQ_DEBUG("%s: n_out: %u (unacked_all: %u, out: %u); cwnd: %lu", __func__,
        n_out, ctl->sc_bytes_unacked_all, ctl->sc_bytes_out,
        lsquic_cubic_get_cwnd(&ctl->sc_cubic));
    if (ctl->sc_flags & SC_PACE)
    {
        if (n_out >= lsquic_cubic_get_cwnd(&ctl->sc_cubic))
            return 0;
        if (pacer_can_schedule(&ctl->sc_pacer,
                               ctl->sc_n_scheduled + ctl->sc_n_in_flight_all))
            return 1;
        if (ctl->sc_flags & SC_SCHED_TICK)
        {
            ctl->sc_flags &= ~SC_SCHED_TICK;
            lsquic_engine_add_conn_to_attq(ctl->sc_enpub,
                    ctl->sc_conn_pub->lconn, pacer_next_sched(&ctl->sc_pacer));
        }
        return 0;
    }
    else
        return n_out < lsquic_cubic_get_cwnd(&ctl->sc_cubic);
}


static void
send_ctl_expire (lsquic_send_ctl_t *ctl, enum expire_filter filter)
{
    lsquic_packet_out_t *packet_out, *next;
    int n_resubmitted;
    static const char *const filter_type2str[] = {
        [EXFI_ALL] = "all",
        [EXFI_HSK] = "handshake",
        [EXFI_LAST] = "last",
    };

    switch (filter)
    {
    case EXFI_ALL:
        n_resubmitted = 0;
        while ((packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets)))
            n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        break;
    case EXFI_HSK:
        n_resubmitted = 0;
        for (packet_out = TAILQ_FIRST(&ctl->sc_unacked_packets); packet_out;
                                                            packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            if (packet_out->po_flags & PO_HELLO)
                n_resubmitted += send_ctl_handle_lost_packet(ctl, packet_out);
        }
        break;
    case EXFI_LAST:
        packet_out = send_ctl_last_unacked_retx_packet(ctl);
        if (packet_out)
            n_resubmitted = send_ctl_handle_lost_packet(ctl, packet_out);
        else
            n_resubmitted = 0;
        break;
#ifdef WIN32
    default:
        n_resubmitted = 0;
#endif
    }

    LSQ_DEBUG("consider %s packets lost: %d resubmitted",
                                    filter_type2str[filter], n_resubmitted);
}


void
lsquic_send_ctl_expire_all (lsquic_send_ctl_t *ctl)
{
    lsquic_alarmset_unset(ctl->sc_alset, AL_RETX);
    send_ctl_expire(ctl, EXFI_ALL);
    lsquic_send_ctl_sanity_check(ctl);
}


#if LSQUIC_EXTRA_CHECKS
void
lsquic_send_ctl_sanity_check (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    unsigned count, bytes;

    assert(!send_ctl_first_unacked_retx_packet(ctl) ||
                    lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX));
    if (lsquic_alarmset_is_set(ctl->sc_alset, AL_RETX))
    {
        assert(send_ctl_first_unacked_retx_packet(ctl));
        assert(lsquic_time_now()
                    < ctl->sc_alset->as_expiry[AL_RETX] + MAX_RTO_DELAY);
    }

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_unacked_packets, po_next)
    {
        bytes += packet_out_sent_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_in_flight_all);
    assert(bytes == ctl->sc_bytes_unacked_all);

    count = 0, bytes = 0;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
    {
        assert(packet_out->po_flags & PO_SCHED);
        bytes += packet_out_total_sz(packet_out);
        ++count;
    }
    assert(count == ctl->sc_n_scheduled);
    assert(bytes == ctl->sc_bytes_scheduled);
}


#endif


void
lsquic_send_ctl_scheduled_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
#ifndef NDEBUG
    const lsquic_packet_out_t *last;
    last = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    if (last)
        assert((last->po_flags & PO_REPACKNO) ||
                last->po_packno < packet_out->po_packno);
#endif
    if (ctl->sc_flags & SC_PACE)
    {
        unsigned n_out = ctl->sc_n_in_flight_retx + ctl->sc_n_scheduled;
        pacer_packet_scheduled(&ctl->sc_pacer, n_out,
            send_ctl_in_recovery(ctl), send_ctl_transfer_time, ctl);
    }
    send_ctl_sched_append(ctl, packet_out);
}


/* This mimics the logic in lsquic_send_ctl_next_packet_to_send(): we want
 * to check whether the first scheduled packet cannot be sent.
 */
int
lsquic_send_ctl_sched_is_blocked (const struct lsquic_send_ctl *ctl)
{
    const lsquic_packet_out_t *packet_out
                            = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    return ctl->sc_n_consec_rtos
        && 0 == ctl->sc_next_limit
        && packet_out
        && !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK));
}


lsquic_packet_out_t *
lsquic_send_ctl_next_packet_to_send (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    int dec_limit;

  get_packet:
    packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets);
    if (!packet_out)
        return NULL;

    if (ctl->sc_n_consec_rtos &&
                    !(packet_out->po_frame_types & (1 << QUIC_FRAME_ACK)))
    {
        if (ctl->sc_next_limit)
            dec_limit = 1;
        else
            return NULL;
    }
    else
        dec_limit = 0;

    send_ctl_sched_remove(ctl, packet_out);
    if (packet_out->po_flags & PO_REPACKNO)
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
#if LSQUIC_CONN_STATS
            ++ctl->sc_conn_pub->conn_stats->out.retx_packets;
#endif
            update_for_resending(ctl, packet_out);
            packet_out->po_flags &= ~PO_REPACKNO;
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            goto get_packet;
        }
    }

    ctl->sc_bytes_out += packet_out_total_sz(packet_out);
    if (dec_limit)
    {
        --ctl->sc_next_limit;
        packet_out->po_flags |= PO_LIMITED;
    }
    else
        packet_out->po_flags &= ~PO_LIMITED;
    return packet_out;
}


void
lsquic_send_ctl_delayed_one (lsquic_send_ctl_t *ctl,
                                            lsquic_packet_out_t *packet_out)
{
    send_ctl_sched_prepend(ctl, packet_out);
    ctl->sc_bytes_out -= packet_out_total_sz(packet_out);
    if (packet_out->po_flags & PO_LIMITED)
        ++ctl->sc_next_limit;
    LSQ_DEBUG("packet %"PRIu64" has been delayed", packet_out->po_packno);
#if LSQUIC_SEND_STATS
    ++ctl->sc_stats.n_delayed;
#endif
}


int
lsquic_send_ctl_have_outgoing_stream_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types &
                    ((1 << QUIC_FRAME_STREAM) | (1 << QUIC_FRAME_RST_STREAM)))
            return 1;
    return 0;
}


int
lsquic_send_ctl_have_outgoing_retx_frames (const lsquic_send_ctl_t *ctl)
{
    const lsquic_packet_out_t *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_frame_types & QFRAME_RETRANSMITTABLE_MASK)
            return 1;
    return 0;
}


static lsquic_packet_out_t *
send_ctl_allocate_packet (lsquic_send_ctl_t *ctl, enum lsquic_packno_bits bits,
                                                        unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;

    packet_out = lsquic_packet_out_new(&ctl->sc_enpub->enp_mm,
                    ctl->sc_conn_pub->packet_out_malo,
                    !(ctl->sc_flags & SC_TCID0), ctl->sc_conn_pub->lconn, bits,
                    ctl->sc_ver_neg->vn_tag, NULL);
    if (!packet_out)
        return NULL;

    if (need_at_least && lsquic_packet_out_avail(packet_out) < need_at_least)
    {   /* This should never happen, this is why this check is performed at
         * this level and not lower, before the packet is actually allocated.
         */
        LSQ_ERROR("wanted to allocate packet with at least %u bytes of "
            "payload, but only got %u bytes (mtu: %u bytes)", need_at_least,
            lsquic_packet_out_avail(packet_out), ctl->sc_pack_size);
        send_ctl_destroy_packet(ctl, packet_out);
        return NULL;
    }

    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_new_packet_out (lsquic_send_ctl_t *ctl, unsigned need_at_least)
{
    lsquic_packet_out_t *packet_out;
    enum lsquic_packno_bits bits;

    bits = lsquic_send_ctl_packno_bits(ctl);
    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
    if (!packet_out)
        return NULL;

    packet_out->po_packno = send_ctl_next_packno(ctl);
    LSQ_DEBUG("created packet %"PRIu64, packet_out->po_packno);
    EV_LOG_PACKET_CREATED(LSQUIC_LOG_CONN_ID, packet_out);
    return packet_out;
}


lsquic_packet_out_t *
lsquic_send_ctl_get_writeable_packet (lsquic_send_ctl_t *ctl,
                                      unsigned need_at_least, int *is_err)
{
    lsquic_packet_out_t *packet_out;

    assert(need_at_least > 0);

    packet_out = lsquic_send_ctl_last_scheduled(ctl);
    if (packet_out
        && !(packet_out->po_flags & PO_STREAM_END)
        && lsquic_packet_out_avail(packet_out) >= need_at_least)
    {
        return packet_out;
    }

    if (!lsquic_send_ctl_can_send(ctl))
    {
        if (is_err)
            *is_err = 0;
        return NULL;
    }

    packet_out = lsquic_send_ctl_new_packet_out(ctl, need_at_least);
    if (packet_out)
        lsquic_send_ctl_scheduled_one(ctl, packet_out);
    else if (is_err)
        *is_err = 1;
    return packet_out;
}


static void
update_for_resending (lsquic_send_ctl_t *ctl, lsquic_packet_out_t *packet_out)
{

    lsquic_packno_t oldno, packno;

    /* When the packet is resent, it uses the same number of bytes to encode
     * the packet number as the original packet.  This follows the reference
     * implementation.
     */
    oldno = packet_out->po_packno;
    packno = send_ctl_next_packno(ctl);

    packet_out->po_flags &= ~PO_SENT_SZ;
    packet_out->po_frame_types &= ~QFRAME_REGEN_MASK;
    assert(packet_out->po_frame_types);
    packet_out->po_packno = packno;

    if (ctl->sc_ver_neg->vn_tag)
    {
        assert(packet_out->po_flags & PO_VERSION);  /* It can only disappear */
        packet_out->po_ver_tag = *ctl->sc_ver_neg->vn_tag;
    }

    assert(packet_out->po_regen_sz < packet_out->po_data_sz);
    if (packet_out->po_regen_sz)
    {
        if (packet_out->po_flags & PO_SCHED)
            ctl->sc_bytes_scheduled -= packet_out->po_regen_sz;
        lsquic_packet_out_chop_regen(packet_out);
    }
    LSQ_DEBUG("Packet %"PRIu64" repackaged for resending as packet %"PRIu64,
                                                            oldno, packno);
    EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "packet %"PRIu64" repackaged for "
        "resending as packet %"PRIu64, oldno, packno);
}
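/* Note: po_regen_sz counts the bytes of frames at the beginning of the
 * packet that are regenerated rather than retransmitted, such as the ACK
 * frame.  update_for_resending() chops them off above, since fresh versions
 * are written when the packet goes out again.
 */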


unsigned
lsquic_send_ctl_reschedule_packets (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    unsigned n = 0;

    while (lsquic_send_ctl_can_send(ctl) &&
                                (packet_out = send_ctl_next_lost(ctl)))
    {
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            ++n;
            update_for_resending(ctl, packet_out);
            lsquic_send_ctl_scheduled_one(ctl, packet_out);
        }
        else
        {
            LSQ_DEBUG("Dropping packet %"PRIu64" from unacked queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
        }
    }

    if (n)
        LSQ_DEBUG("rescheduled %u packets", n);

    return n;
}


void
lsquic_send_ctl_set_tcid0 (lsquic_send_ctl_t *ctl, int tcid0)
{
    if (tcid0)
    {
        LSQ_INFO("set TCID flag");
        ctl->sc_flags |=  SC_TCID0;
    }
    else
    {
        LSQ_INFO("unset TCID flag");
        ctl->sc_flags &= ~SC_TCID0;
    }
}


/* The controller elides the STREAM frames of stream `stream_id' from
 * scheduled and buffered packets.  If a packet becomes empty as a result,
 * it is dropped.
 *
 * Packets on other queues do not need to be processed: unacked packets
 * have already been sent, and lost packets' reset stream frames will be
 * elided in due time.
 */
void
lsquic_send_ctl_elide_stream_frames (lsquic_send_ctl_t *ctl, uint32_t stream_id)
{
    struct lsquic_packet_out *packet_out, *next;
    unsigned n, adj;
    int dropped;

    dropped = 0;
#ifdef WIN32
    next = NULL;
#endif
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);

        if (packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM))
        {
            adj = lsquic_packet_out_elide_reset_stream_frames(packet_out,
                                                              stream_id);
            ctl->sc_bytes_scheduled -= adj;
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel packet %"PRIu64" after eliding frames for "
                    "stream %"PRIu32, packet_out->po_packno, stream_id);
                send_ctl_sched_remove(ctl, packet_out);
                send_ctl_destroy_packet(ctl, packet_out);
                ++dropped;
            }
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

    for (n = 0; n < sizeof(ctl->sc_buffered_packets) /
                                sizeof(ctl->sc_buffered_packets[0]); ++n)
    {
        for (packet_out = TAILQ_FIRST(&ctl->sc_buffered_packets[n].bpq_packets);
                                                packet_out; packet_out = next)
        {
            next = TAILQ_NEXT(packet_out, po_next);
            assert(packet_out->po_frame_types & (1 << QUIC_FRAME_STREAM));
            lsquic_packet_out_elide_reset_stream_frames(packet_out, stream_id);
            if (0 == packet_out->po_frame_types)
            {
                LSQ_DEBUG("cancel buffered packet in queue #%u after eliding "
                    "frames for stream %"PRIu32, n, stream_id);
                TAILQ_REMOVE(&ctl->sc_buffered_packets[n].bpq_packets,
                             packet_out, po_next);
                --ctl->sc_buffered_packets[n].bpq_count;
                send_ctl_destroy_packet(ctl, packet_out);
                LSQ_DEBUG("Elide packet from buffered queue #%u; count: %u",
                          n, ctl->sc_buffered_packets[n].bpq_count);
            }
        }
    }
}


/* Count how many packets will remain after the squeezing performed by
 * lsquic_send_ctl_squeeze_sched().  This is the number of delayed data
 * packets.
 */
#ifndef NDEBUG
#if __GNUC__
__attribute__((weak))
#endif
#endif
int
lsquic_send_ctl_have_delayed_packets (const lsquic_send_ctl_t *ctl)
{
    const struct lsquic_packet_out *packet_out;
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
            return 1;
    return 0;
}


#ifndef NDEBUG
static void
send_ctl_log_packet_q (const lsquic_send_ctl_t *ctl, const char *prefix,
                                const struct lsquic_packets_tailq *tailq)
{
    const lsquic_packet_out_t *packet_out;
    unsigned n_packets;
    char *buf;
    size_t bufsz;
    int off;

    n_packets = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
        ++n_packets;

    if (n_packets == 0)
    {
        LSQ_DEBUG("%s: [<empty set>]", prefix);
        return;
    }

    bufsz = n_packets * sizeof("18446744073709551615" /* UINT64_MAX */);
    buf = malloc(bufsz);
    if (!buf)
    {
        LSQ_ERROR("%s: malloc: %s", __func__, strerror(errno));
        return;
    }

    off = 0;
    TAILQ_FOREACH(packet_out, tailq, po_next)
    {
        if (off)
            buf[off++] = ' ';
        off += sprintf(buf + off, "%"PRIu64, packet_out->po_packno);
    }

    LSQ_DEBUG("%s: [%s]", prefix, buf);
    free(buf);
}

/* The parameter order matches the call sites: queue first, then prefix. */
#define LOG_PACKET_Q(queue, prefix) do {                                    \
    if (LSQ_LOG_ENABLED(LSQ_LOG_DEBUG))                                     \
        send_ctl_log_packet_q(ctl, prefix, queue);                          \
} while (0)
#else
#define LOG_PACKET_Q(q, p)
#endif


int
lsquic_send_ctl_squeeze_sched (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out, *next;
    int dropped;
#ifndef NDEBUG
    int pre_squeeze_logged = 0;
#endif

    dropped = 0;
    for (packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets); packet_out;
                                                            packet_out = next)
    {
        next = TAILQ_NEXT(packet_out, po_next);
        if (packet_out->po_regen_sz < packet_out->po_data_sz)
        {
            if (packet_out->po_flags & PO_ENCRYPTED)
                send_ctl_release_enc_data(ctl, packet_out);
        }
        else
        {
#ifndef NDEBUG
            /* Log the whole list before we squeeze for the first time */
            if (!pre_squeeze_logged++)
                LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "unacked packets before squeezing");
#endif
            send_ctl_sched_remove(ctl, packet_out);
            LSQ_DEBUG("Dropping packet %"PRIu64" from scheduled queue",
                packet_out->po_packno);
            send_ctl_destroy_packet(ctl, packet_out);
            ++dropped;
        }
    }

    if (dropped)
        lsquic_send_ctl_reset_packnos(ctl);

#ifndef NDEBUG
    if (pre_squeeze_logged)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets,
                                        "unacked packets after squeezing");
    else if (ctl->sc_n_scheduled > 0)
        LOG_PACKET_Q(&ctl->sc_scheduled_packets, "delayed packets");
#endif

    return ctl->sc_n_scheduled > 0;
}


void
lsquic_send_ctl_reset_packnos (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *packet_out;

    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
        packet_out->po_flags |= PO_REPACKNO;
}


void
lsquic_send_ctl_ack_to_front (lsquic_send_ctl_t *ctl)
{
    struct lsquic_packet_out *ack_packet;

    assert(ctl->sc_n_scheduled > 1);    /* Otherwise, why is this called? */
    ack_packet = TAILQ_LAST(&ctl->sc_scheduled_packets, lsquic_packets_tailq);
    assert(ack_packet->po_frame_types & (1 << QUIC_FRAME_ACK));
    TAILQ_REMOVE(&ctl->sc_scheduled_packets, ack_packet, po_next);
    TAILQ_INSERT_HEAD(&ctl->sc_scheduled_packets, ack_packet, po_next);
}


void
lsquic_send_ctl_drop_scheduled (lsquic_send_ctl_t *ctl)
{
    lsquic_packet_out_t *packet_out;
    const unsigned n = ctl->sc_n_scheduled;
    while ((packet_out = TAILQ_FIRST(&ctl->sc_scheduled_packets)))
    {
        send_ctl_sched_remove(ctl, packet_out);
        send_ctl_destroy_packet(ctl, packet_out);
    }
    assert(0 == ctl->sc_n_scheduled);
    ctl->sc_cur_packno = lsquic_senhist_largest(&ctl->sc_senhist);
1573    LSQ_DEBUG("dropped %u scheduled packet%s", n, n != 0 ? "s" : "");
}


#ifdef NDEBUG
static
#elif __GNUC__
__attribute__((weak))
#endif
enum buf_packet_type
lsquic_send_ctl_determine_bpt (lsquic_send_ctl_t *ctl,
                                            const lsquic_stream_t *stream)
{
    const lsquic_stream_t *other_stream;
    struct lsquic_hash_elem *el;
    struct lsquic_hash *all_streams;

    all_streams = ctl->sc_conn_pub->all_streams;
    for (el = lsquic_hash_first(all_streams); el;
                                     el = lsquic_hash_next(all_streams))
    {
        other_stream = lsquic_hashelem_getdata(el);
        if (other_stream != stream
              && (!(other_stream->stream_flags & STREAM_U_WRITE_DONE))
                && !lsquic_stream_is_critical(other_stream)
                  && other_stream->sm_priority < stream->sm_priority)
            return BPT_OTHER_PRIO;
    }
    return BPT_HIGHEST_PRIO;
}


static enum buf_packet_type
send_ctl_lookup_bpt (lsquic_send_ctl_t *ctl,
                                        const struct lsquic_stream *stream)
{
    if (ctl->sc_cached_bpt.stream_id != stream->id)
    {
        ctl->sc_cached_bpt.stream_id = stream->id;
        ctl->sc_cached_bpt.packet_type =
                                lsquic_send_ctl_determine_bpt(ctl, stream);
    }
    return ctl->sc_cached_bpt.packet_type;
}


static unsigned
send_ctl_max_bpq_count (const lsquic_send_ctl_t *ctl,
                                        enum buf_packet_type packet_type)
{
    unsigned count;

    switch (packet_type)
    {
    case BPT_OTHER_PRIO:
        return MAX_BPQ_COUNT;
    case BPT_HIGHEST_PRIO:
    default: /* clang does not complain about absence of `default'... */
1631        count = ctl->sc_n_scheduled + ctl->sc_n_in_flight_retx;
1632        if (count < lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size)
1633        {
1634            count -= lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
1635            if (count > MAX_BPQ_COUNT)
1636                return count;
1637        }
1638        return MAX_BPQ_COUNT;
    }
}
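/* Example of the limit above (illustrative numbers): if the congestion
 * window has room for 20 full-size packets and 6 are scheduled or in
 * flight, up to 20 - 6 = 14 packets may be buffered for the
 * highest-priority queue when that exceeds MAX_BPQ_COUNT; otherwise the
 * default MAX_BPQ_COUNT limit applies.
 */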
1641
1642
1643static void
1644send_ctl_move_ack (struct lsquic_send_ctl *ctl, struct lsquic_packet_out *dst,
1645                    struct lsquic_packet_out *src)
1646{
1647    assert(dst->po_data_sz == 0);
1648
1649    if (lsquic_packet_out_avail(dst) >= src->po_regen_sz)
1650    {
1651        memcpy(dst->po_data, src->po_data, src->po_regen_sz);
1652        dst->po_data_sz = src->po_regen_sz;
1653        dst->po_regen_sz = src->po_regen_sz;
1654        dst->po_frame_types |= (QFRAME_REGEN_MASK & src->po_frame_types);
1655        src->po_frame_types &= ~QFRAME_REGEN_MASK;
1656        lsquic_packet_out_chop_regen(src);
1657    }
1658}
1659
1660
1661static lsquic_packet_out_t *
1662send_ctl_get_buffered_packet (lsquic_send_ctl_t *ctl,
1663                enum buf_packet_type packet_type, unsigned need_at_least,
1664                                        const struct lsquic_stream *stream)
1665{
1666    struct buf_packet_q *const packet_q =
1667                                    &ctl->sc_buffered_packets[packet_type];
1668    struct lsquic_conn *const lconn = ctl->sc_conn_pub->lconn;
1669    lsquic_packet_out_t *packet_out;
1670    enum lsquic_packno_bits bits;
1671    enum { AA_STEAL, AA_GENERATE, AA_NONE, } ack_action;
1672
1673    packet_out = TAILQ_LAST(&packet_q->bpq_packets, lsquic_packets_tailq);
1674    if (packet_out
1675        && !(packet_out->po_flags & PO_STREAM_END)
1676        && lsquic_packet_out_avail(packet_out) >= need_at_least)
1677    {
1678        return packet_out;
1679    }
1680
1681    if (packet_q->bpq_count >= send_ctl_max_bpq_count(ctl, packet_type))
1682        return NULL;
1683
1684    bits = lsquic_send_ctl_guess_packno_bits(ctl);
1685    if (packet_q->bpq_count == 0)
1686    {
1687        /* If ACK was written to the low-priority queue first, steal it */
1688        if (packet_q == &ctl->sc_buffered_packets[BPT_HIGHEST_PRIO]
1689            && !TAILQ_EMPTY(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
1690            && (TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets)
1691                                        ->po_frame_types & QUIC_FTBIT_ACK))
1692        {
1693            LSQ_DEBUG("steal ACK frame from low-priority buffered queue");
1694            ack_action = AA_STEAL;
1695            bits = PACKNO_LEN_6;
1696        }
1697        /* If ACK can be generated, write it to the first buffered packet. */
1698        else if (lconn->cn_if->ci_can_write_ack(lconn))
1699        {
1700            LSQ_DEBUG("generate ACK frame for first buffered packet in "
1701                                                    "queue #%u", packet_type);
1702            ack_action = AA_GENERATE;
1703            /* Use the largest possible packet number length to guarantee that
1704             * the buffered packet carrying the ACK never needs to be split.
1705             */
1706            bits = PACKNO_LEN_6;
1707        }
1708        else
1709            goto no_ack_action;
1710    }
1711    else
1712    {
1713  no_ack_action:
1714        ack_action = AA_NONE;
1715        bits = lsquic_send_ctl_guess_packno_bits(ctl);
1716    }
1717
1718    packet_out = send_ctl_allocate_packet(ctl, bits, need_at_least);
1719    if (!packet_out)
1720        return NULL;
1721
1722    switch (ack_action)
1723    {
1724    case AA_STEAL:
1725        send_ctl_move_ack(ctl, packet_out,
1726            TAILQ_FIRST(&ctl->sc_buffered_packets[BPT_OTHER_PRIO].bpq_packets));
1727        break;
1728    case AA_GENERATE:
1729        lconn->cn_if->ci_write_ack(lconn, packet_out);
1730        break;
1731    case AA_NONE:
1732        break;
1733    }
1734
1735    TAILQ_INSERT_TAIL(&packet_q->bpq_packets, packet_out, po_next);
1736    ++packet_q->bpq_count;
1737    LSQ_DEBUG("Add new packet to buffered queue #%u; count: %u",
1738              packet_type, packet_q->bpq_count);
1739    return packet_out;
1740}
1741
1742
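/* Return a packet that stream data can be written to: a scheduled packet
 * when stream packets are sent out immediately, a buffered packet of the
 * appropriate type otherwise.
 */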
1743lsquic_packet_out_t *
1744lsquic_send_ctl_get_packet_for_stream (lsquic_send_ctl_t *ctl,
1745                unsigned need_at_least, const struct lsquic_stream *stream)
1746{
1747    enum buf_packet_type packet_type;
1748
1749    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
1750        return lsquic_send_ctl_get_writeable_packet(ctl, need_at_least, NULL);
1751    else
1752    {
1753        packet_type = send_ctl_lookup_bpt(ctl, stream);
1754        return send_ctl_get_buffered_packet(ctl, packet_type, need_at_least,
1755                                            stream);
1756    }
1757}
1758
1759
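/* Return true if stream packets are buffered rather than scheduled
 * immediately and `stream' maps to the highest-priority buffered queue,
 * the same queue that the HEADERS stream uses.
 */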
1760int
1761lsquic_send_ctl_buffered_and_same_prio_as_headers (struct lsquic_send_ctl *ctl,
1762                                            const struct lsquic_stream *stream)
1763{
1764    return !lsquic_send_ctl_schedule_stream_packets_immediately(ctl)
1765        && BPT_HIGHEST_PRIO == send_ctl_lookup_bpt(ctl, stream);
1766}
1767
1768
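/* Calculate the packet number length from the distance between the next
 * packet number and the smallest unacked packet number, using the
 * congestion window expressed in packets as an estimate of the number of
 * packets in flight.  (Static in release builds; a weak symbol otherwise,
 * which lets tests substitute their own version.)
 */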
1769#ifdef NDEBUG
1770static
1771#elif __GNUC__
1772__attribute__((weak))
1773#endif
1774enum lsquic_packno_bits
1775lsquic_send_ctl_calc_packno_bits (lsquic_send_ctl_t *ctl)
1776{
1777    lsquic_packno_t smallest_unacked;
1778    unsigned n_in_flight;
1779
1780    smallest_unacked = lsquic_send_ctl_smallest_unacked(ctl);
1781    n_in_flight = lsquic_cubic_get_cwnd(&ctl->sc_cubic) / ctl->sc_pack_size;
1782    return calc_packno_bits(ctl->sc_cur_packno + 1, smallest_unacked,
1783                                                            n_in_flight);
1784}
1785
1786
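/* Packet numbers of scheduled packets are known, so the exact packet
 * number length can be calculated.  Buffered packets are numbered later,
 * when they are scheduled, so for them the length can only be guessed.
 */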
1787enum lsquic_packno_bits
1788lsquic_send_ctl_packno_bits (lsquic_send_ctl_t *ctl)
1789{
1791    if (lsquic_send_ctl_schedule_stream_packets_immediately(ctl))
1792        return lsquic_send_ctl_calc_packno_bits(ctl);
1793    else
1794        return lsquic_send_ctl_guess_packno_bits(ctl);
1795}
1796
1797
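/* Split the packet at the head of the buffered queue in two, moving
 * `excess_bytes' worth of frames into a newly allocated packet that is
 * inserted right after it.  This is needed when switching to a longer
 * packet number leaves the original packet short of room.
 */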
1798static int
1799split_buffered_packet (lsquic_send_ctl_t *ctl,
1800        enum buf_packet_type packet_type, lsquic_packet_out_t *packet_out,
1801        enum lsquic_packno_bits bits, unsigned excess_bytes)
1802{
1803    struct buf_packet_q *const packet_q =
1804                                    &ctl->sc_buffered_packets[packet_type];
1805    lsquic_packet_out_t *new_packet_out;
1806
1807    assert(TAILQ_FIRST(&packet_q->bpq_packets) == packet_out);
1808
1809    new_packet_out = send_ctl_allocate_packet(ctl, bits, 0);
1810    if (!new_packet_out)
1811        return -1;
1812
1813    if (0 == lsquic_packet_out_split_in_two(&ctl->sc_enpub->enp_mm, packet_out,
1814                  new_packet_out, ctl->sc_conn_pub->lconn->cn_pf, excess_bytes))
1815    {
1816        lsquic_packet_out_set_packno_bits(packet_out, bits);
1817        TAILQ_INSERT_AFTER(&packet_q->bpq_packets, packet_out, new_packet_out,
1818                           po_next);
1819        ++packet_q->bpq_count;
1820        LSQ_DEBUG("Add split packet to buffered queue #%u; count: %u",
1821                  packet_type, packet_q->bpq_count);
1822        return 0;
1823    }
1824    else
1825    {
1826        send_ctl_destroy_packet(ctl, new_packet_out);  /* `packet_out' is still queued */
1827        return -1;
1828    }
1829}
1830
1831
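/* Move packets from the buffered queue to the scheduled queue, assigning
 * packet numbers as they go.  A buffered packet whose guessed packet
 * number length is too short -- and which lacks the room to absorb the
 * difference -- is split first.
 */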
1832int
1833lsquic_send_ctl_schedule_buffered (lsquic_send_ctl_t *ctl,
1834                                            enum buf_packet_type packet_type)
1835{
1836    struct buf_packet_q *const packet_q =
1837                                    &ctl->sc_buffered_packets[packet_type];
1838    lsquic_packet_out_t *packet_out;
1839    unsigned used, excess;
1840
1841    assert(lsquic_send_ctl_schedule_stream_packets_immediately(ctl));
1842    const enum lsquic_packno_bits bits = lsquic_send_ctl_calc_packno_bits(ctl);
1843    const unsigned need = packno_bits2len(bits);
1844
1845    while ((packet_out = TAILQ_FIRST(&packet_q->bpq_packets)) &&
1846                                            lsquic_send_ctl_can_send(ctl))
1847    {
1848        if (bits != lsquic_packet_out_packno_bits(packet_out))
1849        {
1850            used = packno_bits2len(lsquic_packet_out_packno_bits(packet_out));
1851            if (need > used
1852                && need - used > lsquic_packet_out_avail(packet_out))
1853            {
1854                excess = need - used - lsquic_packet_out_avail(packet_out);
1855                if (0 != split_buffered_packet(ctl, packet_type,
1856                                               packet_out, bits, excess))
1857                {
1858                    return -1;
1859                }
1860            }
1861        }
1862        TAILQ_REMOVE(&packet_q->bpq_packets, packet_out, po_next);
1863        --packet_q->bpq_count;
1864        packet_out->po_packno = send_ctl_next_packno(ctl);
1865        LSQ_DEBUG("Remove packet from buffered queue #%u; count: %u.  "
1866            "It becomes packet %"PRIu64, packet_type, packet_q->bpq_count,
1867            packet_out->po_packno);
1868        lsquic_send_ctl_scheduled_one(ctl, packet_out);
1869    }
1870
1871    return 0;
1872}
1873
1874
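/* Set the FIN bit on an existing STREAM frame belonging to `stream',
 * looking first in the buffered packets and then in the scheduled packets
 * that have not been sent yet.  Return 0 on success, -1 if no suitable
 * frame is found.
 */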
1875int
1876lsquic_send_ctl_turn_on_fin (struct lsquic_send_ctl *ctl,
1877                             const struct lsquic_stream *stream)
1878{
1879    enum buf_packet_type packet_type;
1880    struct buf_packet_q *packet_q;
1881    lsquic_packet_out_t *packet_out;
1882    const struct parse_funcs *pf;
1883
1884    pf = ctl->sc_conn_pub->lconn->cn_pf;
1885    packet_type = send_ctl_lookup_bpt(ctl, stream);
1886    packet_q = &ctl->sc_buffered_packets[packet_type];
1887
1888    TAILQ_FOREACH_REVERSE(packet_out, &packet_q->bpq_packets,
1889                          lsquic_packets_tailq, po_next)
1890        if (0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
1891            return 0;
1892
1893    TAILQ_FOREACH(packet_out, &ctl->sc_scheduled_packets, po_next)
1894        if (0 == packet_out->po_sent
1895            && 0 == lsquic_packet_out_turn_on_fin(packet_out, pf, stream))
1896        {
1897            return 0;
1898        }
1899
1900    return -1;
1901}
1902
1903
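/* Return an estimate of the amount of memory taken up by the send
 * controller, including all packets on the scheduled, unacked, lost, and
 * buffered queues.
 */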
1904size_t
1905lsquic_send_ctl_mem_used (const struct lsquic_send_ctl *ctl)
1906{
1907    const lsquic_packet_out_t *packet_out;
1908    unsigned n;
1909    size_t size;
1910    const struct lsquic_packets_tailq queues[] = {
1911        ctl->sc_scheduled_packets,
1912        ctl->sc_unacked_packets,
1913        ctl->sc_lost_packets,
1914        ctl->sc_buffered_packets[0].bpq_packets,
1915        ctl->sc_buffered_packets[1].bpq_packets,
1916    };
1917
1918    size = sizeof(*ctl);
1919
1920    for (n = 0; n < sizeof(queues) / sizeof(queues[0]); ++n)
1921        TAILQ_FOREACH(packet_out, &queues[n], po_next)
1922            size += lsquic_packet_out_mem_used(packet_out);
1923
1924    return size;
1925}
1926